X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fr600%2Fr600_shader.c;h=cd78104a010a97df32908cb2d70f792a731dd038;hb=897d2351322e4c516eee622b3f49eedca7a2e308;hp=493ebbea98aba8e36fa7543f0f7f57212fc092f7;hpb=91d47296967ebfaf685f3870998ea0a1450ecf55;p=mesa.git diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c index 493ebbea98a..cd78104a010 100644 --- a/src/gallium/drivers/r600/r600_shader.c +++ b/src/gallium/drivers/r600/r600_shader.c @@ -20,18 +20,18 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include "r600_sq.h" +#include "r600_llvm.h" +#include "r600_formats.h" +#include "r600_opcodes.h" +#include "r600d.h" + #include "pipe/p_shader_tokens.h" #include "tgsi/tgsi_info.h" #include "tgsi/tgsi_parse.h" #include "tgsi/tgsi_scan.h" #include "tgsi/tgsi_dump.h" -#include "util/u_format.h" -#include "r600_pipe.h" -#include "r600_asm.h" -#include "r600_sq.h" -#include "r600_formats.h" -#include "r600_opcodes.h" -#include "r600d.h" +#include "util/u_memory.h" #include #include #include @@ -59,7 +59,7 @@ The compiler must issue the source argument to slots z, y, and x static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader) { - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; + struct r600_context *rctx = (struct r600_context *)ctx; struct r600_shader *rshader = &shader->shader; uint32_t *ptr; int i; @@ -71,7 +71,7 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s if (shader->bo == NULL) { return -ENOMEM; } - ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE); + ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE); if (R600_BIG_ENDIAN) { for (i = 0; i < rshader->bc.ndw; ++i) { ptr[i] = bswap_32(rshader->bc.bytecode[i]); @@ -79,7 +79,7 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s } else { memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr)); } - rctx->ws->buffer_unmap(shader->bo->buf); + rctx->ws->buffer_unmap(shader->bo->cs_buf); } /* build state */ switch (rshader->processor_type) { @@ -103,12 +103,12 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s return 0; } -static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pipe_shader *pipeshader); +static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader); int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader) { static int dump_shaders = -1; - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; + struct r600_context *rctx = (struct r600_context *)ctx; int r; /* Would like some magic "get_bool_option_once" routine. 
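An aside on the byteswap loop kept as context in the hunk above: the bytecode dwords must reach the GPU in little-endian order, so a big-endian host swaps each dword while filling the mapped buffer. A minimal, self-contained sketch of just that copy step (illustrative only, not taken from the commit; the winsys buffer_map/buffer_unmap calls are left out):

#include <stdint.h>
#include <string.h>

/* Illustrative helper: copy shader bytecode into a mapped buffer, swapping
 * each dword on big-endian hosts so the GPU sees little-endian words
 * (mirrors the R600_BIG_ENDIAN / bswap_32 path shown above). */
static void copy_bytecode_le(uint32_t *dst, const uint32_t *src,
			     unsigned ndw, int host_is_big_endian)
{
	unsigned i;

	if (host_is_big_endian) {
		for (i = 0; i < ndw; ++i)
			dst[i] = ((src[i] & 0x000000ffu) << 24) |
				 ((src[i] & 0x0000ff00u) << 8)  |
				 ((src[i] & 0x00ff0000u) >> 8)  |
				 ((src[i] & 0xff000000u) >> 24); /* open-coded bswap_32 */
	} else {
		memcpy(dst, src, ndw * sizeof(*dst));
	}
}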
@@ -182,12 +182,12 @@ struct r600_shader_ctx { unsigned file_offset[TGSI_FILE_COUNT]; unsigned temp_reg; struct r600_shader_tgsi_instruction *inst_info; - struct r600_bytecode *bc; + struct r600_bytecode *bc; struct r600_shader *shader; struct r600_shader_src src[4]; - u32 *literals; - u32 nliterals; - u32 max_driver_temp_used; + uint32_t *literals; + uint32_t nliterals; + uint32_t max_driver_temp_used; /* needed for evergreen interpolation */ boolean input_centroid; boolean input_linear; @@ -195,6 +195,10 @@ struct r600_shader_ctx { int num_interp_gpr; int face_gpr; int colors_used; + boolean clip_vertex_write; + unsigned cv_output; + int fragcoord_input; + int native_integers; }; struct r600_shader_tgsi_instruction { @@ -206,6 +210,276 @@ struct r600_shader_tgsi_instruction { static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[]; static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx); +static inline void callstack_check_depth(struct r600_shader_ctx *ctx, unsigned reason, unsigned check_max_only); +static void fc_pushlevel(struct r600_shader_ctx *ctx, int type); +static int tgsi_else(struct r600_shader_ctx *ctx); +static int tgsi_endif(struct r600_shader_ctx *ctx); +static int tgsi_bgnloop(struct r600_shader_ctx *ctx); +static int tgsi_endloop(struct r600_shader_ctx *ctx); +static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx); + +/* + * bytestream -> r600 shader + * + * These functions are used to transform the output of the LLVM backend into + * struct r600_bytecode. + */ + +static unsigned r600_src_from_byte_stream(unsigned char * bytes, + unsigned bytes_read, struct r600_bytecode_alu * alu, unsigned src_idx) +{ + unsigned i; + unsigned sel0, sel1; + sel0 = bytes[bytes_read++]; + sel1 = bytes[bytes_read++]; + alu->src[src_idx].sel = sel0 | (sel1 << 8); + alu->src[src_idx].chan = bytes[bytes_read++]; + alu->src[src_idx].neg = bytes[bytes_read++]; + alu->src[src_idx].abs = bytes[bytes_read++]; + alu->src[src_idx].rel = bytes[bytes_read++]; + alu->src[src_idx].kc_bank = bytes[bytes_read++]; + for (i = 0; i < 4; i++) { + alu->src[src_idx].value |= bytes[bytes_read++] << (i * 8); + } + return bytes_read; +} + +static unsigned r600_alu_from_byte_stream(struct r600_shader_ctx *ctx, + unsigned char * bytes, unsigned bytes_read) +{ + unsigned src_idx; + unsigned inst0, inst1; + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(alu)); + for(src_idx = 0; src_idx < 3; src_idx++) { + bytes_read = r600_src_from_byte_stream(bytes, bytes_read, + &alu, src_idx); + } + + alu.dst.sel = bytes[bytes_read++]; + alu.dst.chan = bytes[bytes_read++]; + alu.dst.clamp = bytes[bytes_read++]; + alu.dst.write = bytes[bytes_read++]; + alu.dst.rel = bytes[bytes_read++]; + inst0 = bytes[bytes_read++]; + inst1 = bytes[bytes_read++]; + alu.inst = inst0 | (inst1 << 8); + alu.last = bytes[bytes_read++]; + alu.is_op3 = bytes[bytes_read++]; + alu.predicate = bytes[bytes_read++]; + alu.bank_swizzle = bytes[bytes_read++]; + alu.bank_swizzle_force = bytes[bytes_read++]; + alu.omod = bytes[bytes_read++]; + alu.index_mode = bytes[bytes_read++]; + r600_bytecode_add_alu(ctx->bc, &alu); + + /* XXX: Handle other KILL instructions */ + if (alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT)) { + ctx->shader->uses_kill = 1; + /* XXX: This should be enforced in the LLVM backend. 
*/ + ctx->bc->force_add_cf = 1; + } + return bytes_read; +} + +static void llvm_if(struct r600_shader_ctx *ctx, struct r600_bytecode_alu * alu, + unsigned pred_inst) +{ + alu->inst = pred_inst; + alu->predicate = 1; + alu->dst.write = 0; + alu->src[1].sel = V_SQ_ALU_SRC_0; + alu->src[1].chan = 0; + alu->last = 1; + r600_bytecode_add_alu_type(ctx->bc, alu, + CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE)); + + r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP)); + fc_pushlevel(ctx, FC_IF); + callstack_check_depth(ctx, FC_PUSH_VPM, 0); +} + +static void r600_break_from_byte_stream(struct r600_shader_ctx *ctx, + struct r600_bytecode_alu *alu, unsigned compare_opcode) +{ + unsigned opcode = TGSI_OPCODE_BRK; + if (ctx->bc->chip_class == CAYMAN) + ctx->inst_info = &cm_shader_tgsi_instruction[opcode]; + else if (ctx->bc->chip_class >= EVERGREEN) + ctx->inst_info = &eg_shader_tgsi_instruction[opcode]; + else + ctx->inst_info = &r600_shader_tgsi_instruction[opcode]; + llvm_if(ctx, alu, compare_opcode); + tgsi_loop_brk_cont(ctx); + tgsi_endif(ctx); +} + +static unsigned r600_fc_from_byte_stream(struct r600_shader_ctx *ctx, + unsigned char * bytes, unsigned bytes_read) +{ + struct r600_bytecode_alu alu; + unsigned inst; + memset(&alu, 0, sizeof(alu)); + bytes_read = r600_src_from_byte_stream(bytes, bytes_read, &alu, 0); + inst = bytes[bytes_read++]; + switch (inst) { + case 0: + llvm_if(ctx, &alu, + CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE)); + break; + case 1: + tgsi_else(ctx); + break; + case 2: + tgsi_endif(ctx); + break; + case 3: + tgsi_bgnloop(ctx); + break; + case 4: + tgsi_endloop(ctx); + break; + case 5: + r600_break_from_byte_stream(ctx, &alu, + CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE)); + break; + case 6: + r600_break_from_byte_stream(ctx, &alu, + CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT)); + break; + case 7: + { + unsigned opcode = TGSI_OPCODE_CONT; + if (ctx->bc->chip_class == CAYMAN) { + ctx->inst_info = + &cm_shader_tgsi_instruction[opcode]; + } else if (ctx->bc->chip_class >= EVERGREEN) { + ctx->inst_info = + &eg_shader_tgsi_instruction[opcode]; + } else { + ctx->inst_info = + &r600_shader_tgsi_instruction[opcode]; + } + tgsi_loop_brk_cont(ctx); + } + break; + case 8: + r600_break_from_byte_stream(ctx, &alu, + CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT)); + break; + } + + return bytes_read; +} + +static unsigned r600_tex_from_byte_stream(struct r600_shader_ctx *ctx, + unsigned char * bytes, unsigned bytes_read) +{ + struct r600_bytecode_tex tex; + + tex.inst = bytes[bytes_read++]; + tex.resource_id = bytes[bytes_read++]; + tex.src_gpr = bytes[bytes_read++]; + tex.src_rel = bytes[bytes_read++]; + tex.dst_gpr = bytes[bytes_read++]; + tex.dst_rel = bytes[bytes_read++]; + tex.dst_sel_x = bytes[bytes_read++]; + tex.dst_sel_y = bytes[bytes_read++]; + tex.dst_sel_z = bytes[bytes_read++]; + tex.dst_sel_w = bytes[bytes_read++]; + tex.lod_bias = bytes[bytes_read++]; + tex.coord_type_x = bytes[bytes_read++]; + tex.coord_type_y = bytes[bytes_read++]; + tex.coord_type_z = bytes[bytes_read++]; + tex.coord_type_w = bytes[bytes_read++]; + tex.offset_x = bytes[bytes_read++]; + tex.offset_y = bytes[bytes_read++]; + tex.offset_z = bytes[bytes_read++]; + tex.sampler_id = bytes[bytes_read++]; + tex.src_sel_x = bytes[bytes_read++]; + tex.src_sel_y = bytes[bytes_read++]; + tex.src_sel_z = bytes[bytes_read++]; + tex.src_sel_w = bytes[bytes_read++]; + + r600_bytecode_add_tex(ctx->bc, &tex); + + return bytes_read; +} + +static int 
r600_vtx_from_byte_stream(struct r600_shader_ctx *ctx, + unsigned char * bytes, unsigned bytes_read) +{ + struct r600_bytecode_vtx vtx; + memset(&vtx, 0, sizeof(vtx)); + vtx.inst = bytes[bytes_read++]; + vtx.fetch_type = bytes[bytes_read++]; + vtx.buffer_id = bytes[bytes_read++]; + vtx.src_gpr = bytes[bytes_read++]; + vtx.src_sel_x = bytes[bytes_read++]; + vtx.mega_fetch_count = bytes[bytes_read++]; + vtx.dst_gpr = bytes[bytes_read++]; + vtx.dst_sel_x = bytes[bytes_read++]; + vtx.dst_sel_y = bytes[bytes_read++]; + vtx.dst_sel_z = bytes[bytes_read++]; + vtx.dst_sel_w = bytes[bytes_read++]; + vtx.use_const_fields = bytes[bytes_read++]; + vtx.data_format = bytes[bytes_read++]; + vtx.num_format_all = bytes[bytes_read++]; + vtx.format_comp_all = bytes[bytes_read++]; + vtx.srf_mode_all = bytes[bytes_read++]; + vtx.offset = bytes[bytes_read++]; + vtx.endian = bytes[bytes_read++]; + + if (r600_bytecode_add_vtx(ctx->bc, &vtx)) { + fprintf(stderr, "Error adding vtx\n"); + } + /* Use the Texture Cache */ + ctx->bc->cf_last->inst = EG_V_SQ_CF_WORD1_SQ_CF_INST_TEX; + return bytes_read; +} + +static void r600_bytecode_from_byte_stream(struct r600_shader_ctx *ctx, + unsigned char * bytes, unsigned num_bytes) +{ + unsigned bytes_read = 0; + unsigned i, byte; + while (bytes_read < num_bytes) { + char inst_type = bytes[bytes_read++]; + switch (inst_type) { + case 0: + bytes_read = r600_alu_from_byte_stream(ctx, bytes, + bytes_read); + break; + case 1: + bytes_read = r600_tex_from_byte_stream(ctx, bytes, + bytes_read); + break; + case 2: + bytes_read = r600_fc_from_byte_stream(ctx, bytes, + bytes_read); + break; + case 3: + r600_bytecode_add_cfinst(ctx->bc, CF_NATIVE); + for (i = 0; i < 2; i++) { + for (byte = 0 ; byte < 4; byte++) { + ctx->bc->cf_last->isa[i] |= + (bytes[bytes_read++] << (byte * 8)); + } + } + break; + + case 4: + bytes_read = r600_vtx_from_byte_stream(ctx, bytes, + bytes_read); + break; + default: + /* XXX: Error here */ + break; + } + } +} + +/* End bytestream -> r600 shader functions*/ static int tgsi_is_supported(struct r600_shader_ctx *ctx) { @@ -274,9 +548,9 @@ static int evergreen_interp_alu(struct r600_shader_ctx *ctx, int input) memset(&alu, 0, sizeof(struct r600_bytecode_alu)); if (i < 4) - alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_ZW; + alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_ZW; else - alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_XY; + alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_XY; if ((i > 1) && (i < 6)) { alu.dst.sel = ctx->shader->input[input].gpr; @@ -308,7 +582,7 @@ static int evergreen_interp_flat(struct r600_shader_ctx *ctx, int input) for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INTERP_LOAD_P0; + alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INTERP_LOAD_P0; alu.dst.sel = ctx->shader->input[input].gpr; alu.dst.write = 1; @@ -448,17 +722,23 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) ctx->shader->input[i].name = d->Semantic.Name; ctx->shader->input[i].sid = d->Semantic.Index; ctx->shader->input[i].spi_sid = r600_spi_sid(&ctx->shader->input[i]); - ctx->shader->input[i].interpolate = d->Declaration.Interpolate; - ctx->shader->input[i].centroid = d->Declaration.Centroid; + ctx->shader->input[i].interpolate = d->Interp.Interpolate; + ctx->shader->input[i].centroid = d->Interp.Centroid; ctx->shader->input[i].gpr = ctx->file_offset[TGSI_FILE_INPUT] + d->Range.First; if (ctx->type == TGSI_PROCESSOR_FRAGMENT) { - if (ctx->shader->input[i].name == TGSI_SEMANTIC_FACE) 
+ switch (ctx->shader->input[i].name) { + case TGSI_SEMANTIC_FACE: ctx->face_gpr = ctx->shader->input[i].gpr; - else if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) + break; + case TGSI_SEMANTIC_COLOR: ctx->colors_used++; + break; + case TGSI_SEMANTIC_POSITION: + ctx->fragcoord_input = i; + break; + } if (ctx->bc->chip_class >= EVERGREEN) { - r = evergreen_interp_input(ctx, i); - if (r) + if ((r = evergreen_interp_input(ctx, i))) return r; } } @@ -469,7 +749,7 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) ctx->shader->output[i].sid = d->Semantic.Index; ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]); ctx->shader->output[i].gpr = ctx->file_offset[TGSI_FILE_OUTPUT] + d->Range.First; - ctx->shader->output[i].interpolate = d->Declaration.Interpolate; + ctx->shader->output[i].interpolate = d->Interp.Interpolate; ctx->shader->output[i].write_mask = d->Declaration.UsageMask; if (ctx->type == TGSI_PROCESSOR_VERTEX) { switch (d->Semantic.Name) { @@ -478,6 +758,11 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) break; case TGSI_SEMANTIC_PSIZE: ctx->shader->vs_out_misc_write = 1; + ctx->shader->vs_out_point_size = 1; + break; + case TGSI_SEMANTIC_CLIPVERTEX: + ctx->clip_vertex_write = TRUE; + ctx->cv_output = i; break; } } @@ -490,20 +775,22 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) case TGSI_FILE_SYSTEM_VALUE: if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) { - struct r600_bytecode_alu alu; - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + if (!ctx->native_integers) { + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT); - alu.src[0].sel = 0; - alu.src[0].chan = 3; + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT); + alu.src[0].sel = 0; + alu.src[0].chan = 3; - alu.dst.sel = 0; - alu.dst.chan = 3; - alu.dst.write = 1; - alu.last = 1; + alu.dst.sel = 0; + alu.dst.chan = 3; + alu.dst.write = 1; + alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } break; } else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID) break; @@ -562,7 +849,7 @@ static int evergreen_gpr_count(struct r600_shader_ctx *ctx) ctx->num_interp_gpr += (num_baryc + 1) >> 1; - /* TODO PULL MODEL and LINE STIPPLE, FIXED PT POS */ + /* XXX PULL MODEL and LINE STIPPLE, FIXED PT POS */ return ctx->num_interp_gpr; } @@ -792,7 +1079,7 @@ static int process_twoside_color_inputs(struct r600_shader_ctx *ctx) return 0; } -static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pipe_shader *pipeshader) +static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader) { struct r600_shader *shader = &pipeshader->shader; struct tgsi_token *tokens = pipeshader->tokens; @@ -803,11 +1090,21 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi struct r600_bytecode_output output[32]; unsigned output_done, noutput; unsigned opcode; - int i, j, r = 0, pos0; - + int i, j, k, r = 0; + int next_pixel_base = 0, next_pos_base = 60, next_param_base = 0; + /* Declarations used by llvm code */ + bool use_llvm = false; + unsigned char * inst_bytes = NULL; + unsigned inst_byte_count = 0; + +#ifdef R600_USE_LLVM + use_llvm = debug_get_bool_option("R600_LLVM", TRUE); +#endif ctx.bc = &shader->bc; ctx.shader = shader; - r600_bytecode_init(ctx.bc, rctx->chip_class); + ctx.native_integers = 
(rctx->screen->glsl_feature_level >= 130); + + r600_bytecode_init(ctx.bc, rctx->chip_class, rctx->family); ctx.tokens = tokens; tgsi_scan_shader(tokens, &ctx.info); tgsi_parse_init(&ctx.parse, tokens); @@ -816,13 +1113,11 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi ctx.bc->type = shader->processor_type; ctx.face_gpr = -1; + ctx.fragcoord_input = -1; ctx.colors_used = 0; + ctx.clip_vertex_write = 0; shader->two_side = (ctx.type == TGSI_PROCESSOR_FRAGMENT) && rctx->two_side; - - shader->clamp_color = (((ctx.type == TGSI_PROCESSOR_FRAGMENT) && rctx->clamp_fragment_color) || - ((ctx.type == TGSI_PROCESSOR_VERTEX) && rctx->clamp_vertex_color)); - shader->nr_cbufs = rctx->nr_cbufs; /* register allocations */ @@ -860,8 +1155,46 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi if (ctx.type == TGSI_PROCESSOR_FRAGMENT && ctx.bc->chip_class >= EVERGREEN) { ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx); } - ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] + - ctx.info.file_max[TGSI_FILE_INPUT] + 1; + + /* LLVM backend setup */ +#ifdef R600_USE_LLVM + if (use_llvm && ctx.info.indirect_files) { + fprintf(stderr, "Warning: R600 LLVM backend does not support " + "indirect adressing. Falling back to TGSI " + "backend.\n"); + use_llvm = 0; + } + if (use_llvm) { + struct radeon_llvm_context radeon_llvm_ctx; + LLVMModuleRef mod; + unsigned dump = 0; + memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx)); + radeon_llvm_ctx.reserved_reg_count = ctx.file_offset[TGSI_FILE_INPUT]; + mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens); + if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) { + dump = 1; + } + if (r600_llvm_compile(mod, &inst_bytes, &inst_byte_count, + rctx->family, dump)) { + FREE(inst_bytes); + radeon_llvm_dispose(&radeon_llvm_ctx); + use_llvm = 0; + fprintf(stderr, "R600 LLVM backend failed to compile " + "shader. 
Falling back to TGSI\n"); + } else { + ctx.file_offset[TGSI_FILE_OUTPUT] = + ctx.file_offset[TGSI_FILE_INPUT]; + } + radeon_llvm_dispose(&radeon_llvm_ctx); + } +#endif + /* End of LLVM backend setup */ + + if (!use_llvm) { + ctx.file_offset[TGSI_FILE_OUTPUT] = + ctx.file_offset[TGSI_FILE_INPUT] + + ctx.info.file_max[TGSI_FILE_INPUT] + 1; + } ctx.file_offset[TGSI_FILE_TEMPORARY] = ctx.file_offset[TGSI_FILE_OUTPUT] + ctx.info.file_max[TGSI_FILE_OUTPUT] + 1; @@ -920,6 +1253,38 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi } } + if (ctx.fragcoord_input >= 0) { + if (ctx.bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = BC_INST(ctx.bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE); + alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr; + alu.src[0].chan = 3; + + alu.dst.sel = shader->input[ctx.fragcoord_input].gpr; + alu.dst.chan = j; + alu.dst.write = (j == 3); + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx.bc, &alu))) + return r; + } + } else { + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = BC_INST(ctx.bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE); + alu.src[0].sel = shader->input[ctx.fragcoord_input].gpr; + alu.src[0].chan = 3; + + alu.dst.sel = shader->input[ctx.fragcoord_input].gpr; + alu.dst.chan = 3; + alu.dst.write = 1; + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx.bc, &alu))) + return r; + } + } + if (shader->two_side && ctx.colors_used) { if ((r = process_twoside_color_inputs(&ctx))) return r; @@ -930,6 +1295,9 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi tgsi_parse_token(&ctx.parse); switch (ctx.parse.FullToken.Token.Type) { case TGSI_TOKEN_TYPE_INSTRUCTION: + if (use_llvm) { + continue; + } r = tgsi_is_supported(&ctx); if (r) goto out_err; @@ -957,35 +1325,54 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi } } + /* Get instructions if we are using the LLVM backend. 
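 * (Illustrative summary, inferred from r600_bytecode_from_byte_stream() and
 *  its helpers above: the stream is a sequence of records, each introduced by
 *  a one-byte tag, 0 = ALU, 1 = TEX, 2 = flow control, 3 = raw CF ISA words,
 *  4 = VTX, with the record fields following one byte each; multi-byte fields
 *  such as the 16-bit source .sel and the 32-bit literal value are stored
 *  LSB first.)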
*/ + if (use_llvm) { + r600_bytecode_from_byte_stream(&ctx, inst_bytes, inst_byte_count); + FREE(inst_bytes); + } + noutput = shader->noutput; - /* clamp color outputs */ - if (shader->clamp_color) { - for (i = 0; i < noutput; i++) { - if (shader->output[i].name == TGSI_SEMANTIC_COLOR || - shader->output[i].name == TGSI_SEMANTIC_BCOLOR) { + if (ctx.clip_vertex_write) { + /* need to convert a clipvertex write into clipdistance writes and not export + the clip vertex anymore */ - int j; - for (j = 0; j < 4; j++) { - struct r600_bytecode_alu alu; - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + memset(&shader->output[noutput], 0, 2*sizeof(struct r600_shader_io)); + shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST; + shader->output[noutput].gpr = ctx.temp_reg; + noutput++; + shader->output[noutput].name = TGSI_SEMANTIC_CLIPDIST; + shader->output[noutput].gpr = ctx.temp_reg+1; + noutput++; - /* MOV_SAT R, R */ - alu.inst = BC_INST(ctx.bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - alu.dst.sel = shader->output[i].gpr; - alu.dst.chan = j; - alu.dst.write = 1; - alu.dst.clamp = 1; - alu.src[0].sel = alu.dst.sel; - alu.src[0].chan = j; + /* reset spi_sid for clipvertex output to avoid confusing spi */ + shader->output[ctx.cv_output].spi_sid = 0; - if (j == 3) { - alu.last = 1; - } - r = r600_bytecode_add_alu(ctx.bc, &alu); - if (r) - return r; - } + shader->clip_dist_write = 0xFF; + + for (i = 0; i < 8; i++) { + int oreg = i >> 2; + int ochan = i & 3; + + for (j = 0; j < 4; j++) { + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = BC_INST(ctx.bc, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4); + alu.src[0].sel = shader->output[ctx.cv_output].gpr; + alu.src[0].chan = j; + + alu.src[1].sel = 512 + i; + alu.src[1].kc_bank = 1; + alu.src[1].chan = j; + + alu.dst.sel = ctx.temp_reg + oreg; + alu.dst.chan = j; + alu.dst.write = (j == ochan); + if (j == 3) + alu.last = 1; + r = r600_bytecode_add_alu(ctx.bc, &alu); + if (r) + return r; } } } @@ -1001,8 +1388,8 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi r = -EINVAL; goto out_err; } - if (so.output[i].start_component) { - R600_ERR("stream_output - start_component cannot be non-zero\n"); + if (so.output[i].dst_offset < so.output[i].start_component) { + R600_ERR("stream_output - dst_offset cannot be less than start_component\n"); r = -EINVAL; goto out_err; } @@ -1010,12 +1397,14 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi memset(&output, 0, sizeof(struct r600_bytecode_output)); output.gpr = shader->output[so.output[i].register_index].gpr; output.elem_size = 0; - output.array_base = so.output[i].dst_offset; + output.array_base = so.output[i].dst_offset - so.output[i].start_component; output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE; output.burst_count = 1; output.barrier = 1; - output.array_size = 0; - output.comp_mask = (1 << so.output[i].num_components) - 1; + /* array_size is an upper limit for the burst_count + * with MEM_STREAM instructions */ + output.array_size = 0xFFF; + output.comp_mask = ((1 << so.output[i].num_components) - 1) << so.output[i].start_component; if (ctx.bc->chip_class >= EVERGREEN) { switch (so.output[i].output_buffer) { case 0: @@ -1054,89 +1443,86 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi } /* export output */ - j = 0; - - for (i = 0, pos0 = 0; i < noutput; i++) { - memset(&output[i+j], 0, sizeof(struct r600_bytecode_output)); - output[i + j].gpr = 
shader->output[i].gpr; - output[i + j].elem_size = 3; - output[i + j].swizzle_x = 0; - output[i + j].swizzle_y = 1; - output[i + j].swizzle_z = 2; - output[i + j].swizzle_w = 3; - output[i + j].burst_count = 1; - output[i + j].barrier = 1; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; - output[i + j].array_base = i+j - pos0; - output[i + j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); + for (i = 0, j = 0; i < noutput; i++, j++) { + memset(&output[j], 0, sizeof(struct r600_bytecode_output)); + output[j].gpr = shader->output[i].gpr; + output[j].elem_size = 3; + output[j].swizzle_x = 0; + output[j].swizzle_y = 1; + output[j].swizzle_z = 2; + output[j].swizzle_w = 3; + output[j].burst_count = 1; + output[j].barrier = 1; + output[j].type = -1; + output[j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); switch (ctx.type) { case TGSI_PROCESSOR_VERTEX: switch (shader->output[i].name) { case TGSI_SEMANTIC_POSITION: - output[i + j].array_base = 60; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; - /* position doesn't count in array_base */ - pos0++; + output[j].array_base = next_pos_base++; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; break; case TGSI_SEMANTIC_PSIZE: - output[i + j].array_base = 61; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; - /* position doesn't count in array_base */ - pos0++; + output[j].array_base = next_pos_base++; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; + break; + case TGSI_SEMANTIC_CLIPVERTEX: + j--; break; - case TGSI_SEMANTIC_CLIPDIST: - /* array base for enabled OUT_MISC_VEC & CCDIST[0|1]_VEC - * vectors is allocated sequentially, starting from 61 */ - output[i + j].array_base = 61 + shader->output[i].sid - /* +1 if OUT_MISC_VEC is enabled */ - + shader->vs_out_misc_write - /* -1 if OUT_CCDIST0_VEC is disabled */ - - (((shader->clip_dist_write & 0xF) == 0)? 
1 : 0); - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; - j++; - pos0++; - /* duplicate it as PARAM to pass to the pixel shader */ - memcpy(&output[i+j], &output[i+j-1], sizeof(struct r600_bytecode_output)); - output[i + j].array_base = i+j-pos0; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; + output[j].array_base = next_pos_base++; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_POS; + /* spi_sid is 0 for clipdistance outputs that were generated + * for clipvertex - we don't need to pass them to PS */ + if (shader->output[i].spi_sid) { + j++; + /* duplicate it as PARAM to pass to the pixel shader */ + memcpy(&output[j], &output[j-1], sizeof(struct r600_bytecode_output)); + output[j].array_base = next_param_base++; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; + } + break; + case TGSI_SEMANTIC_FOG: + output[j].swizzle_y = 4; /* 0 */ + output[j].swizzle_z = 4; /* 0 */ + output[j].swizzle_w = 5; /* 1 */ break; } break; case TGSI_PROCESSOR_FRAGMENT: if (shader->output[i].name == TGSI_SEMANTIC_COLOR) { - output[i + j].array_base = shader->output[i].sid; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; + output[j].array_base = next_pixel_base++; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; if (shader->fs_write_all && (rctx->chip_class >= EVERGREEN)) { - for (j = 1; j < shader->nr_cbufs; j++) { - memset(&output[i + j], 0, sizeof(struct r600_bytecode_output)); - output[i + j].gpr = shader->output[i].gpr; - output[i + j].elem_size = 3; - output[i + j].swizzle_x = 0; - output[i + j].swizzle_y = 1; - output[i + j].swizzle_z = 2; - output[i + j].swizzle_w = 3; - output[i + j].burst_count = 1; - output[i + j].barrier = 1; - output[i + j].array_base = shader->output[i].sid + j; - output[i + j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; + for (k = 1; k < shader->nr_cbufs; k++) { + j++; + memset(&output[j], 0, sizeof(struct r600_bytecode_output)); + output[j].gpr = shader->output[i].gpr; + output[j].elem_size = 3; + output[j].swizzle_x = 0; + output[j].swizzle_y = 1; + output[j].swizzle_z = 2; + output[j].swizzle_w = 3; + output[j].burst_count = 1; + output[j].barrier = 1; + output[j].array_base = next_pixel_base++; + output[j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; } - j = shader->nr_cbufs-1; } } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) { - output[i + j].array_base = 61; - output[i + j].swizzle_x = 2; - output[i + j].swizzle_y = 7; - output[i + j].swizzle_z = output[i + j].swizzle_w = 7; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; + output[j].array_base = 61; + output[j].swizzle_x = 2; + output[j].swizzle_y = 7; + output[j].swizzle_z = output[j].swizzle_w = 7; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; } else if (shader->output[i].name == TGSI_SEMANTIC_STENCIL) { - output[i + j].array_base = 61; - output[i + j].swizzle_x = 7; - output[i + j].swizzle_y = 1; - output[i + j].swizzle_z = output[i + j].swizzle_w = 7; - output[i + j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; + output[j].array_base = 61; + output[j].swizzle_x = 7; + output[j].swizzle_y = 1; + output[j].swizzle_z = output[j].swizzle_w = 7; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; } else { R600_ERR("unsupported fragment output name %d\n", shader->output[i].name); r = 
-EINVAL; @@ -1148,48 +1534,49 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi r = -EINVAL; goto out_err; } + + if (output[j].type==-1) { + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; + output[j].array_base = next_param_base++; + } } - noutput += j; + /* add fake param output for vertex shader if no param is exported */ - if (ctx.type == TGSI_PROCESSOR_VERTEX) { - for (i = 0, pos0 = 0; i < noutput; i++) { - if (output[i].type == V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM) { - pos0 = 1; - break; - } - } - if (!pos0) { - memset(&output[i], 0, sizeof(struct r600_bytecode_output)); - output[i].gpr = 0; - output[i].elem_size = 3; - output[i].swizzle_x = 7; - output[i].swizzle_y = 7; - output[i].swizzle_z = 7; - output[i].swizzle_w = 7; - output[i].burst_count = 1; - output[i].barrier = 1; - output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; - output[i].array_base = 0; - output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); - noutput++; - } + if (ctx.type == TGSI_PROCESSOR_VERTEX && next_param_base == 0) { + memset(&output[j], 0, sizeof(struct r600_bytecode_output)); + output[j].gpr = 0; + output[j].elem_size = 3; + output[j].swizzle_x = 7; + output[j].swizzle_y = 7; + output[j].swizzle_z = 7; + output[j].swizzle_w = 7; + output[j].burst_count = 1; + output[j].barrier = 1; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; + output[j].array_base = 0; + output[j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); + j++; } + /* add fake pixel export */ - if (ctx.type == TGSI_PROCESSOR_FRAGMENT && !noutput) { - memset(&output[0], 0, sizeof(struct r600_bytecode_output)); - output[0].gpr = 0; - output[0].elem_size = 3; - output[0].swizzle_x = 7; - output[0].swizzle_y = 7; - output[0].swizzle_z = 7; - output[0].swizzle_w = 7; - output[0].burst_count = 1; - output[0].barrier = 1; - output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; - output[0].array_base = 0; - output[0].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); - noutput++; + if (ctx.type == TGSI_PROCESSOR_FRAGMENT && j == 0) { + memset(&output[j], 0, sizeof(struct r600_bytecode_output)); + output[j].gpr = 0; + output[j].elem_size = 3; + output[j].swizzle_x = 7; + output[j].swizzle_y = 7; + output[j].swizzle_z = 7; + output[j].swizzle_w = 7; + output[j].burst_count = 1; + output[j].barrier = 1; + output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; + output[j].array_base = 0; + output[j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT); + j++; } + + noutput = j; + /* set export done on last export of each type */ for (i = noutput - 1, output_done = 0; i >= 0; i--) { if (ctx.bc->chip_class < CAYMAN) { @@ -1212,6 +1599,14 @@ static int r600_shader_from_tgsi(struct r600_pipe_context * rctx, struct r600_pi if (ctx.bc->chip_class == CAYMAN) cm_bytecode_add_cf_end(ctx.bc); + /* check GPR limit - we have 124 = 128 - 4 + * (4 are reserved as alu clause temporary registers) */ + if (ctx.bc->ngpr > 124) { + R600_ERR("GPR limit exceeded - shader requires %d registers\n", ctx.bc->ngpr); + r = -ENOMEM; + goto out_err; + } + free(ctx.literals); tgsi_parse_free(&ctx.parse); return 0; @@ -1401,6 +1796,34 @@ static int cayman_emit_float_instr(struct r600_shader_ctx *ctx) return 0; } +static int cayman_mul_int_instr(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + int i, j, k, r; + struct r600_bytecode_alu alu; + int 
last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3; + for (k = 0; k < last_slot; k++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << k))) + continue; + + for (i = 0 ; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = ctx->inst_info->r600_opcode; + for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { + r600_bytecode_src(&alu.src[j], &ctx->src[j], k); + } + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.dst.write = (i == k); + if (i == 3) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + return 0; +} + /* * r600 - trunc to -PI..PI range * r700 - normalize by dividing by 2PI @@ -1867,7 +2290,7 @@ static int tgsi_rsq(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - /* FIXME: + /* XXX: * For state trackers other than OpenGL, we'll want to use * _RECIPSQRT_IEEE instead. */ @@ -2019,271 +2442,868 @@ static int tgsi_pow(struct r600_shader_ctx *ctx) return tgsi_helper_tempx_replicate(ctx); } -static int tgsi_idiv(struct r600_shader_ctx *ctx) +static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; - int i, r; + int i, r, j; unsigned write_mask = inst->Dst[0].Register.WriteMask; - int last_inst = tgsi_last_instruction(write_mask); int tmp0 = ctx->temp_reg; int tmp1 = r600_get_temp(ctx); - int unsigned_op = (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_UDIV); + int tmp2 = r600_get_temp(ctx); + int tmp3 = r600_get_temp(ctx); + /* Unsigned path: + * + * we need to represent src1 as src2*q + r, where q - quotient, r - remainder + * + * 1. tmp0.x = rcp (src2) = 2^32/src2 + e, where e is rounding error + * 2. tmp0.z = lo (tmp0.x * src2) + * 3. tmp0.w = -tmp0.z + * 4. tmp0.y = hi (tmp0.x * src2) + * 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src2)) + * 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error + * 7. tmp1.x = tmp0.x - tmp0.w + * 8. tmp1.y = tmp0.x + tmp0.w + * 9. tmp0.x = (tmp0.y == 0 ? tmp1.y : tmp1.x) + * 10. tmp0.z = hi(tmp0.x * src1) = q + * 11. tmp0.y = lo (tmp0.z * src2) = src2*q = src1 - r + * + * 12. tmp0.w = src1 - tmp0.y = r + * 13. tmp1.x = tmp0.w >= src2 = r >= src2 (uint comparison) + * 14. tmp1.y = src1 >= tmp0.y = r >= 0 (uint comparison) + * + * if DIV + * + * 15. tmp1.z = tmp0.z + 1 = q + 1 + * 16. tmp1.w = tmp0.z - 1 = q - 1 + * + * else MOD + * + * 15. tmp1.z = tmp0.w - src2 = r - src2 + * 16. tmp1.w = tmp0.w + src2 = r + src2 + * + * endif + * + * 17. tmp1.x = tmp1.x & tmp1.y + * + * DIV: 18. tmp0.z = tmp1.x==0 ? tmp0.z : tmp1.z + * MOD: 18. tmp0.z = tmp1.x==0 ? tmp0.w : tmp1.z + * + * 19. tmp0.z = tmp1.y==0 ? tmp1.w : tmp0.z + * 20. dst = src2==0 ? MAX_UINT : tmp0.z + * + * Signed path: + * + * Same as unsigned, using abs values of the operands, + * and fixing the sign of the result in the end. 
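 *
 * Worked example (illustrative, assuming rcp_u(7) returns
 * floor(2^32/7) = 613566756): for src1 = 100, src2 = 7, lo(rcp*7) is
 * 0xFFFFFFFC, so step 5 gives an absolute low-part error of 4 and step 6
 * gives e = hi(4*rcp) = 0, leaving the corrected reciprocal 613566756;
 * step 10 then yields q = hi(rcp*100) = 14, step 11 gives src2*q = 98,
 * step 12 gives r = 2, and since r < src2 and r >= 0 neither the q+1 nor
 * the q-1 correction is selected, so UDIV returns 14 and UMOD returns 2.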
+ */ - /* tmp0 = float(src0) */ for (i = 0; i < 4; i++) { if (!(write_mask & (1<src[0], i); - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + alu.src[0].sel = V_SQ_ALU_SRC_0; + + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* tmp2.y = -src1 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); + + alu.dst.sel = tmp2; + alu.dst.chan = 1; + alu.dst.write = 1; + + alu.src[0].sel = V_SQ_ALU_SRC_0; + + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* tmp2.z sign bit is set if src0 and src2 signs are different */ + /* it will be a sign of the quotient */ + if (!mod) { + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT); + + alu.dst.sel = tmp2; + alu.dst.chan = 2; + alu.dst.write = 1; + + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* tmp2.x = |src0| */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT); + alu.is_op3 = 1; + + alu.dst.sel = tmp2; + alu.dst.chan = 0; + alu.dst.write = 1; + + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); + alu.src[2].sel = tmp2; + alu.src[2].chan = 0; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* tmp2.y = |src1| */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT); + alu.is_op3 = 1; + + alu.dst.sel = tmp2; + alu.dst.chan = 1; + alu.dst.write = 1; + + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + alu.src[2].sel = tmp2; + alu.src[2].chan = 1; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + } + + /* 1. 
tmp0.x = rcp_u (src2) = 2^32/src2 + e, where e is rounding error */ + if (ctx->bc->chip_class == CAYMAN) { + /* tmp3.x = u2f(src2) */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT); + + alu.dst.sel = tmp3; + alu.dst.chan = 0; + alu.dst.write = 1; + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 1; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* tmp0.x = recip(tmp3.x) */ + for (j = 0 ; j < 3; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE; + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 0); + + alu.src[0].sel = tmp3; + alu.src[0].chan = 0; + + if (j == 2) + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 0x4f800000; + + alu.dst.sel = tmp3; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 0; + alu.dst.write = 1; + + alu.src[0].sel = tmp3; + alu.src[0].chan = 0; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 0; + alu.dst.write = 1; + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 1; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 2. tmp0.z = lo (tmp0.x * src2) */ + if (ctx->bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 2); + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + + alu.last = (j == 3); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 3. tmp0.w = -tmp0.z */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); + + alu.dst.sel = tmp0; + alu.dst.chan = 3; + alu.dst.write = 1; + + alu.src[0].sel = V_SQ_ALU_SRC_0; + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 4. 
tmp0.y = hi (tmp0.x * src2) */ + if (ctx->bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 1); + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + alu.last = (j == 3); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 1; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT); + alu.is_op3 = 1; + + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 1; + alu.src[1].sel = tmp0; + alu.src[1].chan = 3; + alu.src[2].sel = tmp0; + alu.src[2].chan = 2; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */ + if (ctx->bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 3); + + alu.src[0].sel = tmp0; + alu.src[0].chan = 2; + + alu.src[1].sel = tmp0; + alu.src[1].chan = 0; + + alu.last = (j == 3); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 3; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 2; + + alu.src[1].sel = tmp0; + alu.src[1].chan = 0; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 7. tmp1.x = tmp0.x - tmp0.w */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); + + alu.dst.sel = tmp1; + alu.dst.chan = 0; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + alu.src[1].sel = tmp0; + alu.src[1].chan = 3; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 8. tmp1.y = tmp0.x + tmp0.w */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT); + + alu.dst.sel = tmp1; + alu.dst.chan = 1; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + alu.src[1].sel = tmp0; + alu.src[1].chan = 3; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 9. tmp0.x = (tmp0.y == 0 ? 
tmp1.y : tmp1.x) */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDE_INT); + alu.is_op3 = 1; + + alu.dst.sel = tmp0; + alu.dst.chan = 0; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 1; + alu.src[1].sel = tmp1; + alu.src[1].chan = 1; + alu.src[2].sel = tmp1; + alu.src[2].chan = 0; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 10. tmp0.z = hi(tmp0.x * src1) = q */ + if (ctx->bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 2); + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 0; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); + } + + alu.last = (j == 3); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULHI_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 0; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */ + if (ctx->bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = j; + alu.dst.write = (j == 1); + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 1; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + } + + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; + + alu.last = (j == 3); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT); + + alu.dst.sel = tmp0; + alu.dst.chan = 1; + alu.dst.write = 1; + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 1; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + } + + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + + /* 12. tmp0.w = src1 - tmp0.y = r */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); + + alu.dst.sel = tmp0; + alu.dst.chan = 3; + alu.dst.write = 1; + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 0; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + } + + alu.src[1].sel = tmp0; + alu.src[1].chan = 1; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 13. tmp1.x = tmp0.w >= src2 = r >= src2 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT); + + alu.dst.sel = tmp1; + alu.dst.chan = 0; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 3; + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + /* 14. 
tmp1.y = src1 >= tmp0.y = r >= 0 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT); + + alu.dst.sel = tmp1; + alu.dst.chan = 1; + alu.dst.write = 1; + + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 0; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + } + + alu.src[1].sel = tmp0; + alu.src[1].chan = 1; + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + + if (mod) { /* UMOD */ + + /* 15. tmp1.z = tmp0.w - src2 = r - src2 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); + + alu.dst.sel = tmp1; + alu.dst.chan = 2; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 3; - if (!unsigned_op) { - /* tmp1 = tmp0>=0 ? 0.5 : -0.5 for int*/ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<src[1], i); + } + + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + /* 16. tmp1.w = tmp0.w + src2 = r + src2 */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE); - alu.is_op3 = 1; + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT); alu.dst.sel = tmp1; - alu.dst.chan = i; + alu.dst.chan = 3; alu.dst.write = 1; alu.src[0].sel = tmp0; - alu.src[0].chan = i; - - alu.src[1].sel = V_SQ_ALU_SRC_0_5; - - if (unsigned_op) - alu.src[2].sel = V_SQ_ALU_SRC_0; - else { - alu.src[2].sel = V_SQ_ALU_SRC_0_5; - alu.src[2].neg = 1; + alu.src[0].chan = 3; + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); } - if (i == last_inst) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) return r; - } - } - /* tmp0 = tmp0 + tmp1 for int */ - /* tmp0 = tmp0 + 0.5 for uint */ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<bc, &alu))) + return r; + + /* 16. 
tmp1.w = tmp0.z - 1 = q - 1 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT); + + alu.dst.sel = tmp1; + alu.dst.chan = 3; + alu.dst.write = 1; + + alu.src[0].sel = tmp0; + alu.src[0].chan = 2; + alu.src[1].sel = V_SQ_ALU_SRC_M_1_INT; - if (i == last_inst) alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; - /* tmp1 = float(src1) */ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<src[1], i); + alu.src[0].sel = tmp1; + alu.src[0].chan = 0; + alu.src[1].sel = tmp1; + alu.src[1].chan = 1; + alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) return r; - } - - /* tmp1 = 1.0/src1 */ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<bc, &alu); - if (r) + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) return r; - } - - /* tmp1 = tmp0 * tmp1 */ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<temp_reg; - alu.src[0].chan = i; + if (signed_op) { + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; + } else { + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + } + alu.src[0].sel = tmp1; + alu.src[0].chan = 1; alu.src[1].sel = tmp1; - alu.src[1].chan = i; + alu.src[1].chan = 3; + alu.src[2].sel = tmp0; + alu.src[2].chan = 2; - if (i == last_inst) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) return r; - } - /* tmp1 = trunc(tmp1) for evergreen+ */ - if (ctx->bc->chip_class >= EVERGREEN) { - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<bc, &alu); - if (r) - return r; - } - } + alu.dst.sel = tmp0; + alu.dst.chan = 0; + alu.dst.write = 1; - /* dst = int(tmp1) */ - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<bc, &alu))) + return r; - if (unsigned_op) - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT); - else - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT); + /* sign of the remainder is the same as the sign of src0 */ + /* tmp0.x = src0>=0 ? 
tmp0.z : tmp0.x */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT); + alu.is_op3 = 1; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.src[0].sel = tmp1; - alu.src[0].chan = i; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; + alu.src[2].sel = tmp0; + alu.src[2].chan = 0; - if ((ctx->bc->chip_class < EVERGREEN || unsigned_op) || i == last_inst) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; - return 0; -} + } else { -static int tgsi_f2i(struct r600_shader_ctx *ctx) -{ - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; - int i, r; - unsigned write_mask = inst->Dst[0].Register.WriteMask; - int last_inst = tgsi_last_instruction(write_mask); + /* tmp0.x = -tmp0.z */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT); - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<temp_reg; - alu.dst.chan = i; - alu.dst.write = 1; + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - if (i == last_inst) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + /* fix the quotient sign (same as the sign of src0*src1) */ + /* tmp0.x = tmp2.z>=0 ? tmp0.z : tmp0.x */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE_INT); + alu.is_op3 = 1; - for (i = 0; i < 4; i++) { - if (!(write_mask & (1<Dst[0], i, &alu.dst); - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.inst = ctx->inst_info->r600_opcode; + alu.src[0].sel = tmp2; + alu.src[0].chan = 2; + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; + alu.src[2].sel = tmp0; + alu.src[2].chan = 0; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } + } + return 0; +} - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = i; +static int tgsi_udiv(struct r600_shader_ctx *ctx) +{ + return tgsi_divmod(ctx, 0, 0); +} - if (i == last_inst) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } +static int tgsi_umod(struct r600_shader_ctx *ctx) +{ + return tgsi_divmod(ctx, 1, 0); +} - return 0; +static int tgsi_idiv(struct r600_shader_ctx *ctx) +{ + return tgsi_divmod(ctx, 0, 1); +} + +static int tgsi_imod(struct r600_shader_ctx *ctx) +{ + return tgsi_divmod(ctx, 1, 1); } static int tgsi_iabs(struct r600_shader_ctx *ctx) @@ -2569,7 +3589,8 @@ static inline boolean tgsi_tex_src_requires_loading(struct r600_shader_ctx *ctx, { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; return (inst->Src[index].Register.File != TGSI_FILE_TEMPORARY && - inst->Src[index].Register.File != TGSI_FILE_INPUT) || + inst->Src[index].Register.File != TGSI_FILE_INPUT && + inst->Src[index].Register.File != TGSI_FILE_OUTPUT) || ctx->src[index].neg || ctx->src[index].abs; } @@ -2594,7 +3615,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) const boolean src_requires_loading = tgsi_tex_src_requires_loading(ctx, 0); boolean src_loaded = FALSE; unsigned sampler_src_reg = 1; - u8 offset_x = 0, offset_y = 0, offset_z = 0; + uint8_t offset_x = 0, offset_y = 0, offset_z = 0; src_gpr = 
tgsi_tex_get_src_gpr(ctx, 0); @@ -2724,7 +3745,10 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) src_gpr = ctx->temp_reg; } - if (inst->Texture.Texture == TGSI_TEXTURE_CUBE) { + if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE || + inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) && + inst->Instruction.Opcode != TGSI_OPCODE_TXQ) { + static const unsigned src0_swizzle[] = {2, 2, 0, 1}; static const unsigned src1_swizzle[] = {1, 0, 2, 2}; @@ -2823,7 +3847,19 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; - + /* write initial W value into Z component */ + if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); + r600_bytecode_src(&alu.src[0], &ctx->src[0], 3); + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 2; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } src_loaded = TRUE; src_gpr = ctx->temp_reg; } @@ -2850,6 +3886,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D || inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT || + inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE || inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) { switch (opcode) { @@ -2898,6 +3935,12 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) tex.src_sel_z = 3; tex.src_sel_w = 1; } + if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) { + tex.src_sel_x = 1; + tex.src_sel_y = 0; + tex.src_sel_z = 3; + tex.src_sel_w = 2; /* route Z compare value into W */ + } if (inst->Texture.Texture != TGSI_TEXTURE_RECT && inst->Texture.Texture != TGSI_TEXTURE_SHADOWRECT) { @@ -3822,7 +4865,7 @@ static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP)); ctx->bc->cf_last->pop_count = pops; - /* TODO work out offset */ + /* XXX work out offset */ return 0; } @@ -3934,7 +4977,7 @@ static int tgsi_endloop(struct r600_shader_ctx *ctx) for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) { ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id; } - /* TODO add LOOPRET support */ + /* XXX add LOOPRET support */ fc_poplevel(ctx); callstack_decrease_current(ctx, FC_LOOP); return 0; @@ -3956,11 +4999,9 @@ static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx) } r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->r600_opcode); - ctx->bc->cf_last->pop_count = 1; fc_set_mid(ctx, fscp); - pops(ctx, 1); callstack_check_depth(ctx, FC_PUSH_VPM, 1); return 0; } @@ -4023,7 +5064,7 @@ static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = { {TGSI_OPCODE_MOV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV, tgsi_op2}, {TGSI_OPCODE_LIT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_lit}, - /* FIXME: + /* XXX: * For state trackers other than OpenGL, we'll want to use * _RECIP_IEEE instead. 
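 * (For context: _RECIP_CLAMPED saturates an overflowing result, e.g. a
 * divide by zero, to +/-MAX_FLOAT, while _RECIP_IEEE returns the signed
 * IEEE infinity.)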
 	 */
@@ -4113,7 +5154,7 @@ static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
 	{80, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_PUSHA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_POPA, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_CEIL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL, tgsi_op2},
 	{TGSI_OPCODE_I2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT, tgsi_op2_trans},
 	{TGSI_OPCODE_NOT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT, tgsi_op2},
 	{TGSI_OPCODE_TRUNC, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_op2},
@@ -4122,7 +5163,7 @@ static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
 	{88, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_AND, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT, tgsi_op2},
 	{TGSI_OPCODE_OR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT, tgsi_op2},
-	{TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_MOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_imod},
 	{TGSI_OPCODE_XOR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT, tgsi_op2},
 	{TGSI_OPCODE_SAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_TXF, 0, SQ_TEX_INST_LD, tgsi_tex},
@@ -4157,38 +5198,38 @@ static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
 	{TGSI_OPCODE_IDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
 	{TGSI_OPCODE_IMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT, tgsi_op2},
 	{TGSI_OPCODE_IMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT, tgsi_op2},
-	{TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT, tgsi_op2},
+	{TGSI_OPCODE_INEG, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT, tgsi_ineg},
 	{TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT, tgsi_op2},
 	{TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT, tgsi_op2_trans},
-	{TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2},
+	{TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
 	{TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2},
 	{TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2_trans},
 	{TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT, tgsi_op2},
-	{TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
+	{TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_udiv},
 	{TGSI_OPCODE_UMAD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umad},
 	{TGSI_OPCODE_UMAX, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT, tgsi_op2},
 	{TGSI_OPCODE_UMIN, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT, tgsi_op2},
-	{TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT, tgsi_op2},
+	{TGSI_OPCODE_UMOD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umod},
+	{TGSI_OPCODE_UMUL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT, tgsi_op2_trans},
 	{TGSI_OPCODE_USEQ, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT, tgsi_op2},
 	{TGSI_OPCODE_USGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT, tgsi_op2},
 	{TGSI_OPCODE_USHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT, tgsi_op2_trans},
-	{TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
+	{TGSI_OPCODE_USLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT, tgsi_op2_swap},
 	{TGSI_OPCODE_USNE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT, tgsi_op2_swap},
 	{TGSI_OPCODE_SWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_CASE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_DEFAULT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_ENDSWITCH, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_RESINFO, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_UARL, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT, tgsi_r600_arl},
@@ -4287,7 +5328,7 @@ static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
 	{80, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_PUSHA, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_POPA, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_CEIL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_CEIL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL, tgsi_op2},
 	{TGSI_OPCODE_I2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT, tgsi_op2_trans},
 	{TGSI_OPCODE_NOT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT, tgsi_op2},
 	{TGSI_OPCODE_TRUNC, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_op2},
@@ -4296,7 +5337,7 @@ static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
 	{88, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_AND, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT, tgsi_op2},
 	{TGSI_OPCODE_OR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT, tgsi_op2},
-	{TGSI_OPCODE_MOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_MOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_imod},
 	{TGSI_OPCODE_XOR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT, tgsi_op2},
 	{TGSI_OPCODE_SAD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_TXF, 0, SQ_TEX_INST_LD, tgsi_tex},
@@ -4327,7 +5368,7 @@ static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
 	{TGSI_OPCODE_END, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
 	/* gap */
 	{118, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_F2I, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT, tgsi_f2i},
+	{TGSI_OPCODE_F2I, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT, tgsi_op2},
 	{TGSI_OPCODE_IDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
 	{TGSI_OPCODE_IMAX, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT, tgsi_op2},
 	{TGSI_OPCODE_IMIN, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT, tgsi_op2},
@@ -4335,34 +5376,34 @@ static struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] = {
 	{TGSI_OPCODE_ISGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT, tgsi_op2},
 	{TGSI_OPCODE_ISHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT, tgsi_op2},
 	{TGSI_OPCODE_ISLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
-	{TGSI_OPCODE_F2U, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_f2i},
-	{TGSI_OPCODE_U2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2},
+	{TGSI_OPCODE_F2U, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2_trans},
+	{TGSI_OPCODE_U2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2_trans},
 	{TGSI_OPCODE_UADD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT, tgsi_op2},
-	{TGSI_OPCODE_UDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
+	{TGSI_OPCODE_UDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_udiv},
 	{TGSI_OPCODE_UMAD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umad},
 	{TGSI_OPCODE_UMAX, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT, tgsi_op2},
 	{TGSI_OPCODE_UMIN, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT, tgsi_op2},
-	{TGSI_OPCODE_UMOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_UMOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umod},
 	{TGSI_OPCODE_UMUL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_UINT, tgsi_op2_trans},
 	{TGSI_OPCODE_USEQ, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT, tgsi_op2},
 	{TGSI_OPCODE_USGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT, tgsi_op2},
 	{TGSI_OPCODE_USHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT, tgsi_op2},
-	{TGSI_OPCODE_USLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
+	{TGSI_OPCODE_USLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT, tgsi_op2_swap},
 	{TGSI_OPCODE_USNE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT, tgsi_op2},
 	{TGSI_OPCODE_SWITCH, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_CASE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_DEFAULT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_ENDSWITCH, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_RESINFO, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_UARL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT, tgsi_eg_arl},
@@ -4461,16 +5502,16 @@ static struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
 	{80, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_PUSHA, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_POPA, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_CEIL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_I2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_CEIL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CEIL, tgsi_op2},
+	{TGSI_OPCODE_I2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT, tgsi_op2},
 	{TGSI_OPCODE_NOT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOT_INT, tgsi_op2},
 	{TGSI_OPCODE_TRUNC, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC, tgsi_op2},
-	{TGSI_OPCODE_SHL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_SHL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT, tgsi_op2},
 	/* gap */
 	{88, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_AND, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_OR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_MOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_AND, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT, tgsi_op2},
+	{TGSI_OPCODE_OR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_OR_INT, tgsi_op2},
+	{TGSI_OPCODE_MOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_imod},
 	{TGSI_OPCODE_XOR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_XOR_INT, tgsi_op2},
 	{TGSI_OPCODE_SAD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_TXF, 0, SQ_TEX_INST_LD, tgsi_tex},
@@ -4501,45 +5542,47 @@ static struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] = {
 	{TGSI_OPCODE_END, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_end}, /* aka HALT */
 	/* gap */
 	{118, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_F2I, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_IDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_F2I, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT, tgsi_op2},
+	{TGSI_OPCODE_IDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_idiv},
 	{TGSI_OPCODE_IMAX, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_INT, tgsi_op2},
 	{TGSI_OPCODE_IMIN, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_INT, tgsi_op2},
-	{TGSI_OPCODE_INEG, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_ISGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_ISHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_ISLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_F2U, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_U2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UADD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMAD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMAX, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMIN, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_UMUL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_USEQ, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_USGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_USHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_USLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_USNE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+	{TGSI_OPCODE_INEG, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SUB_INT, tgsi_ineg},
+	{TGSI_OPCODE_ISGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT, tgsi_op2},
+	{TGSI_OPCODE_ISHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT, tgsi_op2},
+	{TGSI_OPCODE_ISLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
+	{TGSI_OPCODE_F2U, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2},
+	{TGSI_OPCODE_U2F, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2},
+	{TGSI_OPCODE_UADD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT, tgsi_op2},
+	{TGSI_OPCODE_UDIV, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_udiv},
+	{TGSI_OPCODE_UMAD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umad},
+	{TGSI_OPCODE_UMAX, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX_UINT, tgsi_op2},
+	{TGSI_OPCODE_UMIN, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN_UINT, tgsi_op2},
+	{TGSI_OPCODE_UMOD, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_umod},
+	{TGSI_OPCODE_UMUL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT, cayman_mul_int_instr},
+	{TGSI_OPCODE_USEQ, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE_INT, tgsi_op2},
+	{TGSI_OPCODE_USGE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_UINT, tgsi_op2},
+	{TGSI_OPCODE_USHR, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT, tgsi_op2},
+	{TGSI_OPCODE_USLT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_UINT, tgsi_op2_swap},
+	{TGSI_OPCODE_USNE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE_INT, tgsi_op2},
 	{TGSI_OPCODE_SWITCH, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_CASE, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_DEFAULT, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 	{TGSI_OPCODE_ENDSWITCH, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_LOAD_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SAMPLE_I_MS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_B, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_C_LZ, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_D, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_L, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_GATHER4, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_RESINFO, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_SVIEWINFO, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_POS, 0, 0, tgsi_unsupported},
 	{TGSI_OPCODE_SAMPLE_INFO, 0, 0, tgsi_unsupported},
-	{TGSI_OPCODE_UARL, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_UARL, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT, tgsi_eg_arl},
 	{TGSI_OPCODE_UCMP, 0, 0, tgsi_unsupported},
+	{TGSI_OPCODE_IABS, 0, 0, tgsi_iabs},
+	{TGSI_OPCODE_ISSG, 0, 0, tgsi_issg},
 	{TGSI_OPCODE_LAST, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
 };
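For orientation, the three per-family tables above (r600_shader_tgsi_instruction, eg_shader_tgsi_instruction and cm_shader_tgsi_instruction) are lookup tables indexed by TGSI opcode: each entry pairs an opcode with an ALU instruction encoding and an emit callback such as tgsi_op2 or tgsi_tex, while entries left at tgsi_unsupported reject opcodes that chip family cannot translate (numbered placeholders like {80, ...} only keep the indices aligned). A rough sketch of how such a table is consumed; the function name and the callback field name below are illustrative, only inst_info and r600_opcode appear in the file itself:

/* illustrative only: dispatching one parsed TGSI instruction through the
 * Cayman table; the real translator selects the table by chip class */
static int translate_one_instruction(struct r600_shader_ctx *ctx)
{
	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
	unsigned opcode = inst->Instruction.Opcode;

	/* remember the entry so helpers can read ctx->inst_info->r600_opcode
	 * when they build the ALU words */
	ctx->inst_info = &cm_shader_tgsi_instruction[opcode];
	return ctx->inst_info->process(ctx);	/* e.g. tgsi_op2(), tgsi_tex() */
}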