diff --git a/src/mesa/drivers/dri/i965/brw_fs_generator.cpp b/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
index 2ed0bac6fd9..2a486513691 100644
--- a/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs_generator.cpp
@@ -27,40 +27,51 @@
  * native instructions.
  */
 
-#include "main/macros.h"
-#include "brw_context.h"
 #include "brw_eu.h"
 #include "brw_fs.h"
 #include "brw_cfg.h"
+#include "brw_program.h"
 
-static uint32_t brw_file_from_reg(fs_reg *reg)
+static enum brw_reg_file
+brw_file_from_reg(fs_reg *reg)
 {
    switch (reg->file) {
-   case GRF:
+   case ARF:
+      return BRW_ARCHITECTURE_REGISTER_FILE;
+   case FIXED_GRF:
+   case VGRF:
       return BRW_GENERAL_REGISTER_FILE;
    case MRF:
       return BRW_MESSAGE_REGISTER_FILE;
    case IMM:
       return BRW_IMMEDIATE_VALUE;
-   default:
+   case BAD_FILE:
+   case ATTR:
+   case UNIFORM:
       unreachable("not reached");
    }
+   return BRW_ARCHITECTURE_REGISTER_FILE;
 }
 
 static struct brw_reg
-brw_reg_from_fs_reg(fs_reg *reg)
+brw_reg_from_fs_reg(const struct brw_codegen *p,
+                    fs_inst *inst, fs_reg *reg, unsigned gen)
 {
    struct brw_reg brw_reg;
 
    switch (reg->file) {
-   case GRF:
    case MRF:
+      assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(gen));
+      /* Fallthrough */
+   case VGRF:
       if (reg->stride == 0) {
-         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->reg, 0);
-      } else if (reg->width < 8) {
-         brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->reg, 0);
-         brw_reg = stride(brw_reg, reg->width * reg->stride,
-                          reg->width, reg->stride);
+         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
+      } else if (!p->compressed &&
+                 inst->exec_size * reg->stride * type_sz(reg->type) <= 32) {
+         brw_reg = brw_vecn_reg(inst->exec_size, brw_file_from_reg(reg),
+                                reg->nr, 0);
+         brw_reg = stride(brw_reg, inst->exec_size * reg->stride,
+                          inst->exec_size, reg->stride);
       } else {
          /* From the Haswell PRM:
           *
@@ -68,55 +79,34 @@ brw_reg_from_fs_reg(fs_reg *reg)
           *  rule implies that elements within a 'Width' cannot cross GRF
           *  boundaries.
           *
-          * So, for registers with width > 8, we have to use a width of 8
-          * and trust the compression state to sort out the exec size.
+          * So, for registers that are large enough, we have to split the exec
+          * size in two and trust the compression state to sort it out.
*/ - brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->reg, 0); - brw_reg = stride(brw_reg, 8 * reg->stride, 8, reg->stride); + assert(inst->exec_size / 2 * reg->stride * type_sz(reg->type) <= 32); + brw_reg = brw_vecn_reg(inst->exec_size / 2, brw_file_from_reg(reg), + reg->nr, 0); + brw_reg = stride(brw_reg, inst->exec_size / 2 * reg->stride, + inst->exec_size / 2, reg->stride); } brw_reg = retype(brw_reg, reg->type); brw_reg = byte_offset(brw_reg, reg->subreg_offset); + brw_reg.abs = reg->abs; + brw_reg.negate = reg->negate; break; + case ARF: + case FIXED_GRF: case IMM: - switch (reg->type) { - case BRW_REGISTER_TYPE_F: - brw_reg = brw_imm_f(reg->fixed_hw_reg.dw1.f); - break; - case BRW_REGISTER_TYPE_D: - brw_reg = brw_imm_d(reg->fixed_hw_reg.dw1.d); - break; - case BRW_REGISTER_TYPE_UD: - brw_reg = brw_imm_ud(reg->fixed_hw_reg.dw1.ud); - break; - case BRW_REGISTER_TYPE_W: - brw_reg = brw_imm_w(reg->fixed_hw_reg.dw1.d); - break; - case BRW_REGISTER_TYPE_UW: - brw_reg = brw_imm_uw(reg->fixed_hw_reg.dw1.ud); - break; - case BRW_REGISTER_TYPE_VF: - brw_reg = brw_imm_vf(reg->fixed_hw_reg.dw1.ud); - break; - default: - unreachable("not reached"); - } - break; - case HW_REG: - assert(reg->type == reg->fixed_hw_reg.type); - brw_reg = reg->fixed_hw_reg; + brw_reg = reg->as_brw_reg(); break; case BAD_FILE: /* Probably unused. */ brw_reg = brw_null_reg(); break; - default: + case ATTR: + case UNIFORM: unreachable("not reached"); } - if (reg->abs) - brw_reg = brw_abs(brw_reg); - if (reg->negate) - brw_reg = negate(brw_reg); return brw_reg; } @@ -125,17 +115,16 @@ fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data, void *mem_ctx, const void *key, struct brw_stage_prog_data *prog_data, - struct gl_program *prog, unsigned promoted_constants, bool runtime_check_aads_emit, - const char *stage_abbrev) + gl_shader_stage stage) : compiler(compiler), log_data(log_data), devinfo(compiler->devinfo), key(key), prog_data(prog_data), - prog(prog), promoted_constants(promoted_constants), + promoted_constants(promoted_constants), runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false), - stage_abbrev(stage_abbrev), mem_ctx(mem_ctx) + stage(stage), mem_ctx(mem_ctx) { p = rzalloc(mem_ctx, struct brw_codegen); brw_init_codegen(devinfo, p, mem_ctx); @@ -217,11 +206,11 @@ fs_generator::fire_fb_write(fs_inst *inst, if (inst->opcode == FS_OPCODE_REP_FB_WRITE) msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED; else if (prog_data->dual_src_blend) { - if (dispatch_width == 8 || !inst->eot) + if (!inst->force_sechalf) msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01; else msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23; - } else if (dispatch_width == 16) + } else if (inst->exec_size == 16) msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE; else msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01; @@ -312,6 +301,14 @@ fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload) brw_imm_ud(inst->target)); } + /* Set computes stencil to render target */ + if (prog_data->computed_stencil) { + brw_OR(p, + vec1(retype(payload, BRW_REGISTER_TYPE_UD)), + vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)), + brw_imm_ud(0x1 << 14)); + } + implied_header = brw_null_reg(); } else { implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW); @@ -349,6 +346,109 @@ fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload) } } +void 
+fs_generator::generate_mov_indirect(fs_inst *inst,
+                                    struct brw_reg dst,
+                                    struct brw_reg reg,
+                                    struct brw_reg indirect_byte_offset)
+{
+   assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
+   assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);
+
+   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;
+
+   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
+      imm_byte_offset += indirect_byte_offset.ud;
+
+      reg.nr = imm_byte_offset / REG_SIZE;
+      reg.subnr = imm_byte_offset % REG_SIZE;
+      brw_MOV(p, dst, reg);
+   } else {
+      /* Prior to Broadwell, there are only 8 address registers. */
+      assert(inst->exec_size == 8 || devinfo->gen >= 8);
+
+      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
+      struct brw_reg addr = vec8(brw_address_reg(0));
+
+      /* The destination stride of an instruction (in bytes) must be greater
+       * than or equal to the size of the rest of the instruction. Since the
+       * address register is of type UW, we can't use a D-type instruction.
+       * In order to get around this, we retype to UW and use a stride.
+       */
+      indirect_byte_offset =
+         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
+
+      struct brw_reg ind_src;
+      if (devinfo->gen < 8) {
+         /* From the Haswell PRM section "Register Region Restrictions":
+          *
+          *    "The lower bits of the AddressImmediate must not overflow to
+          *    change the register address. The lower 5 bits of Address
+          *    Immediate when added to lower 5 bits of address register gives
+          *    the sub-register offset. The upper bits of Address Immediate
+          *    when added to upper bits of address register gives the register
+          *    address. Any overflow from sub-register offset is dropped."
+          *
+          * This restriction is only listed in the Haswell PRM but empirical
+          * testing indicates that it applies on all older generations and is
+          * lifted on Broadwell.
+          *
+          * Since the indirect may cause us to cross a register boundary, this
+          * makes the base offset almost useless. We could try and do
+          * something clever where we use an actual base offset if
+          * base_offset % 32 == 0 but that would mean we were generating
+          * different code depending on the base offset. Instead, for the
+          * sake of consistency, we'll just do the add ourselves.
+          */
+         brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
+         ind_src = brw_VxH_indirect(0, 0);
+      } else {
+         brw_MOV(p, addr, indirect_byte_offset);
+         ind_src = brw_VxH_indirect(0, imm_byte_offset);
+      }
+
+      brw_inst *mov = brw_MOV(p, dst, retype(ind_src, dst.type));
+
+      if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
+          !inst->get_next()->is_tail_sentinel() &&
+          ((fs_inst *)inst->get_next())->mlen > 0) {
+         /* From the Sandybridge PRM:
+          *
+          *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
+          *    instruction that “indexed/indirect” source AND is followed by a
+          *    send, the instruction requires a “Switch”. This is to avoid
+          *    race condition where send may dispatch before MRF is updated."
+ */ + brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH); + } + } +} + +void +fs_generator::generate_urb_read(fs_inst *inst, + struct brw_reg dst, + struct brw_reg header) +{ + assert(header.file == BRW_GENERAL_REGISTER_FILE); + assert(header.type == BRW_REGISTER_TYPE_UD); + + brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND); + brw_set_dest(p, send, dst); + brw_set_src0(p, send, header); + brw_set_src1(p, send, brw_imm_ud(0u)); + + brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB); + brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ); + + if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT) + brw_inst_set_urb_per_slot_offset(p->devinfo, send, true); + + brw_inst_set_mlen(p->devinfo, send, inst->mlen); + brw_inst_set_rlen(p->devinfo, send, inst->regs_written); + brw_inst_set_header_present(p->devinfo, send, true); + brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset); +} + void fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload) { @@ -363,6 +463,14 @@ fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload) brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB); brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE); + if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT || + inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) + brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true); + + if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED || + inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) + brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true); + brw_inst_set_mlen(p->devinfo, insn, inst->mlen); brw_inst_set_rlen(p->devinfo, insn, 0); brw_inst_set_eot(p->devinfo, insn, inst->eot); @@ -377,7 +485,7 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload) insn = brw_next_insn(p, BRW_OPCODE_SEND); - brw_set_dest(p, insn, brw_null_reg()); + brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW)); brw_set_src0(p, insn, payload); brw_set_src1(p, insn, brw_imm_d(0)); @@ -402,26 +510,51 @@ fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload) } void -fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src) +fs_generator::generate_stencil_ref_packing(fs_inst *inst, + struct brw_reg dst, + struct brw_reg src) { - brw_barrier(p, src); - brw_WAIT(p); + assert(dispatch_width == 8); + assert(devinfo->gen >= 9); + + /* Stencil value updates are provided in 8 slots of 1 byte per slot. + * Presumably, in order to save memory bandwidth, the stencil reference + * values written from the FS need to be packed into 2 dwords (this makes + * sense because the stencil values are limited to 1 byte each and a SIMD8 + * send, so stencil slots 0-3 in dw0, and 4-7 in dw1.) + * + * The spec is confusing here because in the payload definition of MDP_RTW_S8 + * (Message Data Payload for Render Target Writes with Stencil 8b) the + * stencil value seems to be dw4.0-dw4.7. 
However, if you look at the type of
+    * dw4 it is type MDPR_STENCIL (Message Data Payload Register) which is the
+    * packed values specified above and diagrammed below:
+    *
+    *     31                             0
+    *     --------------------------------
+    * DW  |                              |
+    * 2-7 |            IGNORED           |
+    *     |                              |
+    *     --------------------------------
+    * DW1 | STC   | STC   | STC   | STC  |
+    *     | slot7 | slot6 | slot5 | slot4|
+    *     --------------------------------
+    * DW0 | STC   | STC   | STC   | STC  |
+    *     | slot3 | slot2 | slot1 | slot0|
+    *     --------------------------------
+    */
+
+   src.vstride = BRW_VERTICAL_STRIDE_4;
+   src.width = BRW_WIDTH_1;
+   src.hstride = BRW_HORIZONTAL_STRIDE_0;
+   assert(src.type == BRW_REGISTER_TYPE_UB);
+   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UB), src);
 }
 
 void
-fs_generator::generate_blorp_fb_write(fs_inst *inst)
+fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
 {
-   brw_fb_WRITE(p,
-                16 /* dispatch_width */,
-                brw_message_reg(inst->base_mrf),
-                brw_reg_from_fs_reg(&inst->src[0]),
-                BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
-                inst->target,
-                inst->mlen,
-                0,
-                true,
-                true,
-                inst->header_size != 0);
+   brw_barrier(p, src);
+   brw_WAIT(p);
 }
 
 void
@@ -537,12 +670,56 @@ fs_generator::generate_math_g45(fs_inst *inst,
                BRW_MATH_PRECISION_FULL);
 }
 
+void
+fs_generator::generate_get_buffer_size(fs_inst *inst,
+                                       struct brw_reg dst,
+                                       struct brw_reg src,
+                                       struct brw_reg surf_index)
+{
+   assert(devinfo->gen >= 7);
+   assert(surf_index.file == BRW_IMMEDIATE_VALUE);
+
+   uint32_t simd_mode;
+   int rlen = 4;
+
+   switch (inst->exec_size) {
+   case 8:
+      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
+      break;
+   case 16:
+      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
+      break;
+   default:
+      unreachable("Invalid width for texture instruction");
+   }
+
+   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
+      rlen = 8;
+      dst = vec16(dst);
+   }
+
+   brw_SAMPLE(p,
+              retype(dst, BRW_REGISTER_TYPE_UW),
+              inst->base_mrf,
+              src,
+              surf_index.ud,
+              0,
+              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
+              rlen, /* response length */
+              inst->mlen,
+              inst->header_size > 0,
+              simd_mode,
+              BRW_SAMPLER_RETURN_FORMAT_SINT32);
+
+   brw_mark_surface_used(prog_data, surf_index.ud);
+}
+
 void
 fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
+                           struct brw_reg surface_index,
                            struct brw_reg sampler_index)
 {
    int msg_type = -1;
-   int rlen = 4;
    uint32_t simd_mode;
    uint32_t return_format;
    bool is_combined_send = inst->eot;
@@ -559,6 +736,17 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src
       break;
    }
 
+   /* Stomp the resinfo output type to UINT32. On gens 4-5, the output type
+    * is set as part of the message descriptor. On gen4, the PRM seems to
+    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
+    * later gens UINT32 is required. Once you hit Sandy Bridge, the bit is
+    * gone from the message descriptor entirely and you just get UINT32 all
+    * the time regardless. Since we can really only do non-UINT32 on gen4,
+    * just stomp it to UINT32 all the time.
+ */ + if (inst->opcode == SHADER_OPCODE_TXS) + return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32; + switch (inst->exec_size) { case 8: simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8; @@ -593,6 +781,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD; } break; + case SHADER_OPCODE_TXL_LZ: + assert(devinfo->gen >= 9); + if (inst->shadow_compare) { + msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ; + } else { + msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ; + } + break; case SHADER_OPCODE_TXS: msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO; break; @@ -608,6 +804,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src case SHADER_OPCODE_TXF: msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD; break; + case SHADER_OPCODE_TXF_LZ: + assert(devinfo->gen >= 9); + msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ; + break; + case SHADER_OPCODE_TXF_CMS_W: + assert(devinfo->gen >= 9); + msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W; + break; case SHADER_OPCODE_TXF_CMS: if (devinfo->gen >= 7) msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS; @@ -642,6 +846,9 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO; } break; + case SHADER_OPCODE_SAMPLEINFO: + msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO; + break; default: unreachable("not reached"); } @@ -651,7 +858,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src /* Note that G45 and older determines shadow compare and dispatch width * from message length for most messages. */ - if (dispatch_width == 8) { + if (inst->exec_size == 8) { msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE; if (inst->shadow_compare) { assert(inst->mlen == 6); @@ -670,7 +877,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src break; case FS_OPCODE_TXB: if (inst->shadow_compare) { - assert(dispatch_width == 8); + assert(inst->exec_size == 8); assert(inst->mlen == 6); msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE; } else { @@ -681,7 +888,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src break; case SHADER_OPCODE_TXL: if (inst->shadow_compare) { - assert(dispatch_width == 8); + assert(inst->exec_size == 8); assert(inst->mlen == 6); msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE; } else { @@ -692,7 +899,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src break; case SHADER_OPCODE_TXD: /* There is no sample_d_c message; comparisons are done manually */ - assert(dispatch_width == 8); + assert(inst->exec_size == 8); assert(inst->mlen == 7 || inst->mlen == 10); msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS; break; @@ -713,15 +920,9 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src assert(msg_type != -1); if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) { - rlen = 8; dst = vec16(dst); } - if (is_combined_send) { - assert(devinfo->gen >= 9 || devinfo->is_cherryview); - rlen = 0; - } - assert(devinfo->gen < 7 || inst->header_size == 0 || src.file == BRW_GENERAL_REGISTER_FILE); @@ -756,6 +957,14 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src /* Set the offset bits in DWord 2. 
*/ brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(inst->offset)); + } else if (stage != MESA_SHADER_VERTEX && + stage != MESA_SHADER_FRAGMENT) { + /* The vertex and fragment stages have g0.2 set to 0, so + * header0.2 is 0 when g0 is copied. Other stages may not, so we + * must set it to 0 to avoid setting undesirable bits in the + * message. + */ + brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0)); } brw_adjust_sampler_state_pointer(p, header_reg, sampler_index); @@ -768,35 +977,42 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src ? prog_data->binding_table.gather_texture_start : prog_data->binding_table.texture_start; - if (sampler_index.file == BRW_IMMEDIATE_VALUE) { - uint32_t sampler = sampler_index.dw1.ud; + if (surface_index.file == BRW_IMMEDIATE_VALUE && + sampler_index.file == BRW_IMMEDIATE_VALUE) { + uint32_t surface = surface_index.ud; + uint32_t sampler = sampler_index.ud; brw_SAMPLE(p, retype(dst, BRW_REGISTER_TYPE_UW), inst->base_mrf, src, - sampler + base_binding_table_index, + surface + base_binding_table_index, sampler % 16, msg_type, - rlen, + inst->regs_written, inst->mlen, inst->header_size != 0, simd_mode, return_format); - brw_mark_surface_used(prog_data, sampler + base_binding_table_index); + brw_mark_surface_used(prog_data, surface + base_binding_table_index); } else { /* Non-const sampler index */ struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD)); + struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD)); struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD)); brw_push_insn_state(p); brw_set_default_mask_control(p, BRW_MASK_DISABLE); brw_set_default_access_mode(p, BRW_ALIGN_1); - /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */ - brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101)); + if (brw_regs_equal(&surface_reg, &sampler_reg)) { + brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101)); + } else { + brw_SHL(p, addr, sampler_reg, brw_imm_ud(8)); + brw_OR(p, addr, addr, surface_reg); + } if (base_binding_table_index) brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index)); brw_AND(p, addr, addr, brw_imm_ud(0xfff)); @@ -810,7 +1026,7 @@ fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src 0 /* surface */, 0 /* sampler */, msg_type, - rlen, + inst->regs_written, inst->mlen /* mlen */, inst->header_size != 0 /* header */, simd_mode, @@ -895,8 +1111,7 @@ fs_generator::generate_ddx(enum opcode opcode, */ void fs_generator::generate_ddy(enum opcode opcode, - struct brw_reg dst, struct brw_reg src, - bool negate_value) + struct brw_reg dst, struct brw_reg src) { if (opcode == FS_OPCODE_DDY_FINE) { /* From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register @@ -916,12 +1131,16 @@ fs_generator::generate_ddy(enum opcode opcode, * * Similar text exists in the g45 PRM. * + * Empirically, compressed align16 instructions using odd register + * numbers don't appear to work on Sandybridge either. + * * On these platforms, if we're building a SIMD16 shader, we need to * manually unroll to a pair of SIMD8 instructions. 
*/ bool unroll_to_simd8 = (dispatch_width == 16 && - (devinfo->gen == 4 || (devinfo->gen == 7 && !devinfo->is_haswell))); + (devinfo->gen == 4 || devinfo->gen == 6 || + (devinfo->gen == 7 && !devinfo->is_haswell))); /* produce accurate derivatives */ struct brw_reg src0 = brw_reg(src.file, src.nr, 0, @@ -943,20 +1162,11 @@ fs_generator::generate_ddy(enum opcode opcode, if (unroll_to_simd8) { brw_set_default_exec_size(p, BRW_EXECUTE_8); brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); - if (negate_value) { - brw_ADD(p, firsthalf(dst), firsthalf(src1), negate(firsthalf(src0))); - brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF); - brw_ADD(p, sechalf(dst), sechalf(src1), negate(sechalf(src0))); - } else { - brw_ADD(p, firsthalf(dst), firsthalf(src0), negate(firsthalf(src1))); - brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF); - brw_ADD(p, sechalf(dst), sechalf(src0), negate(sechalf(src1))); - } + brw_ADD(p, firsthalf(dst), negate(firsthalf(src0)), firsthalf(src1)); + brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF); + brw_ADD(p, sechalf(dst), negate(sechalf(src0)), sechalf(src1)); } else { - if (negate_value) - brw_ADD(p, dst, src1, negate(src0)); - else - brw_ADD(p, dst, src0, negate(src1)); + brw_ADD(p, dst, negate(src0), src1); } brw_pop_insn_state(p); } else { @@ -975,10 +1185,7 @@ fs_generator::generate_ddy(enum opcode opcode, BRW_WIDTH_4, BRW_HORIZONTAL_STRIDE_0, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); - if (negate_value) - brw_ADD(p, dst, src1, negate(src0)); - else - brw_ADD(p, dst, src0, negate(src1)); + brw_ADD(p, dst, negate(src0), src1); } } @@ -1036,16 +1243,14 @@ fs_generator::generate_uniform_pull_constant_load(fs_inst *inst, assert(index.file == BRW_IMMEDIATE_VALUE && index.type == BRW_REGISTER_TYPE_UD); - uint32_t surf_index = index.dw1.ud; + uint32_t surf_index = index.ud; assert(offset.file == BRW_IMMEDIATE_VALUE && offset.type == BRW_REGISTER_TYPE_UD); - uint32_t read_offset = offset.dw1.ud; + uint32_t read_offset = offset.ud; brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf), read_offset, surf_index); - - brw_mark_surface_used(prog_data, surf_index); } void @@ -1054,7 +1259,6 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst, struct brw_reg index, struct brw_reg offset) { - assert(inst->mlen == 0); assert(index.type == BRW_REGISTER_TYPE_UD); assert(offset.file == BRW_GENERAL_REGISTER_FILE); @@ -1069,12 +1273,10 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst, struct brw_reg src = offset; bool header_present = false; - int mlen = 1; if (devinfo->gen >= 9) { /* Skylake requires a message header in order to use SIMD4x2 mode. 
*/ - src = retype(brw_vec4_grf(offset.nr - 1, 0), BRW_REGISTER_TYPE_UD); - mlen = 2; + src = retype(brw_vec4_grf(offset.nr, 0), BRW_REGISTER_TYPE_UD); header_present = true; brw_push_insn_state(p); @@ -1090,12 +1292,13 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst, if (index.file == BRW_IMMEDIATE_VALUE) { - uint32_t surf_index = index.dw1.ud; + uint32_t surf_index = index.ud; brw_push_insn_state(p); brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); brw_set_default_mask_control(p, BRW_MASK_DISABLE); brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND); + brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4); brw_pop_insn_state(p); brw_set_dest(p, send, dst); @@ -1105,13 +1308,10 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst, 0, /* LD message ignores sampler unit */ GEN5_SAMPLER_MESSAGE_SAMPLE_LD, 1, /* rlen */ - mlen, + inst->mlen, header_present, BRW_SAMPLER_SIMD_MODE_SIMD4X2, 0); - - brw_mark_surface_used(prog_data, surf_index); - } else { struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD)); @@ -1135,17 +1335,12 @@ fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst, 0, /* LD message ignores sampler unit */ GEN5_SAMPLER_MESSAGE_SAMPLE_LD, 1, /* rlen */ - mlen, + inst->mlen, header_present, BRW_SAMPLER_SIMD_MODE_SIMD4X2, 0); brw_pop_insn_state(p); - - /* visitor knows more than we do about the surface limit required, - * so has already done marking. - */ - } } @@ -1161,7 +1356,7 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst, assert(index.file == BRW_IMMEDIATE_VALUE && index.type == BRW_REGISTER_TYPE_UD); - uint32_t surf_index = index.dw1.ud; + uint32_t surf_index = index.ud; uint32_t simd_mode, rlen, msg_type; if (dispatch_width == 16) { @@ -1212,8 +1407,6 @@ fs_generator::generate_varying_pull_constant_load(fs_inst *inst, inst->header_size != 0, simd_mode, return_format); - - brw_mark_surface_used(prog_data, surf_index); } void @@ -1243,7 +1436,7 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst, if (index.file == BRW_IMMEDIATE_VALUE) { - uint32_t surf_index = index.dw1.ud; + uint32_t surf_index = index.ud; brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND); brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW)); @@ -1258,8 +1451,6 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst, simd_mode, 0); - brw_mark_surface_used(prog_data, surf_index); - } else { struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD)); @@ -1290,10 +1481,6 @@ fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst, false /* header */, simd_mode, 0); - - /* visitor knows more than we do about the surface limit required, - * so has already done marking. - */ } } @@ -1327,15 +1514,14 @@ fs_generator::generate_pixel_interpolator_query(fs_inst *inst, struct brw_reg msg_data, unsigned msg_type) { - assert(msg_data.file == BRW_IMMEDIATE_VALUE && - msg_data.type == BRW_REGISTER_TYPE_UD); + assert(msg_data.type == BRW_REGISTER_TYPE_UD); brw_pixel_interpolator_query(p, retype(dst, BRW_REGISTER_TYPE_UW), src, inst->pi_noperspective, msg_type, - msg_data.dw1.ud, + msg_data, inst->mlen, inst->regs_written); } @@ -1363,37 +1549,6 @@ fs_generator::generate_set_simd4x2_offset(fs_inst *inst, brw_pop_insn_state(p); } -/* Sets vstride=16, width=8, hstride=2 or vstride=0, width=1, hstride=0 - * (when mask is passed as a uniform) of register mask before moving it - * to register dst. 
- */ -void -fs_generator::generate_set_omask(fs_inst *inst, - struct brw_reg dst, - struct brw_reg mask) -{ - bool stride_8_8_1 = - (mask.vstride == BRW_VERTICAL_STRIDE_8 && - mask.width == BRW_WIDTH_8 && - mask.hstride == BRW_HORIZONTAL_STRIDE_1); - - bool stride_0_1_0 = has_scalar_region(mask); - - assert(stride_8_8_1 || stride_0_1_0); - assert(dst.type == BRW_REGISTER_TYPE_UW); - - brw_push_insn_state(p); - brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); - brw_set_default_mask_control(p, BRW_MASK_DISABLE); - - if (stride_8_8_1) { - brw_MOV(p, dst, retype(stride(mask, 16, 8, 2), dst.type)); - } else if (stride_0_1_0) { - brw_MOV(p, dst, retype(mask, dst.type)); - } - brw_pop_insn_state(p); -} - /* Sets vstride=1, width=4, hstride=0 of register src1 during * the ADD instruction. */ @@ -1408,18 +1563,18 @@ fs_generator::generate_set_sample_id(fs_inst *inst, assert(src0.type == BRW_REGISTER_TYPE_D || src0.type == BRW_REGISTER_TYPE_UD); - brw_push_insn_state(p); - brw_set_default_exec_size(p, BRW_EXECUTE_8); - brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); - brw_set_default_mask_control(p, BRW_MASK_DISABLE); - struct brw_reg reg = retype(stride(src1, 1, 4, 0), BRW_REGISTER_TYPE_UW); - if (dispatch_width == 8) { + struct brw_reg reg = stride(src1, 1, 4, 0); + if (devinfo->gen >= 8 || dispatch_width == 8) { brw_ADD(p, dst, src0, reg); } else if (dispatch_width == 16) { + brw_push_insn_state(p); + brw_set_default_exec_size(p, BRW_EXECUTE_8); + brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); brw_ADD(p, firsthalf(dst), firsthalf(src0), reg); + brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF); brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2)); + brw_pop_insn_state(p); } - brw_pop_insn_state(p); } void @@ -1559,31 +1714,26 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) unsigned int last_insn_offset = p->next_insn_offset; bool multiple_instructions_emitted = false; - if (unlikely(debug_flag)) - annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset); - - for (unsigned int i = 0; i < inst->sources; i++) { - src[i] = brw_reg_from_fs_reg(&inst->src[i]); - - /* The accumulator result appears to get used for the - * conditional modifier generation. When negating a UD - * value, there is a 33rd bit generated for the sign in the - * accumulator value, so now you can't check, for example, - * equality with a 32-bit value. See piglit fs-op-neg-uvec4. - */ - assert(!inst->conditional_mod || - inst->src[i].type != BRW_REGISTER_TYPE_UD || - !inst->src[i].negate); + /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the + * "Register Region Restrictions" section: for BDW, SKL: + * + * "A POW/FDIV operation must not be followed by an instruction + * that requires two destination registers." + * + * The documentation is often lacking annotations for Atom parts, + * and empirically this affects CHV as well. 
+ */ + if (devinfo->gen >= 8 && + p->nr_insn > 1 && + brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH && + brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW && + inst->dst.component_size(inst->exec_size) > REG_SIZE) { + brw_NOP(p); + last_insn_offset = p->next_insn_offset; } - dst = brw_reg_from_fs_reg(&inst->dst); - brw_set_default_predicate_control(p, inst->predicate); - brw_set_default_predicate_inverse(p, inst->predicate_inverse); - brw_set_default_flag_reg(p, 0, inst->flag_subreg); - brw_set_default_saturate(p, inst->saturate); - brw_set_default_mask_control(p, inst->force_writemask_all); - brw_set_default_acc_write_control(p, inst->writes_accumulator); - brw_set_default_exec_size(p, cvt(inst->exec_size) - 1); + if (unlikely(debug_flag)) + annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset); switch (inst->exec_size) { case 1: @@ -1604,7 +1754,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) /* If the instruction writes to more than one register, it needs to * be a "compressed" instruction on Gen <= 5. */ - if (inst->exec_size * inst->dst.stride * type_sz(inst->dst.type) > 32) + if (inst->dst.component_size(inst->exec_size) > REG_SIZE) brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED); else brw_set_default_compression_control(p, BRW_COMPRESSION_NONE); @@ -1613,6 +1763,32 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) unreachable("Invalid instruction width"); } + for (unsigned int i = 0; i < inst->sources; i++) { + src[i] = brw_reg_from_fs_reg(p, inst, &inst->src[i], devinfo->gen); + + /* The accumulator result appears to get used for the + * conditional modifier generation. When negating a UD + * value, there is a 33rd bit generated for the sign in the + * accumulator value, so now you can't check, for example, + * equality with a 32-bit value. See piglit fs-op-neg-uvec4. + */ + assert(!inst->conditional_mod || + inst->src[i].type != BRW_REGISTER_TYPE_UD || + !inst->src[i].negate); + } + dst = brw_reg_from_fs_reg(p, inst, &inst->dst, devinfo->gen); + + brw_set_default_predicate_control(p, inst->predicate); + brw_set_default_predicate_inverse(p, inst->predicate_inverse); + brw_set_default_flag_reg(p, 0, inst->flag_subreg); + brw_set_default_saturate(p, inst->saturate); + brw_set_default_mask_control(p, inst->force_writemask_all); + brw_set_default_acc_write_control(p, inst->writes_accumulator); + brw_set_default_exec_size(p, cvt(inst->exec_size) - 1); + + assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen)); + assert(inst->mlen <= BRW_MAX_MSG_LENGTH); + switch (inst->opcode) { case BRW_OPCODE_MOV: brw_MOV(p, dst, src[0]); @@ -1872,7 +2048,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) break; case BRW_OPCODE_DO: - brw_DO(p, BRW_EXECUTE_8); + brw_DO(p, dispatch_width == 16 ? 
BRW_EXECUTE_16 : BRW_EXECUTE_8); break; case BRW_OPCODE_BREAK: @@ -1938,19 +2114,26 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) src[0].subnr = 4 * type_sz(src[0].type); brw_MOV(p, dst, stride(src[0], 8, 4, 1)); break; + case FS_OPCODE_GET_BUFFER_SIZE: + generate_get_buffer_size(inst, dst, src[0], src[1]); + break; case SHADER_OPCODE_TEX: case FS_OPCODE_TXB: case SHADER_OPCODE_TXD: case SHADER_OPCODE_TXF: + case SHADER_OPCODE_TXF_LZ: case SHADER_OPCODE_TXF_CMS: + case SHADER_OPCODE_TXF_CMS_W: case SHADER_OPCODE_TXF_UMS: case SHADER_OPCODE_TXF_MCS: case SHADER_OPCODE_TXL: + case SHADER_OPCODE_TXL_LZ: case SHADER_OPCODE_TXS: case SHADER_OPCODE_LOD: case SHADER_OPCODE_TG4: case SHADER_OPCODE_TG4_OFFSET: - generate_tex(inst, dst, src[0], src[1]); + case SHADER_OPCODE_SAMPLEINFO: + generate_tex(inst, dst, src[0], src[1], src[2]); break; case FS_OPCODE_DDX_COARSE: case FS_OPCODE_DDX_FINE: @@ -1958,8 +2141,7 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) break; case FS_OPCODE_DDY_COARSE: case FS_OPCODE_DDY_FINE: - assert(src[1].file == BRW_IMMEDIATE_VALUE); - generate_ddy(inst->opcode, dst, src[0], src[1].dw1.ud); + generate_ddy(inst->opcode, dst, src[0]); break; case SHADER_OPCODE_GEN4_SCRATCH_WRITE: @@ -1977,7 +2159,19 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) fill_count++; break; + case SHADER_OPCODE_MOV_INDIRECT: + generate_mov_indirect(inst, dst, src[0], src[1]); + break; + + case SHADER_OPCODE_URB_READ_SIMD8: + case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT: + generate_urb_read(inst, dst, src[0]); + break; + case SHADER_OPCODE_URB_WRITE_SIMD8: + case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT: + case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED: + case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT: generate_urb_write(inst, src[0]); break; @@ -2002,10 +2196,6 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) generate_fb_write(inst, src[0]); break; - case FS_OPCODE_BLORP_FB_WRITE: - generate_blorp_fb_write(inst); - break; - case FS_OPCODE_MOV_DISPATCH_TO_FLAGS: generate_mov_dispatch_to_flags(inst); break; @@ -2019,42 +2209,38 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) break; case SHADER_OPCODE_UNTYPED_ATOMIC: - assert(src[1].file == BRW_IMMEDIATE_VALUE && - src[2].file == BRW_IMMEDIATE_VALUE); - brw_untyped_atomic(p, dst, src[0], src[1], src[2].dw1.ud, + assert(src[2].file == BRW_IMMEDIATE_VALUE); + brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen, !inst->dst.is_null()); - brw_mark_surface_used(prog_data, src[1].dw1.ud); break; case SHADER_OPCODE_UNTYPED_SURFACE_READ: - assert(src[1].file == BRW_IMMEDIATE_VALUE && - src[2].file == BRW_IMMEDIATE_VALUE); + assert(src[2].file == BRW_IMMEDIATE_VALUE); brw_untyped_surface_read(p, dst, src[0], src[1], - inst->mlen, src[2].dw1.ud); - brw_mark_surface_used(prog_data, src[1].dw1.ud); + inst->mlen, src[2].ud); break; case SHADER_OPCODE_UNTYPED_SURFACE_WRITE: assert(src[2].file == BRW_IMMEDIATE_VALUE); brw_untyped_surface_write(p, src[0], src[1], - inst->mlen, src[2].dw1.ud); + inst->mlen, src[2].ud); break; case SHADER_OPCODE_TYPED_ATOMIC: assert(src[2].file == BRW_IMMEDIATE_VALUE); brw_typed_atomic(p, dst, src[0], src[1], - src[2].dw1.ud, inst->mlen, !inst->dst.is_null()); + src[2].ud, inst->mlen, !inst->dst.is_null()); break; case SHADER_OPCODE_TYPED_SURFACE_READ: assert(src[2].file == BRW_IMMEDIATE_VALUE); brw_typed_surface_read(p, dst, src[0], src[1], - inst->mlen, src[2].dw1.ud); + inst->mlen, src[2].ud); break; case 
SHADER_OPCODE_TYPED_SURFACE_WRITE: assert(src[2].file == BRW_IMMEDIATE_VALUE); - brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].dw1.ud); + brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud); break; case SHADER_OPCODE_MEMORY_FENCE: @@ -2073,9 +2259,27 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) brw_broadcast(p, dst, src[0], src[1]); break; - case FS_OPCODE_SET_OMASK: - generate_set_omask(inst, dst, src[0]); + case SHADER_OPCODE_EXTRACT_BYTE: { + assert(src[0].type == BRW_REGISTER_TYPE_D || + src[0].type == BRW_REGISTER_TYPE_UD); + + enum brw_reg_type type = + src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_B + : BRW_REGISTER_TYPE_UB; + brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 4)); + break; + } + + case SHADER_OPCODE_EXTRACT_WORD: { + assert(src[0].type == BRW_REGISTER_TYPE_D || + src[0].type == BRW_REGISTER_TYPE_UD); + + enum brw_reg_type type = + src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_W + : BRW_REGISTER_TYPE_UW; + brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 2)); break; + } case FS_OPCODE_SET_SAMPLE_ID: generate_set_sample_id(inst, dst, src[0], src[1]); @@ -2129,6 +2333,10 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) generate_barrier(inst, src[0]); break; + case FS_OPCODE_PACK_STENCIL_REF: + generate_stencil_ref_packing(inst, dst, src[0]); + break; + default: unreachable("Unsupported opcode"); @@ -2156,6 +2364,13 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) brw_set_uip_jip(p); annotation_finalize(&annotation, p->next_insn_offset); +#ifndef NDEBUG + bool validated = brw_validate_instructions(p, start_offset, &annotation); +#else + if (unlikely(debug_flag)) + brw_validate_instructions(p, start_offset, &annotation); +#endif + int before_size = p->next_insn_offset - start_offset; brw_compact_instructions(p, start_offset, annotation.ann_count, annotation.ann); @@ -2163,24 +2378,27 @@ fs_generator::generate_code(const cfg_t *cfg, int dispatch_width) if (unlikely(debug_flag)) { fprintf(stderr, "Native code for %s\n" - "SIMD%d shader: %d instructions. %d loops. %d:%d spills:fills. Promoted %u constants. Compacted %d to %d" + "SIMD%d shader: %d instructions. %d loops. %u cycles. %d:%d spills:fills. Promoted %u constants. Compacted %d to %d" " bytes (%.0f%%)\n", - shader_name, dispatch_width, before_size / 16, loop_count, + shader_name, dispatch_width, before_size / 16, loop_count, cfg->cycle_count, spill_count, fill_count, promoted_constants, before_size, after_size, 100.0f * (before_size - after_size) / before_size); dump_assembly(p->store, annotation.ann_count, annotation.ann, - p->devinfo, prog); - ralloc_free(annotation.ann); + p->devinfo); + ralloc_free(annotation.mem_ctx); } + assert(validated); compiler->shader_debug_log(log_data, - "%s SIMD%d shader: %d inst, %d loops, " + "%s SIMD%d shader: %d inst, %d loops, %u cycles, " "%d:%d spills:fills, Promoted %u constants, " - "compacted %d to %d bytes.\n", - stage_abbrev, dispatch_width, before_size / 16, - loop_count, spill_count, fill_count, - promoted_constants, before_size, after_size); + "compacted %d to %d bytes.", + _mesa_shader_stage_to_abbrev(stage), + dispatch_width, before_size / 16, + loop_count, cfg->cycle_count, spill_count, + fill_count, promoted_constants, before_size, + after_size); return start_offset; }
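
A note on the exec-size splitting in brw_reg_from_fs_reg at the top of this diff: a
register region may span at most two 32-byte GRFs and no single Width may cross a GRF
boundary, so once exec_size * stride * type size exceeds one GRF the region is built at
half the exec size and the compression state covers the second half. The standalone
sketch below restates that rule; pick_region and struct region are invented names for
illustration, the !p->compressed special case is ignored, and none of this is code from
the commit.

    #include <cassert>

    struct region {
       unsigned vstride;   /* elements between rows */
       unsigned width;     /* elements per row */
       unsigned hstride;   /* elements between columns */
    };

    static region
    pick_region(unsigned exec_size, unsigned stride, unsigned type_size)
    {
       if (stride == 0)
          return {0, 1, 0};                         /* scalar: <0;1,0> */

       if (exec_size * stride * type_size <= 32)    /* fits in one 32B GRF */
          return {exec_size * stride, exec_size, stride};

       /* Otherwise split the exec size in two; each half must fit in a GRF
        * on its own, and instruction compression handles the second half.
        */
       assert(exec_size / 2 * stride * type_size <= 32);
       return {exec_size / 2 * stride, exec_size / 2, stride};
    }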
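The immediate path of generate_mov_indirect folds the constant offset straight into a
direct register address. As a worked restatement (direct_addr and fold_offset are
invented for illustration): with a 32-byte GRF, a base of g4.8 plus an immediate offset
of 40 bytes lands at byte 4*32 + 8 + 40 = 176, i.e. g5.16.

    /* Sketch of the immediate-offset arithmetic in generate_mov_indirect;
     * helper names are invented and nothing here is from the commit.
     */
    enum { GRF_SIZE = 32 };   /* stands in for REG_SIZE */

    struct direct_addr { unsigned nr, subnr; };

    static direct_addr
    fold_offset(unsigned base_nr, unsigned base_subnr, unsigned imm_bytes)
    {
       unsigned byte = base_nr * GRF_SIZE + base_subnr + imm_bytes;
       return { byte / GRF_SIZE, byte % GRF_SIZE };   /* g<nr>.<subnr> */
    }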
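The <4;1,0> byte region set up in generate_stencil_ref_packing reads one byte out of
every four, i.e. the low byte of each SIMD8 channel's dword, so the byte-typed MOV packs
stencil slots 0-3 into DW0 and 4-7 into DW1 exactly as the diagram in that function
shows. A plain-C sketch of the resulting data movement, assuming a packed byte
destination (the helper is invented):

    #include <cstdint>

    /* What the strided UB read amounts to, written out by hand. */
    static void
    pack_stencil_slots(const uint8_t src[32], uint8_t dst[8])
    {
       for (int ch = 0; ch < 8; ch++)
          dst[ch] = src[ch * 4];   /* vstride 4, width 1, hstride 0 */
    }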
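In generate_tex's non-immediate path, the sampler message descriptor carries the surface
index in its low byte and the sampler index in the byte above it; multiplying by 0x101
replicates one value into both fields at once when the two indices match, saving the
SHL/OR pair. A sketch of that arithmetic (sampler_msg_index is an invented name):

    /* Mirrors the MUL-0x101 versus SHL+OR descriptor math; illustration only. */
    static unsigned
    sampler_msg_index(unsigned surface, unsigned sampler, unsigned base)
    {
       unsigned addr = (surface == sampler) ? sampler * 0x101
                                            : (sampler << 8) | surface;
       return (addr + base) & 0xfff;   /* only 12 descriptor bits are sent */
    }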
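SHADER_OPCODE_EXTRACT_WORD (and its EXTRACT_BYTE counterpart) in generate_code works
purely through register-region games: retype the dword source to (U)W, suboffset by the
requested word, and spread() doubles the stride so exactly one word is read per 32-bit
channel. Restated as plain array indexing for a SIMD8 instruction (the helper is
invented):

    #include <cstdint>

    /* Word extraction as the retype/suboffset/spread region performs it. */
    static void
    extract_word(const uint16_t src_words[16], unsigned which /* 0 or 1 */,
                 uint16_t dst[8])
    {
       for (int ch = 0; ch < 8; ch++)
          dst[ch] = src_words[ch * 2 + which];   /* stride 2, offset = which */
    }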