#include "brw_vec4_live_variables.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
#include "program/prog_parameter.h"
#include "util/u_math.h"
switch (opcode) {
case SHADER_OPCODE_SHADER_TIME_ADD:
case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
- case SHADER_OPCODE_UNTYPED_ATOMIC:
- case SHADER_OPCODE_UNTYPED_SURFACE_READ:
- case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
- case SHADER_OPCODE_TYPED_ATOMIC:
- case SHADER_OPCODE_TYPED_SURFACE_READ:
- case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+ case VEC4_OPCODE_UNTYPED_ATOMIC:
+ case VEC4_OPCODE_UNTYPED_SURFACE_READ:
+ case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
case VEC4_OPCODE_URB_READ:
case TCS_OPCODE_URB_WRITE:
case TCS_OPCODE_RELEASE_INPUT:
{
switch (opcode) {
case SHADER_OPCODE_SHADER_TIME_ADD:
- case SHADER_OPCODE_UNTYPED_ATOMIC:
- case SHADER_OPCODE_UNTYPED_SURFACE_READ:
- case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
- case SHADER_OPCODE_TYPED_ATOMIC:
- case SHADER_OPCODE_TYPED_SURFACE_READ:
- case SHADER_OPCODE_TYPED_SURFACE_WRITE:
+ case VEC4_OPCODE_UNTYPED_ATOMIC:
+ case VEC4_OPCODE_UNTYPED_SURFACE_READ:
+ case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
case TCS_OPCODE_URB_WRITE:
if (arg == 0)
return mlen * REG_SIZE;
bool progress = false;
foreach_block(block, cfg) {
- int last_reg = -1, last_offset = -1;
+ unsigned last_reg = ~0u, last_offset = ~0u;
enum brw_reg_file last_reg_file = BAD_FILE;
uint8_t imm[4] = { 0 };
foreach_inst_in_block_safe(vec4_instruction, inst, block) {
int vf = -1;
- enum brw_reg_type need_type;
+ enum brw_reg_type need_type = BRW_REGISTER_TYPE_LAST;
/* Look for unconditional MOVs from an immediate with a partial
* writemask. Skip type-conversion MOVs other than integer 0,
need_type = BRW_REGISTER_TYPE_F;
}
} else {
- last_reg = -1;
+ last_reg = ~0u;
}
/* If this wasn't a MOV, or the destination register doesn't match,
}
inst_count = 0;
- last_reg = -1;
+ last_reg = ~0u;
writemask = 0;
dest_type = BRW_REGISTER_TYPE_F;
progress = true;
}
break;
- case BRW_OPCODE_CMP:
- if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
- inst->src[0].abs &&
- inst->src[0].negate &&
- inst->src[1].is_zero()) {
- inst->src[0].abs = false;
- inst->src[0].negate = false;
- inst->conditional_mod = BRW_CONDITIONAL_Z;
- progress = true;
- break;
- }
- break;
case SHADER_OPCODE_BROADCAST:
if (is_uniform(inst->src[0]) ||
inst->src[1].is_zero()) {
if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
return false;
+ /* If we write to the flag register changing the swizzle would change
+ * what channels are written to the flag register.
+ */
+ if (writes_flag())
+ return false;
+
/* We can't swizzle implicit accumulator access. We'd have to
* reswizzle the producer of the accumulator value in addition
* to the consumer (i.e. both MUL and MACH). Just skip this.
opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
opcode != VEC4_OPCODE_PACK_BYTES) {
for (int i = 0; i < 3; i++) {
- if (src[i].file == BAD_FILE || src[i].file == IMM)
+ if (src[i].file == BAD_FILE)
continue;
+ if (src[i].file == IMM) {
+ assert(src[i].type != BRW_REGISTER_TYPE_V &&
+ src[i].type != BRW_REGISTER_TYPE_UV);
+
+ /* Vector immediate types need to be reswizzled. */
+ if (src[i].type == BRW_REGISTER_TYPE_VF) {
+ const unsigned imm[] = {
+ (src[i].ud >> 0) & 0x0ff,
+ (src[i].ud >> 8) & 0x0ff,
+ (src[i].ud >> 16) & 0x0ff,
+ (src[i].ud >> 24) & 0x0ff,
+ };
+
+ src[i] = brw_imm_vf4(imm[BRW_GET_SWZ(swizzle, 0)],
+ imm[BRW_GET_SWZ(swizzle, 1)],
+ imm[BRW_GET_SWZ(swizzle, 2)],
+ imm[BRW_GET_SWZ(swizzle, 3)]);
+ }
+
+ continue;
+ }
+
src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
}
}
* in the register instead.
*/
if (to_mrf && scan_inst->mlen > 0) {
- if (inst->dst.nr >= scan_inst->base_mrf &&
- inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
+ unsigned start = scan_inst->base_mrf;
+ unsigned end = scan_inst->base_mrf + scan_inst->mlen;
+
+ if (inst->dst.nr >= start && inst->dst.nr < end) {
break;
}
} else {
struct brw_vs_prog_data *prog_data,
nir_shader *shader,
int shader_time_index,
+ struct brw_compile_stats *stats,
char **error_str)
{
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
- shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
+ brw_nir_apply_key(shader, compiler, &key->base, 8, is_scalar);
const unsigned *assembly = NULL;
brw_nir_lower_vs_inputs(shader, key->gl_attrib_wa_flags);
brw_nir_lower_vue_outputs(shader);
- shader = brw_postprocess_nir(shader, compiler, is_scalar);
+ brw_postprocess_nir(shader, compiler, is_scalar);
prog_data->base.clip_distance_mask =
((1 << shader->info.clip_distance_array_size) - 1);
if (is_scalar) {
prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
- fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
- NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
+ fs_visitor v(compiler, log_data, mem_ctx, &key->base,
+ &prog_data->base.base,
shader, 8, shader_time_index);
if (!v.run_vs()) {
if (error_str)
prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
fs_generator g(compiler, log_data, mem_ctx,
- &prog_data->base.base, v.promoted_constants,
+ &prog_data->base.base, v.shader_stats,
v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
g.enable_debug(debug_name);
}
- g.generate_code(v.cfg, 8);
+ g.generate_code(v.cfg, 8, stats);
assembly = g.get_assembly();
}
}
assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
- shader, &prog_data->base, v.cfg);
+ shader, &prog_data->base,
+ v.cfg, stats);
}
return assembly;