}
void
-fs_visitor::fail(const char *format, ...)
+fs_visitor::vfail(const char *format, va_list va)
{
- va_list va;
char *msg;
   if (failed)
      return;

   failed = true;
- va_start(va, format);
msg = ralloc_vasprintf(mem_ctx, format, va);
- va_end(va);
msg = ralloc_asprintf(mem_ctx, "FS compile failed: %s\n", msg);
this->fail_msg = msg;
   if (INTEL_DEBUG & DEBUG_WM) {
      fprintf(stderr, "%s", msg);
   }
}
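+/**
+ * Mark this program as impossible to compile, with a printf-style reason.
+ *
+ * The va_list handling lives in vfail() so that no16() can share it.
+ */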
+void
+fs_visitor::fail(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ vfail(format, va);
+ va_end(va);
+}
+
+/**
+ * Mark this program as impossible to compile in SIMD16 mode.
+ *
+ * During the SIMD8 compile (which happens first), we can detect and flag
+ * things that are unsupported in SIMD16 mode, so the compiler can skip
+ * the SIMD16 compile altogether.
+ *
+ * During a SIMD16 compile (if one happens anyway), this just calls fail().
+ */
+void
+fs_visitor::no16(const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+
+ if (dispatch_width == 16) {
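+      /* We really are compiling SIMD16, so this is an actual failure. */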
+ vfail(format, va);
+ } else {
+ simd16_unsupported = true;
+
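+      /* Only record the reason when perf debugging is enabled; append to
+       * any reason that was already recorded.
+       */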
+ if (INTEL_DEBUG & DEBUG_PERF) {
+ if (no16_msg)
+ ralloc_vasprintf_append(&no16_msg, format, va);
+ else
+ no16_msg = ralloc_vasprintf(mem_ctx, format, va);
+ }
+ }
+
+ va_end(va);
+}
+
fs_inst *
fs_visitor::emit(enum opcode opcode)
{
switch (opcode) {
case SHADER_OPCODE_INT_QUOTIENT:
case SHADER_OPCODE_INT_REMAINDER:
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 INTDIV unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 INTDIV unsupported\n");
break;
case SHADER_OPCODE_POW:
break;
exec_list *simd16_instructions = NULL;
fs_visitor v2(brw, c, prog, fp, 16);
if (brw->gen >= 5 && likely(!(INTEL_DEBUG & DEBUG_NO16))) {
- /* Try a SIMD16 compile */
- v2.import_uniforms(&v);
- if (!v2.run()) {
- perf_debug("SIMD16 shader failed to compile, falling back to "
- "SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
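+      /* The SIMD8 compile (v) has already run; skip the SIMD16 compile if it
+       * flagged the program as impossible in SIMD16, and report why.
+       */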
+ if (!v.simd16_unsupported) {
+ /* Try a SIMD16 compile */
+ v2.import_uniforms(&v);
+ if (!v2.run()) {
+ perf_debug("SIMD16 shader failed to compile, falling back to "
+ "SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
+ } else {
+ simd16_instructions = &v2.instructions;
+ }
} else {
- simd16_instructions = &v2.instructions;
+ perf_debug("SIMD16 shader unsupported, falling back to "
+ "SIMD8 at a 10-20%% performance cost: %s", v.no16_msg);
}
}
* FINISHME: Emit just the MUL if we know an operand is small
* enough.
*/
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 explicit accumulator operands unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), this->result.type);
}
break;
case ir_binop_imul_high: {
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 explicit accumulator operands unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), this->result.type);
emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
break;
case ir_binop_carry: {
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 explicit accumulator operands unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_UD);
break;
}
case ir_binop_borrow: {
- if (brw->gen >= 7 && dispatch_width == 16)
- fail("SIMD16 explicit accumulator operands unsupported\n");
+ if (brw->gen >= 7)
+ no16("SIMD16 explicit accumulator operands unsupported\n");
struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_UD);
next.reg_offset++;
break;
case ir_txd: {
- if (dispatch_width == 16)
- fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");
+ no16("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");
/* Load dPdx and the coordinate together:
* [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
break;
case ir_tg4:
if (has_nonconstant_offset) {
- if (ir->shadow_comparitor && dispatch_width == 16)
- fail("Gen7 does not support gather4_po_c in SIMD16 mode.");
+ if (ir->shadow_comparitor)
+ no16("Gen7 does not support gather4_po_c in SIMD16 mode.");
/* More crazy intermixing */
ir->offset->accept(this);
0
};
+ no16("rectangle scale uniform setup not supported on SIMD16\n");
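+   /* Record the limitation; in an actual SIMD16 compile, return the
+    * coordinate without setting up the scale uniforms.
+    */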
if (dispatch_width == 16) {
- fail("rectangle scale uniform setup not supported on SIMD16\n");
return coordinate;
}
void
fs_visitor::visit(ir_if *ir)
{
- if (brw->gen < 6 && dispatch_width == 16) {
- fail("Can't support (non-uniform) control flow on SIMD16\n");
+ if (brw->gen < 6) {
+ no16("Can't support (non-uniform) control flow on SIMD16\n");
}
/* Don't point the annotation at the if statement, because then it plus
void
fs_visitor::visit(ir_loop *ir)
{
- if (brw->gen < 6 && dispatch_width == 16) {
- fail("Can't support (non-uniform) control flow on SIMD16\n");
+ if (brw->gen < 6) {
+ no16("Can't support (non-uniform) control flow on SIMD16\n");
}
this->base_ir = NULL;
bool do_dual_src = this->dual_src_output.file != BAD_FILE;
bool src0_alpha_to_render_target = false;
- if (dispatch_width == 16 && do_dual_src) {
- fail("GL_ARB_blend_func_extended not yet supported in SIMD16.");
- do_dual_src = false;
+ if (do_dual_src) {
+ no16("GL_ARB_blend_func_extended not yet supported in SIMD16.");
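+      /* Record the limitation; in an actual SIMD16 compile, fall back to a
+       * single-source framebuffer write.
+       */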
+ if (dispatch_width == 16)
+ do_dual_src = false;
}
/* From the Sandy Bridge PRM, volume 4, page 198:
nr += reg_width;
if (c->source_depth_to_render_target) {
- if (brw->gen == 6 && dispatch_width == 16) {
+ if (brw->gen == 6) {
/* For outputting oDepth on gen6, SIMD8 writes have to be
* used. This would require SIMD8 moves of each half to
* message regs, kind of like pre-gen5 SIMD16 FB writes.
* Just bail on doing so for now.
*/
- fail("Missing support for simd16 depth writes on gen6\n");
+ no16("Missing support for simd16 depth writes on gen6\n");
}
if (prog->OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {