ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
+ if (brw->scalar_vs) {
+ /* The scalar backend can't handle indirect addressing of shader outputs
+ * or temporaries, so ask the GLSL compiler to lower it away, and disable
+ * the AOS-oriented optimizations, which don't help a scalar backend.
+ */
+ ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
+ ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
+ ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
+ }
+
/* ARB_viewport_array */
if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
brw_process_driconf_options(brw);
brw_process_intel_debug_variable(brw);
+
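+ /* Broadwell and later default to the scalar (FS) backend for vertex
+ * shaders; the DEBUG_VEC4VS debug flag forces the older vec4 backend.
+ */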
+ if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
+ brw->scalar_vs = true;
+
brw_initialize_context_constants(brw);
ctx->Const.ResetStrategy = notify_reset
   ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;
return true;
}
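+/* Return true if the given stage is compiled with the scalar (FS) backend:
+ * always for fragment shaders, and for vertex shaders when brw->scalar_vs
+ * is set.
+ */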
+static inline bool
+is_scalar_shader_stage(struct brw_context *brw, int stage)
+{
+ switch (stage) {
+ case MESA_SHADER_FRAGMENT:
+ return true;
+ case MESA_SHADER_VERTEX:
+ return brw->scalar_vs;
+ default:
+ return false;
+ }
+}
+
static void
brw_lower_packing_builtins(struct brw_context *brw,
gl_shader_stage shader_type,
| LOWER_PACK_UNORM_2x16
| LOWER_UNPACK_UNORM_2x16;
- if (shader_type == MESA_SHADER_FRAGMENT) {
+ if (is_scalar_shader_stage(brw, shader_type)) {
ops |= LOWER_UNPACK_UNORM_4x8
| LOWER_UNPACK_SNORM_4x8
| LOWER_PACK_UNORM_4x8
* lowering is needed. For SOA code, the Half2x16 ops must be
* scalarized.
*/
- if (shader_type == MESA_SHADER_FRAGMENT) {
+ if (is_scalar_shader_stage(brw, shader_type)) {
ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
| LOWER_UNPACK_HALF_2x16_TO_SPLIT;
}
do {
progress = false;
- if (stage == MESA_SHADER_FRAGMENT) {
+ if (is_scalar_shader_stage(brw, stage)) {
brw_do_channel_expressions(shader->base.ir);
brw_do_vector_splitting(shader->base.ir);
}
*/
#include "brw_vec4.h"
+#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_vs.h"
#include "brw_dead_control_flow.h"
{
bool start_busy = false;
double start_time = 0;
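+ /* Filled in by whichever backend (scalar or vec4) generates the code. */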
+ const unsigned *assembly = NULL;
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
if (unlikely(INTEL_DEBUG & DEBUG_VS))
brw_dump_ir("vertex", prog, &shader->base, &c->vp->program.Base);
- vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx);
- if (!v.run()) {
- if (prog) {
- prog->LinkStatus = false;
- ralloc_strcat(&prog->InfoLog, v.fail_msg);
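+ /* On Gen8+, compile the vertex shader with the scalar (FS) backend at
+ * SIMD8 dispatch width.
+ */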
+ if (prog && brw->gen >= 8 && brw->scalar_vs) {
+ fs_visitor v(brw, mem_ctx, &c->key, prog_data, prog, &c->vp->program, 8);
+ if (!v.run_vs()) {
+ if (prog) {
+ prog->LinkStatus = false;
+ ralloc_strcat(&prog->InfoLog, v.fail_msg);
+ }
+
+ _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
+ v.fail_msg);
+
+ return NULL;
}
- _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
- v.fail_msg);
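+ /* Emit the final Gen8 assembly with the FS generator. */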
+ fs_generator g(brw, mem_ctx, (void *) &c->key, &prog_data->base.base,
+ &c->vp->program.Base, v.runtime_check_aads_emit);
+ if (INTEL_DEBUG & DEBUG_VS) {
+ char *name = ralloc_asprintf(mem_ctx, "%s vertex shader %d",
+ prog->Label ? prog->Label : "unnamed",
+ prog->Name);
+ g.enable_debug(name);
+ }
+ g.generate_code(v.cfg, 8);
+ assembly = g.get_assembly(final_assembly_size);
- return NULL;
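+ /* Record that this VS binary was compiled SIMD8, so the VS state setup
+ * can enable 8-wide dispatch.
+ */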
+ if (assembly)
+ prog_data->base.simd8 = true;
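+ /* Propagate the scratch space needed for register spills. */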
+ c->base.last_scratch = v.last_scratch;
}
- const unsigned *assembly = NULL;
- vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base,
- mem_ctx, INTEL_DEBUG & DEBUG_VS);
- assembly = g.generate_assembly(v.cfg, final_assembly_size);
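+ /* Fall back to the vec4 backend if the scalar path was not used or
+ * produced no assembly.
+ */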
+ if (!assembly) {
+ vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx);
+ if (!v.run()) {
+ if (prog) {
+ prog->LinkStatus = false;
+ ralloc_strcat(&prog->InfoLog, v.fail_msg);
+ }
+
+ _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
+ v.fail_msg);
+
+ return NULL;
+ }
+
+ vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base,
+ mem_ctx, INTEL_DEBUG & DEBUG_VS);
+ assembly = g.generate_assembly(v.cfg, final_assembly_size);
+ }
if (unlikely(brw->perf_debug) && shader) {
if (shader->compiled_once) {