#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_program.h"
+#include "compiler/glsl/gl_nir.h"
+#include "compiler/glsl/gl_nir_linker.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"
#include "compiler/nir/nir_serialize.h"
#include "program/program.h"
+#include "main/glspirv.h"
#include "main/mtypes.h"
#include "main/shaderapi.h"
#include "main/shaderobj.h"
do_vec_index_to_cond_assign(shader->ir);
lower_vector_insert(shader->ir, true);
lower_offset_arrays(shader->ir);
- lower_noise(shader->ir);
lower_quadop_vector(shader->ir, false);
validate_ir_tree(shader->ir);
_mesa_copy_linked_program_data(shProg, shader);
prog->ShadowSamplers = shader->shadow_samplers;
- _mesa_update_shader_textures_used(shProg, prog);
bool debug_enabled =
(INTEL_DEBUG & intel_debug_flag_for_shader_stage(shader->Stage));
compiler->scalar_stage[stage]);
}
+ /* TODO: Verify whether it's feasible to split up the NIR linking work into a
+  * per-stage part (that fills out information we need for the passes) and an
+  * actual linking part, so that we could fold brw_nir_lower_resources
+  * back into brw_create_nir.
+  */
+
+ /* SPIR-V programs use a NIR linker */
+ if (shProg->data->spirv) {
+ static const gl_nir_linker_options opts = {
+ .fill_parameters = false,
+ };
+ if (!gl_nir_link_spirv(ctx, shProg, &opts))
+ return GL_FALSE;
+ }
+
+ for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
+ struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
+ if (!shader)
+ continue;
+
+ struct gl_program *prog = shader->Program;
+
+ brw_nir_lower_resources(prog->nir, shProg, prog, &brw->screen->devinfo);
+
+ NIR_PASS_V(prog->nir, brw_nir_lower_gl_images, prog);
+ }
+
/* Determine first and last stage. */
unsigned first = MESA_SHADER_STAGES;
unsigned last = 0;
continue;
brw_nir_link_shaders(compiler,
- &shProg->_LinkedShaders[i]->Program->nir,
- &shProg->_LinkedShaders[next]->Program->nir);
+ shProg->_LinkedShaders[i]->Program->nir,
+ shProg->_LinkedShaders[next]->Program->nir);
next = i;
}
}
continue;
struct gl_program *prog = shader->Program;
+
+ _mesa_update_shader_textures_used(shProg, prog);
+
brw_shader_gather_info(prog->nir, prog);
- NIR_PASS_V(prog->nir, nir_lower_samplers, shProg);
- NIR_PASS_V(prog->nir, nir_lower_atomics, shProg, false);
- NIR_PASS_V(prog->nir, nir_lower_atomics_to_ssbo,
- prog->nir->info.num_abos);
+ NIR_PASS_V(prog->nir, gl_nir_lower_atomics, shProg, false);
+ NIR_PASS_V(prog->nir, nir_lower_atomics_to_ssbo);
+
+ nir_sweep(prog->nir);
infos[stage] = &prog->nir->info;
* get sent to the shader.
*/
nir_foreach_variable(var, &prog->nir->uniforms) {
- if (strncmp(var->name, "gl_", 3) == 0) {
- const nir_state_slot *const slots = var->state_slots;
- assert(var->state_slots != NULL);
-
- for (unsigned int i = 0; i < var->num_state_slots; i++) {
- _mesa_add_state_reference(prog->Parameters, slots[i].tokens);
- }
+ const nir_state_slot *const slots = var->state_slots;
+ for (unsigned int i = 0; i < var->num_state_slots; i++) {
+ assert(slots != NULL);
+ _mesa_add_state_reference(prog->Parameters, slots[i].tokens);
}
}
}
}
}
- if (brw->ctx.Cache) {
- for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
- struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
- if (!shader)
- continue;
-
- struct gl_program *prog = shader->Program;
- brw_program_serialize_nir(ctx, prog);
- }
- }
-
if (brw->precompile && !brw_shader_precompile(ctx, shProg))
- return false;
+ return GL_FALSE;
- build_program_resource_list(ctx, shProg);
+ /* SPIR-V programs build their resource list from linked NIR shaders. */
+ if (!shProg->data->spirv)
+ build_program_resource_list(ctx, shProg, false);
+ else
+ nir_build_program_resource_list(ctx, shProg, true);
for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
shader->ir = NULL;
}
- return true;
+ return GL_TRUE;
}