void *mem_ctx,
const struct brw_vs_prog_key *key,
struct brw_vs_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
const struct brw_tes_prog_key *key,
const struct brw_vue_map *input_vue_map,
struct brw_tes_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
void *mem_ctx,
const struct brw_gs_prog_key *key,
struct brw_gs_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
struct gl_program *prog,
int shader_time_index,
struct brw_compile_stats *stats,
void *mem_ctx,
const struct brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
int shader_time_index8,
int shader_time_index16,
int shader_time_index32,
void *mem_ctx,
const struct brw_cs_prog_key *key,
struct brw_cs_prog_data *prog_data,
- const nir_shader *shader,
+ const nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
void *mem_ctx,
const struct brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
int shader_time_index8, int shader_time_index16,
int shader_time_index32, bool allow_spilling,
bool use_rep_send, struct brw_vue_map *vue_map,
const struct gen_device_info *devinfo = compiler->devinfo;
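/* Gen6+ hardware can run fragment shaders up to SIMD32; earlier gens
 * top out at SIMD16, hence the cap on the subgroup size below.
 */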
const unsigned max_subgroup_size = compiler->devinfo->gen >= 6 ? 32 : 16;
- brw_nir_apply_key(shader, compiler, &key->base, max_subgroup_size, true);
- brw_nir_lower_fs_inputs(shader, devinfo, key);
- brw_nir_lower_fs_outputs(shader);
+ brw_nir_apply_key(nir, compiler, &key->base, max_subgroup_size, true);
+ brw_nir_lower_fs_inputs(nir, devinfo, key);
+ brw_nir_lower_fs_outputs(nir);
if (devinfo->gen < 6)
- brw_setup_vue_interpolation(vue_map, shader, prog_data);
+ brw_setup_vue_interpolation(vue_map, nir, prog_data);
/* From the SKL PRM, Volume 7, "Alpha Coverage":
 * "If Pixel Shader outputs oMask, AlphaToCoverage is disabled in
 * hardware, regardless of the state setting for this feature."
 */
/* Run constant fold optimization in order to get the correct source
 * offset to determine render target 0 store instruction in
 * emit_alpha_to_coverage pass.
 */
- NIR_PASS_V(shader, nir_opt_constant_folding);
- NIR_PASS_V(shader, brw_nir_lower_alpha_to_coverage);
+ NIR_PASS_V(nir, nir_opt_constant_folding);
+ NIR_PASS_V(nir, brw_nir_lower_alpha_to_coverage);
}
if (!key->multisample_fbo)
- NIR_PASS_V(shader, brw_nir_demote_sample_qualifiers);
- NIR_PASS_V(shader, brw_nir_move_interpolation_to_top);
- brw_postprocess_nir(shader, compiler, true);
+ NIR_PASS_V(nir, brw_nir_demote_sample_qualifiers);
+ NIR_PASS_V(nir, brw_nir_move_interpolation_to_top);
+ brw_postprocess_nir(nir, compiler, true);
- brw_nir_populate_wm_prog_data(shader, compiler->devinfo, key, prog_data);
+ brw_nir_populate_wm_prog_data(nir, compiler->devinfo, key, prog_data);
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
bool has_spilled = false;
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 8, shader_time_index8);
+ &prog_data->base, nir, 8, shader_time_index8);
if (!v8->run_fs(allow_spilling, false /* do_rep_send */)) {
if (error_str)
*error_str = ralloc_strdup(mem_ctx, v8->fail_msg);
likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
/* Try a SIMD16 compile */
v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 16, shader_time_index16);
+ &prog_data->base, nir, 16, shader_time_index16);
v16->import_uniforms(v8);
if (!v16->run_fs(allow_spilling, use_rep_send)) {
compiler->shader_perf_log(log_data,
!(INTEL_DEBUG & DEBUG_NO32)) {
/* Try a SIMD32 compile */
v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
- &prog_data->base, shader, 32, shader_time_index32);
+ &prog_data->base, nir, 32, shader_time_index32);
v32->import_uniforms(v8);
if (!v32->run_fs(allow_spilling, false)) {
compiler->shader_perf_log(log_data,
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
- shader->info.label ?
- shader->info.label : "unnamed",
- shader->info.name));
+ nir->info.label ?
+ nir->info.label : "unnamed",
+ nir->info.name));
}
if (simd8_cfg) {
stats = stats ? stats + 1 : NULL;
}
- g.add_const_data(shader->constant_data, shader->constant_data_size);
+ g.add_const_data(nir->constant_data, nir->constant_data_size);
delete v8;
delete v16;
void *mem_ctx,
const struct brw_cs_prog_key *key,
struct brw_cs_prog_data *prog_data,
- const nir_shader *src_shader,
+ const nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str)
{
- prog_data->base.total_shared = src_shader->info.cs.shared_size;
- prog_data->slm_size = src_shader->shared_size;
+ prog_data->base.total_shared = nir->info.cs.shared_size;
+ prog_data->slm_size = nir->shared_size;
/* Generate code for all the possible SIMD variants. */
bool generate_all;
unsigned min_dispatch_width;
unsigned max_dispatch_width;
- if (src_shader->info.cs.local_size_variable) {
+ if (nir->info.cs.local_size_variable) {
generate_all = true;
min_dispatch_width = 8;
max_dispatch_width = 32;
} else {
generate_all = false;
- prog_data->local_size[0] = src_shader->info.cs.local_size[0];
- prog_data->local_size[1] = src_shader->info.cs.local_size[1];
- prog_data->local_size[2] = src_shader->info.cs.local_size[2];
+ prog_data->local_size[0] = nir->info.cs.local_size[0];
+ prog_data->local_size[1] = nir->info.cs.local_size[1];
+ prog_data->local_size[2] = nir->info.cs.local_size[2];
unsigned local_workgroup_size = prog_data->local_size[0] *
prog_data->local_size[1] *
prog_data->local_size[2];
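/* For illustration: a fixed 8x8x1 workgroup gives
 * local_workgroup_size = 64 invocations, so SIMD8 needs 8 hardware
 * threads per workgroup while SIMD32 needs only 2.
 */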
if (likely(!(INTEL_DEBUG & DEBUG_NO8)) &&
min_dispatch_width <= 8 && max_dispatch_width >= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
- src_shader, 8);
+ nir, 8);
v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base,
nir8, 8, shader_time_index);
min_dispatch_width <= 16 && max_dispatch_width >= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
- src_shader, 16);
+ nir, 16);
v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base,
nir16, 16, shader_time_index);
min_dispatch_width <= 32 && max_dispatch_width >= 32) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
- src_shader, 32);
+ nir, 32);
v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
&prog_data->base,
nir32, 32, shader_time_index);
v->runtime_check_aads_emit, MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- src_shader->info.label ?
- src_shader->info.label : "unnamed",
- src_shader->info.name);
+ nir->info.label ?
+ nir->info.label : "unnamed",
+ nir->info.name);
g.enable_debug(name);
}
v->performance_analysis.require(), stats);
}
- g.add_const_data(src_shader->constant_data, src_shader->constant_data_size);
+ g.add_const_data(nir->constant_data, nir->constant_data_size);
ret = g.get_assembly();
void *mem_ctx,
const struct brw_vs_prog_key *key,
struct brw_vs_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str)
{
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
- brw_nir_apply_key(shader, compiler, &key->base, 8, is_scalar);
+ brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
const unsigned *assembly = NULL;
*/
assert(!is_scalar);
assert(key->copy_edgeflag);
- shader->info.inputs_read |= VERT_BIT_EDGEFLAG;
+ nir->info.inputs_read |= VERT_BIT_EDGEFLAG;
}
- prog_data->inputs_read = shader->info.inputs_read;
- prog_data->double_inputs_read = shader->info.vs.double_inputs;
+ prog_data->inputs_read = nir->info.inputs_read;
+ prog_data->double_inputs_read = nir->info.vs.double_inputs;
- brw_nir_lower_vs_inputs(shader, key->gl_attrib_wa_flags);
- brw_nir_lower_vue_outputs(shader);
- brw_postprocess_nir(shader, compiler, is_scalar);
+ brw_nir_lower_vs_inputs(nir, key->gl_attrib_wa_flags);
+ brw_nir_lower_vue_outputs(nir);
+ brw_postprocess_nir(nir, compiler, is_scalar);
prog_data->base.clip_distance_mask =
- ((1 << shader->info.clip_distance_array_size) - 1);
+ ((1 << nir->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info.cull_distance_array_size) - 1) <<
- shader->info.clip_distance_array_size;
+ ((1 << nir->info.cull_distance_array_size) - 1) <<
+ nir->info.clip_distance_array_size;
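/* For example, 4 clip distances and 2 cull distances yield
 * clip_distance_mask = 0xf and cull_distance_mask = 0x30.
 */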
unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);
/* gl_VertexID and gl_InstanceID are system values, but arrive via an
* incoming vertex attribute. So, add an extra slot.
*/
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
}
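/* For example, a shader reading 4 user attributes plus gl_VertexID
 * therefore occupies 5 attribute slots.
 */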
/* gl_DrawID and IsIndexedDraw share their very own vec4 */
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID) |
BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))) {
nr_attribute_slots++;
}
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))
prog_data->uses_is_indexed_draw = true;
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX))
prog_data->uses_firstvertex = true;
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE))
prog_data->uses_baseinstance = true;
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
prog_data->uses_vertexid = true;
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))
prog_data->uses_instanceid = true;
- if (shader->info.system_values_read &
+ if (nir->info.system_values_read &
BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID))
prog_data->uses_drawid = true;
fs_visitor v(compiler, log_data, mem_ctx, &key->base,
&prog_data->base.base,
- shader, 8, shader_time_index);
+ nir, 8, shader_time_index);
if (!v.run_vs()) {
if (error_str)
*error_str = ralloc_strdup(mem_ctx, v.fail_msg);
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
ralloc_asprintf(mem_ctx, "%s vertex shader %s",
- shader->info.label ? shader->info.label :
+ nir->info.label ? nir->info.label :
"unnamed",
- shader->info.name);
+ nir->info.name);
g.enable_debug(debug_name);
}
g.generate_code(v.cfg, 8, v.shader_stats,
v.performance_analysis.require(), stats);
- g.add_const_data(shader->constant_data, shader->constant_data_size);
+ g.add_const_data(nir->constant_data, nir->constant_data_size);
assembly = g.get_assembly();
}
prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
vec4_vs_visitor v(compiler, log_data, key, prog_data,
- shader, mem_ctx, shader_time_index);
+ nir, mem_ctx, shader_time_index);
if (!v.run()) {
if (error_str)
*error_str = ralloc_strdup(mem_ctx, v.fail_msg);
}
assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
- shader, &prog_data->base,
+ nir, &prog_data->base,
v.cfg,
v.performance_analysis.require(),
stats);
void *mem_ctx,
const struct brw_gs_prog_key *key,
struct brw_gs_prog_data *prog_data,
- nir_shader *shader,
+ nir_shader *nir,
struct gl_program *prog,
int shader_time_index,
struct brw_compile_stats *stats,
/* For SSO pipelines, we use a fixed VUE map layout based on variable
* locations, so we can rely on rendezvous-by-location making this work.
*/
- GLbitfield64 inputs_read = shader->info.inputs_read;
+ GLbitfield64 inputs_read = nir->info.inputs_read;
brw_compute_vue_map(compiler->devinfo,
&c.input_vue_map, inputs_read,
- shader->info.separate_shader, 1);
+ nir->info.separate_shader, 1);
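/* With separate_shader set, slots are keyed to varying locations
 * rather than packed, so any producer laid out by the same rule
 * lines up with this input map.
 */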
- brw_nir_apply_key(shader, compiler, &key->base, 8, is_scalar);
- brw_nir_lower_vue_inputs(shader, &c.input_vue_map);
- brw_nir_lower_vue_outputs(shader);
- brw_postprocess_nir(shader, compiler, is_scalar);
+ brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
+ brw_nir_lower_vue_inputs(nir, &c.input_vue_map);
+ brw_nir_lower_vue_outputs(nir);
+ brw_postprocess_nir(nir, compiler, is_scalar);
prog_data->base.clip_distance_mask =
- ((1 << shader->info.clip_distance_array_size) - 1);
+ ((1 << nir->info.clip_distance_array_size) - 1);
prog_data->base.cull_distance_mask =
- ((1 << shader->info.cull_distance_array_size) - 1) <<
- shader->info.clip_distance_array_size;
+ ((1 << nir->info.cull_distance_array_size) - 1) <<
+ nir->info.clip_distance_array_size;
prog_data->include_primitive_id =
- (shader->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
+ (nir->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID)) != 0;
- prog_data->invocations = shader->info.gs.invocations;
+ prog_data->invocations = nir->info.gs.invocations;
if (compiler->devinfo->gen >= 8)
- prog_data->static_vertex_count = nir_gs_count_vertices(shader);
+ prog_data->static_vertex_count = nir_gs_count_vertices(nir);
if (compiler->devinfo->gen >= 7) {
- if (shader->info.gs.output_primitive == GL_POINTS) {
+ if (nir->info.gs.output_primitive == GL_POINTS) {
/* When the output type is points, the geometry shader may output data
* to multiple streams, and EndPrimitive() has no effect. So we
* configure the hardware to interpret the control data as stream ID.
*/
prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;
/* We only have to emit control bits if we are using non-zero streams */
- if (shader->info.gs.active_stream_mask != (1 << 0))
+ if (nir->info.gs.active_stream_mask != (1 << 0))
c.control_data_bits_per_vertex = 2;
else
c.control_data_bits_per_vertex = 0;
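/* For example, a shader emitting to streams 0 and 2 has
 * active_stream_mask = 0x5, so two stream-ID bits are stored for
 * every emitted vertex.
 */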
/* Otherwise, the control data is one bit per vertex recording
 * whether the shader called EndPrimitive().
 */
c.control_data_bits_per_vertex =
- shader->info.gs.uses_end_primitive ? 1 : 0;
+ nir->info.gs.uses_end_primitive ? 1 : 0;
}
} else {
/* There are no control data bits in gen6. */
c.control_data_bits_per_vertex = 0;
}
c.control_data_header_size_bits =
- shader->info.gs.vertices_out * c.control_data_bits_per_vertex;
+ nir->info.gs.vertices_out * c.control_data_bits_per_vertex;
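/* For example, vertices_out = 128 with 2 control data bits per
 * vertex gives 256 bits of control data header.
 */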
/* 1 HWORD = 32 bytes = 256 bits */
prog_data->control_data_header_size_hwords =
unsigned output_size_bytes;
if (compiler->devinfo->gen >= 7) {
output_size_bytes =
- prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
+ prog_data->output_vertex_size_hwords * 32 * nir->info.gs.vertices_out;
output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
} else {
output_size_bytes = prog_data->output_vertex_size_hwords * 32;
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
}
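/* For example, output_size_bytes = 300 rounds up to 384, i.e. a URB
 * entry size of 3 in the 128-byte units used above.
 */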
- assert(shader->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
+ assert(nir->info.gs.output_primitive < ARRAY_SIZE(gl_prim_to_hw_prim));
prog_data->output_topology =
- gl_prim_to_hw_prim[shader->info.gs.output_primitive];
+ gl_prim_to_hw_prim[nir->info.gs.output_primitive];
- prog_data->vertices_in = shader->info.gs.vertices_in;
+ prog_data->vertices_in = nir->info.gs.vertices_in;
/* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
* need to program a URB read length of ceiling(num_slots / 2).
*/
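/* For example, 9 input slots require a read length of
 * DIV_ROUND_UP(9, 2) = 5.
 */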
}
if (is_scalar) {
- fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, shader,
+ fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, nir,
shader_time_index);
if (v.run_gs()) {
prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
&prog_data->base.base, false, MESA_SHADER_GEOMETRY);
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
const char *label =
- shader->info.label ? shader->info.label : "unnamed";
+ nir->info.label ? nir->info.label : "unnamed";
char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
- label, shader->info.name);
+ label, nir->info.name);
g.enable_debug(name);
}
g.generate_code(v.cfg, 8, v.shader_stats,
v.performance_analysis.require(), stats);
- g.add_const_data(shader->constant_data, shader->constant_data_size);
+ g.add_const_data(nir->constant_data, nir->constant_data_size);
return g.get_assembly();
}
}
likely(!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS))) {
prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
- vec4_gs_visitor v(compiler, log_data, &c, prog_data, shader,
+ vec4_gs_visitor v(compiler, log_data, &c, prog_data, nir,
mem_ctx, true /* no_spills */, shader_time_index);
/* Back up 'nr_params' and 'param', as they can be modified by the
 * DUAL_OBJECT visitor; if that compile fails, we run the fallback
 * path below and need to restore the original values.
 */
/* Success! Backup is not needed */
ralloc_free(param);
return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
- shader, &prog_data->base,
+ nir, &prog_data->base,
v.cfg,
v.performance_analysis.require(),
stats);
if (compiler->devinfo->gen >= 7)
gs = new vec4_gs_visitor(compiler, log_data, &c, prog_data,
- shader, mem_ctx, false /* no_spills */,
+ nir, mem_ctx, false /* no_spills */,
shader_time_index);
else
gs = new gen6_gs_visitor(compiler, log_data, &c, prog_data, prog,
- shader, mem_ctx, false /* no_spills */,
+ nir, mem_ctx, false /* no_spills */,
shader_time_index);
if (!gs->run()) {
if (error_str)
*error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
} else {
- ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, shader,
+ ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
&prog_data->base, gs->cfg,
gs->performance_analysis.require(),
stats);