if (inst->qpu.sig.ldtmu ||
inst->qpu.sig.ldvary ||
+ inst->qpu.sig.ldtlbu ||
+ inst->qpu.sig.ldtlb ||
inst->qpu.sig.wrtmuc ||
inst->qpu.sig.thrsw) {
return true;
}
}
+ /* CS textures may not have return_size reflecting the shadow state. */
+ nir_foreach_variable(var, &c->s->uniforms) {
+ const struct glsl_type *type = glsl_without_array(var->type);
+ unsigned array_len = MAX2(glsl_get_length(var->type), 1);
+
+ if (!glsl_type_is_sampler(type) ||
+ !glsl_sampler_type_is_shadow(type))
+ continue;
+
+ for (int i = 0; i < array_len; i++) {
+ tex_options.lower_tex_packing[var->data.binding + i] =
+ nir_lower_tex_packing_16;
+ }
+ }
+
NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
NIR_PASS_V(c->s, nir_lower_system_values);
+
+ NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
+ nir_var_function_temp,
+ 0,
+ glsl_get_natural_size_align_bytes);
+ NIR_PASS_V(c->s, v3d_nir_lower_scratch);
}
static void
prog_data->writes_z = c->writes_z;
prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
prog_data->uses_center_w = c->uses_center_w;
+ prog_data->uses_implicit_point_line_varyings =
+ c->uses_implicit_point_line_varyings;
+ prog_data->lock_scoreboard_on_first_thrsw =
+ c->lock_scoreboard_on_first_thrsw;
+}
+
+static void
+v3d_cs_set_prog_data(struct v3d_compile *c,
+                     struct v3d_compute_prog_data *prog_data)
+{
+        /* Record the compute shader's declared shared-memory size from the
+         * NIR shader info; this is the only CS-specific field filled in here.
+         */
+        prog_data->shared_size = c->s->info.cs.shared_size;
}
static void
v3d_set_prog_data_uniforms(c, prog_data);
- if (c->s->info.stage == MESA_SHADER_VERTEX) {
+ if (c->s->info.stage == MESA_SHADER_COMPUTE) {
+ v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
+ } else if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
} else {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
+
+ /* This must go before nir_lower_io */
+ if (c->vs_key->per_vertex_point_size)
+ NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
+
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
+ /* clean up nir_lower_io's deref_var remains */
+ NIR_PASS_V(c->s, nir_opt_dce);
}
static void
if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
v3d_fixup_fs_output_types(c);
+ NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
+
/* If the shader has no non-TLB side effects, we can promote it to
* enabling early_fragment_tests even if the user didn't.
*/
c->fs_key = (struct v3d_fs_key *)key;
prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
break;
+ case MESA_SHADER_COMPUTE:
+ prog_data = rzalloc_size(NULL,
+ sizeof(struct v3d_compute_prog_data));
+ break;
default:
unreachable("unsupported shader stage");
}
if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_nir_lower_vs_early(c);
- } else {
+ } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
v3d_nir_lower_fs_early(c);
}
if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_nir_lower_vs_late(c);
- } else {
+ } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
v3d_nir_lower_fs_late(c);
}
char *shaderdb;
int ret = asprintf(&shaderdb,
"%s shader: %d inst, %d threads, %d loops, "
- "%d uniforms, %d max-temps, %d:%d spills:fills",
+ "%d uniforms, %d max-temps, %d:%d spills:fills, "
+ "%d sfu-stalls, %d inst-and-stalls",
vir_get_stage_name(c),
c->qpu_inst_count,
c->threads,
c->num_uniforms,
vir_get_max_temps(c),
c->spills,
- c->fills);
+ c->fills,
+ c->qpu_inst_stalled_count,
+ c->qpu_inst_count + c->qpu_inst_stalled_count);
if (ret >= 0) {
if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
fprintf(stderr, "SHADER-DB: %s\n", shaderdb);