#include "v3d_compiler.h"
int
-vir_get_non_sideband_nsrc(struct qinst *inst)
+vir_get_nsrc(struct qinst *inst)
{
switch (inst->qpu.type) {
case V3D_QPU_INSTR_TYPE_BRANCH:
return 0;
}
-int
-vir_get_nsrc(struct qinst *inst)
-{
- int nsrc = vir_get_non_sideband_nsrc(inst);
-
- if (vir_has_implicit_uniform(inst))
- nsrc++;
-
- return nsrc;
-}
-
-bool
-vir_has_implicit_uniform(struct qinst *inst)
-{
- switch (inst->qpu.type) {
- case V3D_QPU_INSTR_TYPE_BRANCH:
- return true;
- case V3D_QPU_INSTR_TYPE_ALU:
- switch (inst->dst.file) {
- case QFILE_TLBU:
- return true;
- case QFILE_MAGIC:
- switch (inst->dst.index) {
- case V3D_QPU_WADDR_TLBU:
- case V3D_QPU_WADDR_TMUAU:
- case V3D_QPU_WADDR_SYNCU:
- return true;
- default:
- break;
- }
- break;
- default:
- return inst->has_implicit_uniform;
- }
- }
- return false;
-}
-
-/* The sideband uniform for textures gets stored after the normal ALU
- * arguments.
- */
-int
-vir_get_implicit_uniform_src(struct qinst *inst)
-{
- if (!vir_has_implicit_uniform(inst))
- return -1;
- return vir_get_nsrc(inst) - 1;
-}
-
/**
* Returns whether the instruction has any side effects that must be
* preserved.
if (inst->qpu.sig.ldtmu ||
inst->qpu.sig.ldvary ||
+ inst->qpu.sig.ldtlbu ||
+ inst->qpu.sig.ldtlb ||
inst->qpu.sig.wrtmuc ||
inst->qpu.sig.thrsw) {
return true;
}
struct qinst *
-vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
+vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{
struct qinst *inst = calloc(1, sizeof(*inst));
inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;
inst->dst = vir_nop_reg();
- inst->src[0] = src;
- inst->uniform = ~0;
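+ /* Branches carry their own entry in the uniform stream now; default to
+ * a constant 0 rather than the old "no uniform" (~0) marker.
+ */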
+ inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
return inst;
}
}
static int
-type_size_vec4(const struct glsl_type *type)
+type_size_vec4(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
}
}
+ /* CS textures may not have their return_size set up to reflect the
+ * shadow state, so force 16-bit tex returns for shadow samplers here.
+ */
+ nir_foreach_variable(var, &c->s->uniforms) {
+ const struct glsl_type *type = glsl_without_array(var->type);
+ unsigned array_len = MAX2(glsl_get_length(var->type), 1);
+
+ if (!glsl_type_is_sampler(type) ||
+ !glsl_sampler_type_is_shadow(type))
+ continue;
+
+ for (int i = 0; i < array_len; i++) {
+ tex_options.lower_tex_packing[var->data.binding + i] =
+ nir_lower_tex_packing_16;
+ }
+ }
+
NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
NIR_PASS_V(c->s, nir_lower_system_values);
+
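+ /* Move function-temporary variables that need it into scratch memory,
+ * then lower the resulting scratch access for v3d.
+ */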
+ NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
+ nir_var_function_temp,
+ 0,
+ glsl_get_natural_size_align_bytes);
+ NIR_PASS_V(c->s, v3d_nir_lower_scratch);
}
static void
count * sizeof(*ulist->contents));
}
-/* Copy the compiler UBO range state to the compiled shader, dropping out
- * arrays that were never referenced by an indirect load.
- *
- * (Note that QIR dead code elimination of an array access still leaves that
- * array alive, though)
- */
-static void
-v3d_set_prog_data_ubo(struct v3d_compile *c,
- struct v3d_prog_data *prog_data)
-{
- if (!c->num_ubo_ranges)
- return;
-
- prog_data->num_ubo_ranges = 0;
- prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
- c->num_ubo_ranges);
- for (int i = 0; i < c->num_ubo_ranges; i++) {
- if (!c->ubo_range_used[i])
- continue;
-
- struct v3d_ubo_range *range = &c->ubo_ranges[i];
- prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
- prog_data->ubo_size += range->size;
- }
-
- if (prog_data->ubo_size) {
- if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
- fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
- vir_get_stage_name(c),
- c->program_id, c->variant_id,
- prog_data->ubo_size / 4);
- }
- }
-}
-
static void
v3d_vs_set_prog_data(struct v3d_compile *c,
struct v3d_vs_prog_data *prog_data)
* batches.
*/
assert(c->devinfo->vpm_size);
- int sector_size = 16 * sizeof(uint32_t) * 8;
+ int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
int half_vpm = vpm_size_in_sectors / 2;
int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
prog_data->writes_z = c->writes_z;
prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
prog_data->uses_center_w = c->uses_center_w;
+ prog_data->uses_implicit_point_line_varyings =
+ c->uses_implicit_point_line_varyings;
+ prog_data->lock_scoreboard_on_first_thrsw =
+ c->lock_scoreboard_on_first_thrsw;
+}
+
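+/* The compute-specific prog_data currently just carries the workgroup's
+ * shared memory size.
+ */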
+static void
+v3d_cs_set_prog_data(struct v3d_compile *c,
+ struct v3d_compute_prog_data *prog_data)
+{
+ prog_data->shared_size = c->s->info.cs.shared_size;
}
static void
prog_data->threads = c->threads;
prog_data->single_seg = !c->last_thrsw;
prog_data->spill_size = c->spill_size;
+ prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;
v3d_set_prog_data_uniforms(c, prog_data);
- v3d_set_prog_data_ubo(c, prog_data);
- if (c->s->info.stage == MESA_SHADER_VERTEX) {
+ if (c->s->info.stage == MESA_SHADER_COMPUTE) {
+ v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
+ } else if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
} else {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
+
+ /* nir_lower_point_size works on the gl_PointSize variable, so it must
+ * run before nir_lower_io turns the output into intrinsics.
+ */
+ if (c->vs_key->per_vertex_point_size)
+ NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
+
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4,
(nir_lower_io_options)0);
+ /* Clean up the dead deref_var instructions that nir_lower_io leaves behind. */
+ NIR_PASS_V(c->s, nir_opt_dce);
}
static void
if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
v3d_fixup_fs_output_types(c);
+ NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
+
/* If the shader has no non-TLB side effects, we can promote it to
* enabling early_fragment_tests even if the user didn't.
*/
if (!(c->s->info.num_images ||
- c->s->info.num_ssbos ||
- c->s->info.num_abos)) {
+ c->s->info.num_ssbos)) {
c->s->info.fs.early_fragment_tests = true;
}
}
if (c->key->ucp_enables) {
NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
- false);
+ false, false, NULL);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
if (c->fs_key->clamp_color)
NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
- if (c->fs_key->alpha_test) {
- NIR_PASS_V(c->s, nir_lower_alpha_test,
- c->fs_key->alpha_test_func,
- false);
- }
-
if (c->key->ucp_enables)
- NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables,
+ false);
/* Note: FS input scalarizing must happen after
* nir_lower_two_sided_color, which only handles a vec4 at a time.
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}
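+/* Returns the maximum number of temporaries simultaneously live at any
+ * point in the program, by walking each temp's live range and counting
+ * how many ranges cover each instruction index.
+ */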
+static uint32_t
+vir_get_max_temps(struct v3d_compile *c)
+{
+ int max_ip = 0;
+ vir_for_each_inst_inorder(inst, c)
+ max_ip++;
+
+ uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);
+
+ for (int t = 0; t < c->num_temps; t++) {
+ for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
+ i < max_ip); i++) {
+ if (i > max_ip)
+ break;
+ pressure[i]++;
+ }
+ }
+
+ uint32_t max_temps = 0;
+ for (int i = 0; i < max_ip; i++)
+ max_temps = MAX2(max_temps, pressure[i]);
+
+ ralloc_free(pressure);
+
+ return max_temps;
+}
+
uint64_t *v3d_compile(const struct v3d_compiler *compiler,
struct v3d_key *key,
struct v3d_prog_data **out_prog_data,
c->fs_key = (struct v3d_fs_key *)key;
prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
break;
+ case MESA_SHADER_COMPUTE:
+ prog_data = rzalloc_size(NULL,
+ sizeof(struct v3d_compute_prog_data));
+ break;
default:
unreachable("unsupported shader stage");
}
if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_nir_lower_vs_early(c);
- } else {
+ } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
v3d_nir_lower_fs_early(c);
}
if (c->s->info.stage == MESA_SHADER_VERTEX) {
v3d_nir_lower_vs_late(c);
- } else {
+ } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
v3d_nir_lower_fs_late(c);
}
NIR_PASS_V(c->s, nir_lower_idiv);
v3d_optimize_nir(c->s);
+
+ /* Do late algebraic optimization to turn add(a, neg(b)) back into
+ * subs, then the mandatory cleanup after algebraic. Note that it may
+ * produce fnegs, and if so then we need to keep running to squash
+ * fneg(fneg(a)).
+ */
+ bool more_late_algebraic = true;
+ while (more_late_algebraic) {
+ more_late_algebraic = false;
+ NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+ NIR_PASS_V(c->s, nir_opt_constant_folding);
+ NIR_PASS_V(c->s, nir_copy_prop);
+ NIR_PASS_V(c->s, nir_opt_dce);
+ NIR_PASS_V(c->s, nir_opt_cse);
+ }
+
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
char *shaderdb;
int ret = asprintf(&shaderdb,
"%s shader: %d inst, %d threads, %d loops, "
- "%d uniforms, %d:%d spills:fills",
+ "%d uniforms, %d max-temps, %d:%d spills:fills, "
+ "%d sfu-stalls, %d inst-and-stalls",
vir_get_stage_name(c),
c->qpu_inst_count,
c->threads,
c->loops,
c->num_uniforms,
+ vir_get_max_temps(c),
c->spills,
- c->fills);
+ c->fills,
+ c->qpu_inst_stalled_count,
+ c->qpu_inst_count + c->qpu_inst_stalled_count);
if (ret >= 0) {
+ if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
+ fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
+
c->debug_output(shaderdb, c->debug_output_data);
free(shaderdb);
}
ralloc_free(c);
}
-struct qreg
-vir_uniform(struct v3d_compile *c,
- enum quniform_contents contents,
- uint32_t data)
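+/* Returns the index in the uniform stream for the given (contents, data)
+ * pair, reusing an existing matching entry before allocating a new one.
+ */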
+uint32_t
+vir_get_uniform_index(struct v3d_compile *c,
+ enum quniform_contents contents,
+ uint32_t data)
{
for (int i = 0; i < c->num_uniforms; i++) {
if (c->uniform_contents[i] == contents &&
c->uniform_data[i] == data) {
- return vir_reg(QFILE_UNIF, i);
+ return i;
}
}
c->uniform_contents[uniform] = contents;
c->uniform_data[uniform] = data;
- return vir_reg(QFILE_UNIF, uniform);
+ return uniform;
+}
+
+struct qreg
+vir_uniform(struct v3d_compile *c,
+ enum quniform_contents contents,
+ uint32_t data)
+{
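+ /* Load the uniform through a NOP carrying the ldunif signal, landing
+ * the value in a fresh temporary that becomes this uniform's def.
+ */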
+ struct qinst *inst = vir_NOP(c);
+ inst->qpu.sig.ldunif = true;
+ inst->uniform = vir_get_uniform_index(c, contents, data);
+ inst->dst = vir_get_temp(c);
+ c->defs[inst->dst.index] = inst;
+ return inst->dst;
}
#define OPTPASS(func) \
bool progress = false;
OPTPASS(vir_opt_copy_propagate);
+ OPTPASS(vir_opt_redundant_flags);
OPTPASS(vir_opt_dead_code);
OPTPASS(vir_opt_small_immediates);