X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fbroadcom%2Fcompiler%2Fvir.c;h=dc5d3fe3bed6d708124adddc70c1dccec20bde06;hb=46182fc1da0b5cabc09e818bddc6b7968d4d2b7b;hp=6b55b0e03bc04a19e018c1e8587b3b155cc9d166;hpb=a91b158bd9e1b6bc08f1d5ac350cd8b68e372042;p=mesa.git

diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c
index 6b55b0e03bc..dc5d3fe3bed 100644
--- a/src/broadcom/compiler/vir.c
+++ b/src/broadcom/compiler/vir.c
@@ -25,7 +25,7 @@
 #include "v3d_compiler.h"
 
 int
-vir_get_non_sideband_nsrc(struct qinst *inst)
+vir_get_nsrc(struct qinst *inst)
 {
         switch (inst->qpu.type) {
         case V3D_QPU_INSTR_TYPE_BRANCH:
@@ -40,45 +40,6 @@ vir_get_non_sideband_nsrc(struct qinst *inst)
         return 0;
 }
 
-int
-vir_get_nsrc(struct qinst *inst)
-{
-        int nsrc = vir_get_non_sideband_nsrc(inst);
-
-        if (vir_has_implicit_uniform(inst))
-                nsrc++;
-
-        return nsrc;
-}
-
-bool
-vir_has_implicit_uniform(struct qinst *inst)
-{
-        switch (inst->qpu.type) {
-        case V3D_QPU_INSTR_TYPE_BRANCH:
-                return true;
-        case V3D_QPU_INSTR_TYPE_ALU:
-                switch (inst->dst.file) {
-                case QFILE_TLBU:
-                        return true;
-                default:
-                        return inst->has_implicit_uniform;
-                }
-        }
-        return false;
-}
-
-/* The sideband uniform for textures gets stored after the normal ALU
- * arguments.
- */
-int
-vir_get_implicit_uniform_src(struct qinst *inst)
-{
-        if (!vir_has_implicit_uniform(inst))
-                return -1;
-        return vir_get_nsrc(inst) - 1;
-}
-
 /**
  * Returns whether the instruction has any side effects that must be
  * preserved.
@@ -114,6 +75,8 @@ vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
 
         if (inst->qpu.sig.ldtmu ||
             inst->qpu.sig.ldvary ||
+            inst->qpu.sig.ldtlbu ||
+            inst->qpu.sig.ldtlb ||
             inst->qpu.sig.wrtmuc ||
             inst->qpu.sig.thrsw) {
                 return true;
@@ -122,38 +85,6 @@ vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
         return false;
 }
 
-bool
-vir_is_float_input(struct qinst *inst)
-{
-        /* XXX: More instrs */
-        switch (inst->qpu.type) {
-        case V3D_QPU_INSTR_TYPE_BRANCH:
-                return false;
-        case V3D_QPU_INSTR_TYPE_ALU:
-                switch (inst->qpu.alu.add.op) {
-                case V3D_QPU_A_FADD:
-                case V3D_QPU_A_FSUB:
-                case V3D_QPU_A_FMIN:
-                case V3D_QPU_A_FMAX:
-                case V3D_QPU_A_FTOIN:
-                        return true;
-                default:
-                        break;
-                }
-
-                switch (inst->qpu.alu.mul.op) {
-                case V3D_QPU_M_FMOV:
-                case V3D_QPU_M_VFMUL:
-                case V3D_QPU_M_FMUL:
-                        return true;
-                default:
-                        break;
-                }
-        }
-
-        return false;
-}
-
 bool
 vir_is_raw_mov(struct qinst *inst)
 {
@@ -168,6 +99,13 @@ vir_is_raw_mov(struct qinst *inst)
                 return false;
         }
 
+        if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
+            inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
+            inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
+            inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
+                return false;
+        }
+
         if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
             inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                 return false;
@@ -203,17 +141,6 @@ vir_is_tex(struct qinst *inst)
         return false;
 }
 
-bool
-vir_depends_on_flags(struct qinst *inst)
-{
-        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH) {
-                return (inst->qpu.branch.cond != V3D_QPU_BRANCH_COND_ALWAYS);
-        } else {
-                return (inst->qpu.flags.ac != V3D_QPU_COND_NONE &&
-                        inst->qpu.flags.mc != V3D_QPU_COND_NONE);
-        }
-}
-
 bool
 vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
 {
@@ -302,6 +229,17 @@ vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
         }
 }
 
+void
+vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
+{
+        if (vir_is_add(inst)) {
+                inst->qpu.flags.auf = uf;
+        } else {
+                assert(vir_is_mul(inst));
+                inst->qpu.flags.muf = uf;
+        }
+}
+
 #if 0
 uint8_t
 vir_channels_written(struct qinst *inst)
@@ -411,7 +349,7 @@ vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct q
 }
 
 struct qinst *
-vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
+vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
 {
         struct qinst *inst = calloc(1, sizeof(*inst));
 
@@ -423,9 +361,8 @@ vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
         inst->qpu.branch.ub = true;
         inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;
 
-        inst->dst = vir_reg(QFILE_NULL, 0);
-        inst->src[0] = src;
-        inst->uniform = ~0;
+        inst->dst = vir_nop_reg();
+        inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
 
         return inst;
 }
@@ -558,6 +495,9 @@ static struct v3d_compile *
 vir_compile_init(const struct v3d_compiler *compiler,
                  struct v3d_key *key,
                  nir_shader *s,
+                 void (*debug_output)(const char *msg,
+                                      void *debug_output_data),
+                 void *debug_output_data,
                  int program_id, int variant_id)
 {
         struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
@@ -568,6 +508,8 @@ vir_compile_init(const struct v3d_compiler *compiler,
         c->program_id = program_id;
         c->variant_id = variant_id;
         c->threads = 4;
+        c->debug_output = debug_output;
+        c->debug_output_data = debug_output_data;
 
         s = nir_shader_clone(c, s);
         c->s = s;
@@ -576,7 +518,6 @@ vir_compile_init(const struct v3d_compiler *compiler,
         vir_set_emit_block(c, vir_new_block(c));
 
         c->output_position_index = -1;
-        c->output_point_size_index = -1;
         c->output_sample_mask_index = -1;
 
         c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
@@ -585,11 +526,19 @@ vir_compile_init(const struct v3d_compiler *compiler,
         return c;
 }
 
+static int
+type_size_vec4(const struct glsl_type *type, bool bindless)
+{
+        return glsl_count_attribute_slots(type, false);
+}
+
 static void
 v3d_lower_nir(struct v3d_compile *c)
 {
         struct nir_lower_tex_options tex_options = {
                 .lower_txd = true,
+                .lower_tg4_broadcom_swizzle = true,
+
+                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                 .lower_txp = ~0,
 
                 /* Apply swizzles to all samplers. */
@@ -609,17 +558,35 @@ v3d_lower_nir(struct v3d_compile *c)
                         tex_options.saturate_t |= 1 << i;
                 if (c->key->tex[i].clamp_r)
                         tex_options.saturate_r |= 1 << i;
+                if (c->key->tex[i].return_size == 16) {
+                        tex_options.lower_tex_packing[i] =
+                                nir_lower_tex_packing_16;
+                }
+        }
+
+        /* CS textures may not have return_size reflecting the shadow state. */
+        nir_foreach_variable(var, &c->s->uniforms) {
+                const struct glsl_type *type = glsl_without_array(var->type);
+                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
+
+                if (!glsl_type_is_sampler(type) ||
+                    !glsl_sampler_type_is_shadow(type))
+                        continue;
+
+                for (int i = 0; i < array_len; i++) {
+                        tex_options.lower_tex_packing[var->data.binding + i] =
+                                nir_lower_tex_packing_16;
+                }
         }
 
         NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
-}
+        NIR_PASS_V(c->s, nir_lower_system_values);
 
-static void
-v3d_lower_nir_late(struct v3d_compile *c)
-{
-        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
-        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
-        NIR_PASS_V(c->s, nir_lower_idiv);
+        NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
+                   nir_var_function_temp,
+                   0,
+                   glsl_get_natural_size_align_bytes);
+        NIR_PASS_V(c->s, v3d_nir_lower_scratch);
 }
 
 static void
@@ -638,105 +605,10 @@ v3d_set_prog_data_uniforms(struct v3d_compile *c,
                count * sizeof(*ulist->contents));
 }
 
-/* Copy the compiler UBO range state to the compiled shader, dropping out
- * arrays that were never referenced by an indirect load.
- *
- * (Note that QIR dead code elimination of an array access still leaves that
- * array alive, though)
- */
-static void
-v3d_set_prog_data_ubo(struct v3d_compile *c,
-                      struct v3d_prog_data *prog_data)
-{
-        if (!c->num_ubo_ranges)
-                return;
-
-        prog_data->num_ubo_ranges = 0;
-        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
-                                             c->num_ubo_ranges);
-        for (int i = 0; i < c->num_ubo_ranges; i++) {
-                if (!c->ubo_range_used[i])
-                        continue;
-
-                struct v3d_ubo_range *range = &c->ubo_ranges[i];
-                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
-                prog_data->ubo_size += range->size;
-        }
-
-        if (prog_data->ubo_size) {
-                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
-                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
-                                vir_get_stage_name(c),
-                                c->program_id, c->variant_id,
-                                prog_data->ubo_size / 4);
-                }
-        }
-}
-
 static void
-v3d_set_prog_data(struct v3d_compile *c,
-                  struct v3d_prog_data *prog_data)
-{
-        prog_data->threads = c->threads;
-        prog_data->single_seg = !c->last_thrsw;
-        prog_data->spill_size = c->spill_size;
-
-        v3d_set_prog_data_uniforms(c, prog_data);
-        v3d_set_prog_data_ubo(c, prog_data);
-}
-
-static uint64_t *
-v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
-{
-        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);
-
-        uint64_t *qpu_insts = malloc(*final_assembly_size);
-        if (!qpu_insts)
-                return NULL;
-
-        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);
-
-        vir_compile_destroy(c);
-
-        return qpu_insts;
-}
-
-uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
-                         struct v3d_vs_key *key,
-                         struct v3d_vs_prog_data *prog_data,
-                         nir_shader *s,
-                         int program_id, int variant_id,
-                         uint32_t *final_assembly_size)
+v3d_vs_set_prog_data(struct v3d_compile *c,
+                     struct v3d_vs_prog_data *prog_data)
 {
-        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
-                                                 program_id, variant_id);
-
-        c->vs_key = key;
-
-        v3d_lower_nir(c);
-
-        if (key->clamp_color)
-                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
-
-        if (key->base.ucp_enables) {
-                NIR_PASS_V(c->s, nir_lower_clip_vs, key->base.ucp_enables);
-                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
-                           nir_var_shader_out);
-        }
-
-        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
-        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
-
-        v3d_lower_nir_late(c);
-        v3d_optimize_nir(c->s);
-        NIR_PASS_V(c->s, nir_convert_from_ssa, true);
-
-        v3d_nir_to_vir(c);
-
-        v3d_set_prog_data(c, &prog_data->base);
-
-        prog_data->base.num_inputs = c->num_inputs;
-
         /* The vertex data gets format converted by the VPM so that
          * each attribute channel takes up a VPM column. Precompute
          * the sizes for the shader record.
@@ -746,9 +618,9 @@ uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
                 prog_data->vpm_input_size += c->vattr_sizes[i];
         }
 
-        prog_data->uses_vid = (s->info.system_values_read &
+        prog_data->uses_vid = (c->s->info.system_values_read &
                                (1ull << SYSTEM_VALUE_VERTEX_ID));
-        prog_data->uses_iid = (s->info.system_values_read &
+        prog_data->uses_iid = (c->s->info.system_values_read &
                                (1ull << SYSTEM_VALUE_INSTANCE_ID));
 
         if (prog_data->uses_vid)
@@ -760,7 +632,15 @@ uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
          * channel).
          */
         prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
-        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;
+        prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
+
+        /* Set us up for shared input/output segments. This is apparently
+         * necessary for our VCM setup to avoid varying corruption.
+         */
+        prog_data->separate_segments = false;
+        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
+                                          prog_data->vpm_input_size);
+        prog_data->vpm_input_size = 0;
 
         /* Compute VCM cache size. We set up our program to take up less than
          * half of the VPM, so that any set of bin and render programs won't
@@ -771,22 +651,20 @@ uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
          * batches.
          */
         assert(c->devinfo->vpm_size);
-        int sector_size = 16 * sizeof(uint32_t) * 8;
+        int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
         int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
         int half_vpm = vpm_size_in_sectors / 2;
         int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
         int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
         assert(vpm_output_batches >= 2);
         prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
-
-        return v3d_return_qpu_insts(c, final_assembly_size);
 }
 
 static void
 v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                             struct v3d_fs_prog_data *prog_data)
 {
-        prog_data->base.num_inputs = c->num_inputs;
+        prog_data->num_inputs = c->num_inputs;
         memcpy(prog_data->input_slots, c->input_slots,
                c->num_inputs * sizeof(*c->input_slots));
 
@@ -804,6 +682,95 @@ v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
         }
 }
 
+static void
+v3d_fs_set_prog_data(struct v3d_compile *c,
+                     struct v3d_fs_prog_data *prog_data)
+{
+        v3d_set_fs_prog_data_inputs(c, prog_data);
+        prog_data->writes_z = c->writes_z;
+        prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
+        prog_data->uses_center_w = c->uses_center_w;
+        prog_data->uses_implicit_point_line_varyings =
+                c->uses_implicit_point_line_varyings;
+        prog_data->lock_scoreboard_on_first_thrsw =
+                c->lock_scoreboard_on_first_thrsw;
+}
+
+static void
+v3d_cs_set_prog_data(struct v3d_compile *c,
+                     struct v3d_compute_prog_data *prog_data)
+{
+        prog_data->shared_size = c->s->info.cs.shared_size;
+}
+
+static void
+v3d_set_prog_data(struct v3d_compile *c,
+                  struct v3d_prog_data *prog_data)
+{
+        prog_data->threads = c->threads;
+        prog_data->single_seg = !c->last_thrsw;
+        prog_data->spill_size = c->spill_size;
+        prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;
+
+        v3d_set_prog_data_uniforms(c, prog_data);
+
+        if (c->s->info.stage == MESA_SHADER_COMPUTE) {
+                v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
+        } else if (c->s->info.stage == MESA_SHADER_VERTEX) {
+                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
+        } else {
+                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
+                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
+        }
+}
+
+static uint64_t *
+v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
+{
+        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);
+
+        uint64_t *qpu_insts = malloc(*final_assembly_size);
+        if (!qpu_insts)
+                return NULL;
+
+        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);
+
+        vir_compile_destroy(c);
+
+        return qpu_insts;
+}
+
+static void
+v3d_nir_lower_vs_early(struct v3d_compile *c)
+{
+        /* Split our I/O vars and dead code eliminate the unused
+         * components.
+         */
+        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
+                   nir_var_shader_in | nir_var_shader_out);
+        uint64_t used_outputs[4] = {0};
+        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
+                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
+                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
+                used_outputs[comp] |= 1ull << slot;
+        }
+        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
+                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
+        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
+        v3d_optimize_nir(c->s);
+        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
+
+        /* This must go before nir_lower_io */
+        if (c->vs_key->per_vertex_point_size)
+                NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
+
+        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
+                   type_size_vec4,
+                   (nir_lower_io_options)0);
+        /* clean up nir_lower_io's deref_var remains */
+        NIR_PASS_V(c->s, nir_opt_dce);
+}
+
 static void
 v3d_fixup_fs_output_types(struct v3d_compile *c)
 {
@@ -834,57 +801,189 @@ v3d_fixup_fs_output_types(struct v3d_compile *c)
         }
 }
 
-uint64_t *v3d_compile_fs(const struct v3d_compiler *compiler,
-                         struct v3d_fs_key *key,
-                         struct v3d_fs_prog_data *prog_data,
-                         nir_shader *s,
-                         int program_id, int variant_id,
-                         uint32_t *final_assembly_size)
+static void
+v3d_nir_lower_fs_early(struct v3d_compile *c)
 {
-        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
-                                                 program_id, variant_id);
+        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
+                v3d_fixup_fs_output_types(c);
 
-        c->fs_key = key;
+        NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
 
-        if (key->int_color_rb || key->uint_color_rb)
-                v3d_fixup_fs_output_types(c);
+        /* If the shader has no non-TLB side effects, we can promote it to
+         * enabling early_fragment_tests even if the user didn't.
+         */
+        if (!(c->s->info.num_images ||
+              c->s->info.num_ssbos)) {
+                c->s->info.fs.early_fragment_tests = true;
+        }
+}
 
-        v3d_lower_nir(c);
+static void
+v3d_nir_lower_vs_late(struct v3d_compile *c)
+{
+        if (c->vs_key->clamp_color)
+                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
 
-        if (key->light_twoside)
+        if (c->key->ucp_enables) {
+                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
+                           false, false, NULL);
+                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
+                           nir_var_shader_out);
+        }
+
+        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
+        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
+}
+
+static void
+v3d_nir_lower_fs_late(struct v3d_compile *c)
+{
+        if (c->fs_key->light_twoside)
                 NIR_PASS_V(c->s, nir_lower_two_sided_color);
 
-        if (key->clamp_color)
+        if (c->fs_key->clamp_color)
                 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
 
-        if (key->alpha_test) {
-                NIR_PASS_V(c->s, nir_lower_alpha_test, key->alpha_test_func,
+        if (c->key->ucp_enables)
+                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables,
                            false);
-        }
-
-        if (key->base.ucp_enables)
-                NIR_PASS_V(c->s, nir_lower_clip_fs, key->base.ucp_enables);
 
         /* Note: FS input scalarizing must happen after
          * nir_lower_two_sided_color, which only handles a vec4 at a time.
          */
         NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
+}
+
+static uint32_t
+vir_get_max_temps(struct v3d_compile *c)
+{
+        int max_ip = 0;
+        vir_for_each_inst_inorder(inst, c)
+                max_ip++;
+
+        uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);
+
+        for (int t = 0; t < c->num_temps; t++) {
+                for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
+                                                i < max_ip); i++) {
+                        if (i > max_ip)
+                                break;
+                        pressure[i]++;
+                }
+        }
+
+        uint32_t max_temps = 0;
+        for (int i = 0; i < max_ip; i++)
+                max_temps = MAX2(max_temps, pressure[i]);
+
+        ralloc_free(pressure);
+
+        return max_temps;
+}
+
+uint64_t *v3d_compile(const struct v3d_compiler *compiler,
+                      struct v3d_key *key,
+                      struct v3d_prog_data **out_prog_data,
+                      nir_shader *s,
+                      void (*debug_output)(const char *msg,
+                                           void *debug_output_data),
+                      void *debug_output_data,
+                      int program_id, int variant_id,
+                      uint32_t *final_assembly_size)
+{
+        struct v3d_prog_data *prog_data;
+        struct v3d_compile *c = vir_compile_init(compiler, key, s,
+                                                 debug_output, debug_output_data,
+                                                 program_id, variant_id);
+
+        switch (c->s->info.stage) {
+        case MESA_SHADER_VERTEX:
+                c->vs_key = (struct v3d_vs_key *)key;
+                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
+                break;
+        case MESA_SHADER_FRAGMENT:
+                c->fs_key = (struct v3d_fs_key *)key;
+                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
+                break;
+        case MESA_SHADER_COMPUTE:
+                prog_data = rzalloc_size(NULL,
+                                         sizeof(struct v3d_compute_prog_data));
+                break;
+        default:
+                unreachable("unsupported shader stage");
+        }
+
+        if (c->s->info.stage == MESA_SHADER_VERTEX) {
+                v3d_nir_lower_vs_early(c);
+        } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
+                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
+                v3d_nir_lower_fs_early(c);
+        }
+
+        v3d_lower_nir(c);
+
+        if (c->s->info.stage == MESA_SHADER_VERTEX) {
+                v3d_nir_lower_vs_late(c);
+        } else if (c->s->info.stage != MESA_SHADER_COMPUTE) {
+                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
+                v3d_nir_lower_fs_late(c);
+        }
+
+        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
+        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
+        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
+        NIR_PASS_V(c->s, nir_lower_idiv);
 
-        v3d_lower_nir_late(c);
         v3d_optimize_nir(c->s);
+
+        /* Do late algebraic optimization to turn add(a, neg(b)) back into
+         * subs, then the mandatory cleanup after algebraic. Note that it may
+         * produce fnegs, and if so then we need to keep running to squash
+         * fneg(fneg(a)).
+         */
+        bool more_late_algebraic = true;
+        while (more_late_algebraic) {
+                more_late_algebraic = false;
+                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+                NIR_PASS_V(c->s, nir_opt_constant_folding);
+                NIR_PASS_V(c->s, nir_copy_prop);
+                NIR_PASS_V(c->s, nir_opt_dce);
+                NIR_PASS_V(c->s, nir_opt_cse);
+        }
+
+        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
 
         v3d_nir_to_vir(c);
 
-        v3d_set_prog_data(c, &prog_data->base);
-        v3d_set_fs_prog_data_inputs(c, prog_data);
-        prog_data->writes_z = (c->s->info.outputs_written &
-                               (1 << FRAG_RESULT_DEPTH));
-        prog_data->discard = (c->s->info.fs.uses_discard ||
-                              c->fs_key->sample_alpha_to_coverage);
-        prog_data->uses_center_w = c->uses_center_w;
+        v3d_set_prog_data(c, prog_data);
+
+        *out_prog_data = prog_data;
+
+        char *shaderdb;
+        int ret = asprintf(&shaderdb,
+                           "%s shader: %d inst, %d threads, %d loops, "
+                           "%d uniforms, %d max-temps, %d:%d spills:fills, "
+                           "%d sfu-stalls, %d inst-and-stalls",
+                           vir_get_stage_name(c),
+                           c->qpu_inst_count,
+                           c->threads,
+                           c->loops,
+                           c->num_uniforms,
+                           vir_get_max_temps(c),
+                           c->spills,
+                           c->fills,
+                           c->qpu_inst_stalled_count,
+                           c->qpu_inst_count + c->qpu_inst_stalled_count);
+        if (ret >= 0) {
+                if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
+                        fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
+
+                c->debug_output(shaderdb, c->debug_output_data);
+                free(shaderdb);
+        }
 
-        return v3d_return_qpu_insts(c, final_assembly_size);
+        return v3d_return_qpu_insts(c, final_assembly_size);
 }
 
 void
@@ -940,15 +1039,15 @@ vir_compile_destroy(struct v3d_compile *c)
         ralloc_free(c);
 }
 
-struct qreg
-vir_uniform(struct v3d_compile *c,
-            enum quniform_contents contents,
-            uint32_t data)
+uint32_t
+vir_get_uniform_index(struct v3d_compile *c,
+                      enum quniform_contents contents,
+                      uint32_t data)
 {
         for (int i = 0; i < c->num_uniforms; i++) {
                 if (c->uniform_contents[i] == contents &&
                     c->uniform_data[i] == data) {
-                        return vir_reg(QFILE_UNIF, i);
+                        return i;
                 }
         }
 
@@ -969,46 +1068,20 @@ vir_uniform(struct v3d_compile *c,
         c->uniform_contents[uniform] = contents;
         c->uniform_data[uniform] = data;
 
-        return vir_reg(QFILE_UNIF, uniform);
-}
-
-static bool
-vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
-{
-        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
-                                      v3d_qpu_uses_sfu(&inst->qpu))) {
-                return false;
-        }
-
-        return true;
+        return uniform;
 }
 
-void
-vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
+struct qreg
+vir_uniform(struct v3d_compile *c,
+            enum quniform_contents contents,
+            uint32_t data)
 {
-        struct qinst *last_inst = NULL;
-
-        if (!list_empty(&c->cur_block->instructions)) {
-                last_inst = (struct qinst *)c->cur_block->instructions.prev;
-
-                /* Can't stuff the PF into the last last inst if our cursor
-                 * isn't pointing after it.
-                 */
-                struct vir_cursor after_inst = vir_after_inst(last_inst);
-                if (c->cursor.mode != after_inst.mode ||
-                    c->cursor.link != after_inst.link)
-                        last_inst = NULL;
-        }
-
-        if (src.file != QFILE_TEMP ||
-            !c->defs[src.index] ||
-            last_inst != c->defs[src.index] ||
-            !vir_can_set_flags(c, last_inst)) {
-                /* XXX: Make the MOV be the appropriate type */
-                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
-        }
-
-        vir_set_pf(last_inst, pf);
+        struct qinst *inst = vir_NOP(c);
+        inst->qpu.sig.ldunif = true;
+        inst->uniform = vir_get_uniform_index(c, contents, data);
+        inst->dst = vir_get_temp(c);
+        c->defs[inst->dst.index] = inst;
+        return inst->dst;
 }
 
 #define OPTPASS(func) \
@@ -1035,6 +1108,7 @@ vir_optimize(struct v3d_compile *c)
                 bool progress = false;
 
                 OPTPASS(vir_opt_copy_propagate);
+                OPTPASS(vir_opt_redundant_flags);
                 OPTPASS(vir_opt_dead_code);
                 OPTPASS(vir_opt_small_immediates);
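
Note on the interface change above: the per-stage entry points v3d_compile_vs()/v3d_compile_fs() are folded into a single v3d_compile() that dispatches on c->s->info.stage, takes a debug-output callback, and hands the stage-specific prog_data back through an out-parameter. A minimal sketch of a migrated call site follows; the callback, variable names, and surrounding driver code are hypothetical illustrations, not part of this patch:

    /* Hypothetical callback: v3d_compile() now reports the shader-db
     * statistics line through this hook in addition to the optional
     * stderr print. */
    static void
    my_debug_output(const char *msg, void *data)
    {
            fprintf(stderr, "v3d: %s\n", msg);
    }

    /* ...inside a hypothetical shader-variant compile path... */
    struct v3d_prog_data *prog_data = NULL;
    uint32_t assembly_size;
    uint64_t *qpu_insts = v3d_compile(compiler, &key->base, &prog_data, s,
                                      my_debug_output, NULL,
                                      program_id, variant_id,
                                      &assembly_size);

The caller then interprets prog_data as struct v3d_vs_prog_data, v3d_fs_prog_data, or v3d_compute_prog_data according to the stage it compiled.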
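A worked example of the VCM cache-size arithmetic in v3d_vs_set_prog_data() (the VPM size is illustrative; the real value comes from c->devinfo->vpm_size): sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8 = 16 * 4 * 8 = 512 bytes, so the rename from the literal 16 keeps the old value. Assuming an 8 KiB VPM, vpm_size_in_sectors = 8192 / 512 = 16 and half_vpm = 8. With the shared-segment setup forcing vpm_input_size to 0, vpm_output_sectors = 8; for a shader with vpm_output_size = 2 sectors that gives vpm_output_batches = 4 and vcm_cache_size = CLAMP(4 - 1, 2, 4) = 3.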
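Likewise for the branch change: uniforms no longer live in a QFILE_UNIF register file read through src[0]. vir_branch_inst() now records a uniform-stream index on the instruction itself, allocating the QUNIFORM_CONSTANT 0 slot via vir_get_uniform_index(), and vir_uniform() correspondingly becomes a NOP carrying the ldunif signal, as the new body above shows. A caller sketch under the new signature (the condition and emit-helper usage are illustrative, not from this patch):

    struct qinst *branch = vir_branch_inst(c, V3D_QPU_BRANCH_COND_ALLNA);
    vir_emit_nondef(c, branch);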