X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fbroadcom%2Fcompiler%2Fvir.c;h=d06f789161fab09e4589e14889d24c16702b86c5;hp=4f88b86a5d20c224bfdd9d1649ca7bafddf54e72;hb=c3258f927c462cc2e24106e7841138452f7fec10;hpb=4f5fbd6490a52726d851263ad26c0a595e7cc4d0

diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c
index 4f88b86a5d2..d06f789161f 100644
--- a/src/broadcom/compiler/vir.c
+++ b/src/broadcom/compiler/vir.c
@@ -24,6 +24,7 @@
 #include "broadcom/common/v3d_device_info.h"
 #include "v3d_compiler.h"
 #include "util/u_prim.h"
+#include "compiler/nir/nir_schedule.h"
 
 int
 vir_get_nsrc(struct qinst *inst)
@@ -499,7 +500,8 @@ vir_compile_init(const struct v3d_compiler *compiler,
                  void (*debug_output)(const char *msg,
                                       void *debug_output_data),
                  void *debug_output_data,
-                 int program_id, int variant_id)
+                 int program_id, int variant_id,
+                 bool fallback_scheduler)
 {
         struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
 
@@ -511,6 +513,8 @@ vir_compile_init(const struct v3d_compiler *compiler,
         c->threads = 4;
         c->debug_output = debug_output;
         c->debug_output_data = debug_output_data;
+        c->compilation_result = V3D_COMPILATION_SUCCEEDED;
+        c->fallback_scheduler = fallback_scheduler;
 
         s = nir_shader_clone(c, s);
         c->s = s;
@@ -566,7 +570,7 @@ v3d_lower_nir(struct v3d_compile *c)
         }
 
         /* CS textures may not have return_size reflecting the shadow state. */
-        nir_foreach_variable(var, &c->s->uniforms) {
+        nir_foreach_uniform_variable(var, c->s) {
                 const struct glsl_type *type = glsl_without_array(var->type);
                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
 
@@ -680,6 +684,24 @@ v3d_gs_set_prog_data(struct v3d_compile *c,
         /* Output segment size is in sectors (8 rows of 32 bits per channel) */
         prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
 
+        /* Compute SIMD dispatch width and update VPM output size accordingly
+         * to ensure we can fit our program in memory. Available widths are
+         * 16, 8, 4, 1.
+         *
+         * Notice that at draw time we will have to consider VPM memory
+         * requirements from other stages and choose a smaller dispatch
+         * width if needed to fit the program in VPM memory.
+         */
+        prog_data->simd_width = 16;
+        while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||
+               prog_data->simd_width == 2) {
+                prog_data->simd_width >>= 1;
+                prog_data->vpm_output_size =
+                        align(prog_data->vpm_output_size, 2) / 2;
+        }
+        assert(prog_data->vpm_output_size <= 16);
+        assert(prog_data->simd_width != 2);
+
         prog_data->out_prim_type = c->s->info.gs.output_primitive;
         prog_data->num_invocations = c->s->info.gs.invocations;
 }
@@ -787,10 +809,10 @@ v3d_nir_lower_vs_early(struct v3d_compile *c)
                 used_outputs[comp] |= 1ull << slot;
         }
         NIR_PASS_V(c->s, nir_remove_unused_io_vars,
-                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
+                   nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
         NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
         v3d_optimize_nir(c->s);
-        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
+        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
 
         /* This must go before nir_lower_io */
         if (c->vs_key->per_vertex_point_size)
@@ -799,8 +821,11 @@
         NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                    type_size_vec4,
                    (nir_lower_io_options)0);
-        /* clean up nir_lower_io's deref_var remains */
+        /* clean up nir_lower_io's deref_var remains and do a constant folding pass
+         * on the code it generated.
+         */
         NIR_PASS_V(c->s, nir_opt_dce);
+        NIR_PASS_V(c->s, nir_opt_constant_folding);
 }
 
 static void
@@ -818,10 +843,10 @@ v3d_nir_lower_gs_early(struct v3d_compile *c)
                 used_outputs[comp] |= 1ull << slot;
         }
         NIR_PASS_V(c->s, nir_remove_unused_io_vars,
-                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
+                   nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
         NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
         v3d_optimize_nir(c->s);
-        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
+        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
 
         /* This must go before nir_lower_io */
         if (c->gs_key->per_vertex_point_size)
@@ -837,7 +862,7 @@
 static void
 v3d_fixup_fs_output_types(struct v3d_compile *c)
 {
-        nir_foreach_variable(var, &c->s->outputs) {
+        nir_foreach_shader_out_variable(var, c->s) {
                 uint32_t mask = 0;
 
                 switch (var->data.location) {
@@ -872,6 +897,13 @@ v3d_nir_lower_fs_early(struct v3d_compile *c)
 
         NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
 
+        if (c->fs_key->line_smoothing) {
+                v3d_nir_lower_line_smooth(c->s);
+                NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
+                /* The lowering pass can introduce new sysval reads */
+                nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
+        }
+
         /* If the shader has no non-TLB side effects, we can promote it to
          * enabling early_fragment_tests even if the user didn't.
          */
@@ -914,7 +946,7 @@ static void
 v3d_nir_lower_fs_late(struct v3d_compile *c)
 {
         if (c->fs_key->light_twoside)
-                NIR_PASS_V(c->s, nir_lower_two_sided_color);
+                NIR_PASS_V(c->s, nir_lower_two_sided_color, true);
 
         if (c->fs_key->clamp_color)
                 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
@@ -925,9 +957,15 @@ v3d_nir_lower_fs_late(struct v3d_compile *c)
                            false, NULL);
         }
 
+        /* In OpenGL the fragment shader can't read gl_ClipDistance[], but
+         * Vulkan allows it, in which case the SPIR-V compiler will declare
+         * VARYING_SLOT_CLIP_DIST0 as a compact array variable. Pass true as
+         * the last parameter to always operate with a compact array in both
+         * OpenGL and Vulkan so we don't have to care about the API we
+         * are using.
+         */
         if (c->key->ucp_enables)
-                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables,
-                           false);
+                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
 
         /* Note: FS input scalarizing must happen after
          * nir_lower_two_sided_color, which only handles a vec4 at a time.
@@ -962,43 +1000,76 @@ vir_get_max_temps(struct v3d_compile *c)
         return max_temps;
 }
 
-uint64_t *v3d_compile(const struct v3d_compiler *compiler,
-                      struct v3d_key *key,
-                      struct v3d_prog_data **out_prog_data,
-                      nir_shader *s,
-                      void (*debug_output)(const char *msg,
-                                           void *debug_output_data),
-                      void *debug_output_data,
-                      int program_id, int variant_id,
-                      uint32_t *final_assembly_size)
+enum v3d_dependency_class {
+        V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0
+};
+
+static bool
+v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,
+                            nir_schedule_dependency *dep,
+                            void *user_data)
 {
-        struct v3d_prog_data *prog_data;
-        struct v3d_compile *c = vir_compile_init(compiler, key, s,
-                                                 debug_output, debug_output_data,
-                                                 program_id, variant_id);
+        struct v3d_compile *c = user_data;
+
+        switch (intr->intrinsic) {
+        case nir_intrinsic_store_output:
+                /* Writing to location 0 overwrites the value passed in for
+                 * gl_PrimitiveID on geometry shaders
+                 */
+                if (c->s->info.stage != MESA_SHADER_GEOMETRY ||
+                    nir_intrinsic_base(intr) != 0)
+                        break;
+                nir_const_value *const_value =
+                        nir_src_as_const_value(intr->src[1]);
+
+                if (const_value == NULL)
+                        break;
+
+                uint64_t offset =
+                        nir_const_value_as_uint(*const_value,
+                                                nir_src_bit_size(intr->src[1]));
+                if (offset != 0)
+                        break;
+
+                dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
+                dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
+                return true;
+
+        case nir_intrinsic_load_primitive_id:
+                if (c->s->info.stage != MESA_SHADER_GEOMETRY)
+                        break;
+
+                dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
+                dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
+                return true;
+
+        default:
+                break;
+        }
+
+        return false;
+}
+
+static void
+v3d_attempt_compile(struct v3d_compile *c)
+{
         switch (c->s->info.stage) {
         case MESA_SHADER_VERTEX:
-                c->vs_key = (struct v3d_vs_key *)key;
-                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
+                c->vs_key = (struct v3d_vs_key *) c->key;
                 break;
         case MESA_SHADER_GEOMETRY:
-                c->gs_key = (struct v3d_gs_key *)key;
-                prog_data = rzalloc_size(NULL, sizeof(struct v3d_gs_prog_data));
+                c->gs_key = (struct v3d_gs_key *) c->key;
                 break;
         case MESA_SHADER_FRAGMENT:
-                c->fs_key = (struct v3d_fs_key *)key;
-                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
+                c->fs_key = (struct v3d_fs_key *) c->key;
                 break;
         case MESA_SHADER_COMPUTE:
-                prog_data = rzalloc_size(NULL,
-                                         sizeof(struct v3d_compute_prog_data));
                 break;
         default:
                 unreachable("unsupported shader stage");
         }
 
-
         switch (c->s->info.stage) {
         case MESA_SHADER_VERTEX:
                 v3d_nir_lower_vs_early(c);
@@ -1054,12 +1125,91 @@ uint64_t *v3d_compile(const struct v3d_compiler *compiler,
         NIR_PASS_V(c->s, nir_lower_bool_to_int32);
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
 
-        /* Schedule for about half our register space, to enable more shaders
-         * to hit 4 threads.
-         */
-        NIR_PASS_V(c->s, nir_schedule, 24);
+        struct nir_schedule_options schedule_options = {
+                /* Schedule for about half our register space, to enable more
+                 * shaders to hit 4 threads.
+                 */
+                .threshold = 24,
+
+                /* Vertex shaders share the same memory for inputs and outputs,
+                 * fragment and geometry shaders do not.
+                 */
+                .stages_with_shared_io_memory =
+                        (((1 << MESA_ALL_SHADER_STAGES) - 1) &
+                         ~((1 << MESA_SHADER_FRAGMENT) |
+                           (1 << MESA_SHADER_GEOMETRY))),
+                .fallback = c->fallback_scheduler,
+                .intrinsic_cb = v3d_intrinsic_dependency_cb,
+                .intrinsic_cb_data = c,
+        };
+        NIR_PASS_V(c->s, nir_schedule, &schedule_options);
 
         v3d_nir_to_vir(c);
+}
+
+uint32_t
+v3d_prog_data_size(gl_shader_stage stage)
+{
+        static const int prog_data_size[] = {
+                [MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),
+                [MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),
+                [MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),
+                [MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),
+        };
+
+        assert(stage >= 0 &&
+               stage < ARRAY_SIZE(prog_data_size) &&
+               prog_data_size[stage]);
+
+        return prog_data_size[stage];
+}
+
+uint64_t *v3d_compile(const struct v3d_compiler *compiler,
+                      struct v3d_key *key,
+                      struct v3d_prog_data **out_prog_data,
+                      nir_shader *s,
+                      void (*debug_output)(const char *msg,
+                                           void *debug_output_data),
+                      void *debug_output_data,
+                      int program_id, int variant_id,
+                      uint32_t *final_assembly_size)
+{
+        struct v3d_compile *c;
+
+        for (int i = 0; true; i++) {
+                c = vir_compile_init(compiler, key, s,
+                                     debug_output, debug_output_data,
+                                     program_id, variant_id,
+                                     i > 0 /* fallback_scheduler */);
+
+                v3d_attempt_compile(c);
+
+                if (i > 0 ||
+                    c->compilation_result !=
+                    V3D_COMPILATION_FAILED_REGISTER_ALLOCATION)
+                        break;
+
+                char *debug_msg;
+                int ret = asprintf(&debug_msg,
+                                   "Using fallback scheduler for %s",
+                                   vir_get_stage_name(c));
+
+                if (ret >= 0) {
+                        if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))
+                                fprintf(stderr, "%s\n", debug_msg);
+
+                        c->debug_output(debug_msg, c->debug_output_data);
+                        free(debug_msg);
+                }
+
+                vir_compile_destroy(c);
+        }
+
+        struct v3d_prog_data *prog_data;
+
+        prog_data = rzalloc_size(NULL, v3d_prog_data_size(c->s->info.stage));
 
         v3d_set_prog_data(c, prog_data);
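Illustration (not part of the patch): the geometry-shader dispatch-width selection added by the @@ -680,6 +684,24 @@ hunk can be read in isolation. The sketch below mirrors that loop under stated assumptions: pick_gs_dispatch_width() and the ALIGN macro are hypothetical stand-ins for the prog_data fields and Mesa's align() helper.

#include <assert.h>
#include <stdint.h>

#define ALIGN(v, a) (((v) + (a) - 1) / (a) * (a))

/* Start at a SIMD width of 16 and halve it (skipping 2) while the VPM
 * output segment, measured in sectors, still exceeds 16 and the width
 * can be reduced further; each halving of the width also halves the
 * output segment size.
 */
static void
pick_gs_dispatch_width(uint32_t vpm_output_size,
                       uint32_t *simd_width_out, uint32_t *vpm_size_out)
{
        uint32_t simd_width = 16;

        while ((simd_width > 1 && vpm_output_size > 16) || simd_width == 2) {
                simd_width >>= 1;
                vpm_output_size = ALIGN(vpm_output_size, 2) / 2;
        }
        assert(vpm_output_size <= 16);
        assert(simd_width != 2);

        *simd_width_out = simd_width;
        *vpm_size_out = vpm_output_size;
}

For example, a shader with a 40-sector output segment ends up at simd_width 4 with a 10-sector segment (40 -> 20 -> 10), and a shader that still exceeds 16 sectors at width 4 drops straight to width 1 because the "|| simd_width == 2" term forces one extra halving past 2.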