X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fglsl%2Flower_packed_varyings.cpp;h=5e844c792e8df2b1e8293bd05e9207d7677630d7;hb=153b8b35257fb5d68735b5e43e48b0cdb8b15170;hp=4f617225c0f2169680cda90b60b5dfafb88f08bf;hpb=3e820e3aef382fed73b6b8128596424ed7690951;p=mesa.git

diff --git a/src/glsl/lower_packed_varyings.cpp b/src/glsl/lower_packed_varyings.cpp
index 4f617225c0f..5e844c792e8 100644
--- a/src/glsl/lower_packed_varyings.cpp
+++ b/src/glsl/lower_packed_varyings.cpp
@@ -148,6 +148,8 @@
 #include "ir.h"
 #include "ir_optimization.h"
 
+namespace {
+
 /**
  * Visitor that performs varying packing. For each varying declared in the
  * shader, this visitor determines whether it needs to be packed. If so, it
@@ -158,8 +160,7 @@
 class lower_packed_varyings_visitor
 {
 public:
-   lower_packed_varyings_visitor(void *mem_ctx, unsigned location_base,
-                                 unsigned locations_used,
+   lower_packed_varyings_visitor(void *mem_ctx, unsigned locations_used,
                                  ir_variable_mode mode,
                                  unsigned gs_input_vertices,
                                  exec_list *out_instructions);
@@ -187,19 +188,11 @@ private:
     */
    void * const mem_ctx;
 
-   /**
-    * Location representing the first generic varying slot for this shader
-    * stage (e.g. VARYING_SLOT_VAR0 if we are packing vertex shader outputs).
-    * Varyings whose location is less than this value are assumed to
-    * correspond to special fixed function hardware, so they are not lowered.
-    */
-   const unsigned location_base;
-
    /**
     * Number of generic varying slots which are used by this shader. This is
-    * used to allocate temporary intermediate data structures. If any any
-    * varying used by this shader has a location greater than or equal to
-    * location_base + locations_used, an assertion will fire.
+    * used to allocate temporary intermediate data structures. If any varying
+    * used by this shader has a location greater than or equal to
+    * VARYING_SLOT_VAR0 + locations_used, an assertion will fire.
     */
    const unsigned locations_used;
 
@@ -230,12 +223,12 @@ private:
    exec_list *out_instructions;
 };
 
+} /* anonymous namespace */
+
 lower_packed_varyings_visitor::lower_packed_varyings_visitor(
-   void *mem_ctx, unsigned location_base, unsigned locations_used,
-   ir_variable_mode mode, unsigned gs_input_vertices,
-   exec_list *out_instructions)
+   void *mem_ctx, unsigned locations_used, ir_variable_mode mode,
+   unsigned gs_input_vertices, exec_list *out_instructions)
    : mem_ctx(mem_ctx),
-     location_base(location_base),
      locations_used(locations_used),
      packed_varyings((ir_variable **)
                      rzalloc_array_size(mem_ctx, sizeof(*packed_varyings),
@@ -249,13 +242,13 @@ lower_packed_varyings_visitor::lower_packed_varyings_visitor(
 void
 lower_packed_varyings_visitor::run(exec_list *instructions)
 {
-   foreach_list (node, instructions) {
-      ir_variable *var = ((ir_instruction *) node)->as_variable();
+   foreach_in_list(ir_instruction, node, instructions) {
+      ir_variable *var = node->as_variable();
       if (var == NULL)
          continue;
 
-      if (var->mode != this->mode ||
-          var->location < (int) this->location_base ||
+      if (var->data.mode != this->mode ||
+          var->data.location < VARYING_SLOT_VAR0 ||
          !this->needs_lowering(var))
          continue;
 
@@ -264,18 +257,19 @@ lower_packed_varyings_visitor::run(exec_list *instructions)
        * safe, caller should ensure that integral varyings always use flat
        * interpolation, even when this is not required by GLSL.
        */
-      assert(var->interpolation == INTERP_QUALIFIER_FLAT ||
+      assert(var->data.interpolation == INTERP_QUALIFIER_FLAT ||
             !var->type->contains_integer());
 
      /* Change the old varying into an ordinary global. */
-      var->mode = ir_var_auto;
+      assert(var->data.mode != ir_var_temporary);
+      var->data.mode = ir_var_auto;
 
      /* Create a reference to the old varying. */
      ir_dereference_variable *deref
        = new(this->mem_ctx) ir_dereference_variable(var);
 
      /* Recursively pack or unpack it. */
-      this->lower_rvalue(deref, var->location * 4 + var->location_frac, var,
+      this->lower_rvalue(deref, var->data.location * 4 + var->data.location_frac, var,
                         var->name, this->gs_input_vertices != 0, 0);
    }
 }
@@ -502,17 +496,16 @@ lower_packed_varyings_visitor::lower_arraylike(ir_rvalue *rvalue,
      ir_constant *constant = new(this->mem_ctx) ir_constant(i);
      ir_dereference_array *dereference_array = new(this->mem_ctx)
         ir_dereference_array(rvalue, constant);
-      char *subscripted_name
-         = ralloc_asprintf(this->mem_ctx, "%s[%d]", name, i);
      if (gs_input_toplevel) {
         /* Geometry shader inputs are a special case. Instead of storing
          * each element of the array at a different location, all elements
          * are at the same location, but with a different vertex index.
          */
         (void) this->lower_rvalue(dereference_array, fine_location,
-                                   unpacked_var, subscripted_name,
-                                   false, i);
+                                   unpacked_var, name, false, i);
      } else {
+         char *subscripted_name
+            = ralloc_asprintf(this->mem_ctx, "%s[%d]", name, i);
         fine_location
            = this->lower_rvalue(dereference_array, fine_location,
                                 unpacked_var, subscripted_name,
@@ -539,12 +532,12 @@ lower_packed_varyings_visitor::get_packed_varying_deref(
    unsigned location, ir_variable *unpacked_var, const char *name,
    unsigned vertex_index)
 {
-   unsigned slot = location - this->location_base;
+   unsigned slot = location - VARYING_SLOT_VAR0;
    assert(slot < locations_used);
    if (this->packed_varyings[slot] == NULL) {
      char *packed_name = ralloc_asprintf(this->mem_ctx, "packed:%s", name);
      const glsl_type *packed_type;
-      if (unpacked_var->interpolation == INTERP_QUALIFIER_FLAT)
+      if (unpacked_var->data.interpolation == INTERP_QUALIFIER_FLAT)
        packed_type = glsl_type::ivec4_type;
      else
        packed_type = glsl_type::vec4_type;
@@ -559,11 +552,12 @@ lower_packed_varyings_visitor::get_packed_varying_deref(
        /* Prevent update_array_sizes() from messing with the size of the
         * array.
         */
-        packed_var->max_array_access = this->gs_input_vertices - 1;
+        packed_var->data.max_array_access = this->gs_input_vertices - 1;
      }
-     packed_var->centroid = unpacked_var->centroid;
-     packed_var->interpolation = unpacked_var->interpolation;
-     packed_var->location = location;
+     packed_var->data.centroid = unpacked_var->data.centroid;
+     packed_var->data.sample = unpacked_var->data.sample;
+     packed_var->data.interpolation = unpacked_var->data.interpolation;
+     packed_var->data.location = location;
      unpacked_var->insert_before(packed_var);
      this->packed_varyings[slot] = packed_var;
    } else {
@@ -591,7 +585,12 @@ lower_packed_varyings_visitor::get_packed_varying_deref(
 bool
 lower_packed_varyings_visitor::needs_lowering(ir_variable *var)
 {
-   /* Things composed of vec4's don't need lowering. Everything else does. */
+   /* Things composed of vec4's and varyings with explicitly assigned
+    * locations don't need lowering. Everything else does.
+    */
+   if (var->data.explicit_location)
+      return false;
+
    const glsl_type *type = var->type;
    if (this->gs_input_vertices != 0) {
      assert(type->is_array());
@@ -615,7 +614,7 @@ public:
    explicit lower_packed_varyings_gs_splicer(void *mem_ctx,
                                              const exec_list *instructions);
 
-   virtual ir_visitor_status visit(ir_emit_vertex *ev);
+   virtual ir_visitor_status visit_leave(ir_emit_vertex *ev);
 
 private:
    /**
@@ -639,10 +638,9 @@ lower_packed_varyings_gs_splicer::lower_packed_varyings_gs_splicer(
 
 
 ir_visitor_status
-lower_packed_varyings_gs_splicer::visit(ir_emit_vertex *ev)
+lower_packed_varyings_gs_splicer::visit_leave(ir_emit_vertex *ev)
 {
-   foreach_list(node, this->instructions) {
-      ir_instruction *ir = (ir_instruction *) node;
+   foreach_in_list(ir_instruction, ir, this->instructions) {
      ev->insert_before(ir->clone(this->mem_ctx, NULL));
    }
    return visit_continue;
@@ -650,22 +648,21 @@ lower_packed_varyings_gs_splicer::visit(ir_emit_vertex *ev)
 
 
 void
-lower_packed_varyings(void *mem_ctx, unsigned location_base,
-                      unsigned locations_used, ir_variable_mode mode,
-                      unsigned gs_input_vertices, gl_shader *shader)
+lower_packed_varyings(void *mem_ctx, unsigned locations_used,
+                      ir_variable_mode mode, unsigned gs_input_vertices,
+                      gl_shader *shader)
 {
    exec_list *instructions = shader->ir;
    ir_function *main_func = shader->symbols->get_function("main");
    exec_list void_parameters;
    ir_function_signature *main_func_sig
-      = main_func->matching_signature(NULL, &void_parameters);
+      = main_func->matching_signature(NULL, &void_parameters, false);
    exec_list new_instructions;
-   lower_packed_varyings_visitor visitor(mem_ctx, location_base,
-                                         locations_used, mode,
+   lower_packed_varyings_visitor visitor(mem_ctx, locations_used, mode,
                                          gs_input_vertices,
                                          &new_instructions);
    visitor.run(instructions);
    if (mode == ir_var_shader_out) {
-      if (shader->Type == GL_GEOMETRY_SHADER) {
+      if (shader->Stage == MESA_SHADER_GEOMETRY) {
        /* For geometry shaders, outputs need to be lowered before each call
         * to EmitVertex()
         */
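For context on the new entry-point signature above, here is a minimal caller sketch. It is not code from this patch or from Mesa's linker; it only assumes the lower_packed_varyings() declaration shown here and the standard ir_var_shader_out / ir_var_shader_in modes. The wrapper name pack_varyings_between_stages and the variables slots_used, producer, consumer and consumer_vertices are hypothetical, chosen to show that the old location_base argument is no longer needed because packing now always starts at VARYING_SLOT_VAR0.

/* Hypothetical helper, not part of the patch: packs the producer's generic
 * outputs and unpacks the consumer's generic inputs using the reduced
 * argument list introduced by this change.
 */
static void
pack_varyings_between_stages(void *mem_ctx, unsigned slots_used,
                             gl_shader *producer, gl_shader *consumer,
                             unsigned consumer_vertices)
{
   /* Outputs are never indexed by vertex, so gs_input_vertices is 0. */
   lower_packed_varyings(mem_ctx, slots_used, ir_var_shader_out,
                         0 /* gs_input_vertices */, producer);

   /* For a geometry shader consumer, consumer_vertices is the number of
    * vertices per input primitive, so the packed inputs become arrays of
    * that size; for other consumer stages it is 0.
    */
   lower_packed_varyings(mem_ctx, slots_used, ir_var_shader_in,
                         consumer_vertices, consumer);
}

In Mesa these calls come from the linker after varying locations have been assigned, so slots_used corresponds to the number of generic slots counted from VARYING_SLOT_VAR0, matching the locations_used parameter documented in the hunk above.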