#include "main/uniforms.h"
#include "st_context.h"
+#include "st_glsl_types.h"
#include "st_program.h"
#include "compiler/nir/nir.h"
{
unsigned location = 0;
unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
- uint64_t processed_locs = 0;
- uint32_t processed_patch_locs = 0;
+ uint64_t processed_locs[2] = {0};
+
+ const int base = stage == MESA_SHADER_FRAGMENT ?
+ (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
nir_foreach_variable(var, var_list) {
type = glsl_get_array_element(type);
}
+ /* Builtins don't allow component packing, so we only need to worry about
+ * user-defined varyings sharing the same location.
+ */
bool processed = false;
- if (var->data.patch &&
- var->data.location != VARYING_SLOT_TESS_LEVEL_INNER &&
- var->data.location != VARYING_SLOT_TESS_LEVEL_OUTER &&
- var->data.location != VARYING_SLOT_BOUNDING_BOX0 &&
- var->data.location != VARYING_SLOT_BOUNDING_BOX1) {
- unsigned patch_loc = var->data.location - VARYING_SLOT_PATCH0;
- if (processed_patch_locs & (1 << patch_loc))
+ if (var->data.location >= base) {
+ unsigned glsl_location = var->data.location - base;
+ if (processed_locs[var->data.index] & ((uint64_t)1 << glsl_location))
processed = true;
-
- processed_patch_locs |= (1 << patch_loc);
- } else {
- if (processed_locs & ((uint64_t)1 << var->data.location))
- processed = true;
-
- processed_locs |= ((uint64_t)1 << var->data.location);
+ else
+ processed_locs[var->data.index] |= ((uint64_t)1 << glsl_location);
}
/* Because component packing allows varyings to share the same location
* we may have already have processed this location.
*/
- if (processed && var->data.location >= VARYING_SLOT_VAR0) {
+ if (processed) {
var->data.driver_location = assigned_locations[var->data.location];
*size += type_size(type);
continue;
}
static void
-st_nir_assign_uniform_locations(struct gl_program *prog,
+st_nir_assign_uniform_locations(struct gl_context *ctx,
+ struct gl_program *prog,
struct gl_shader_program *shader_program,
struct exec_list *uniform_list, unsigned *size)
{
/* This state reference has already been setup by ir_to_mesa, but we'll
* get the same index back here.
*/
- loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
+
+ unsigned comps;
+ const struct glsl_type *type = glsl_without_array(uniform->type);
+ if (glsl_type_is_struct(type)) {
+ comps = 4;
+ } else {
+ comps = glsl_get_vector_elements(type);
+ }
+
+ if (ctx->Const.PackedDriverUniformStorage) {
+ loc = _mesa_add_sized_state_reference(prog->Parameters,
+ stateTokens, comps, false);
+ } else {
+ loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
+ }
} else {
loc = st_nir_lookup_parameter_index(prog->Parameters, uniform->name);
}
const nir_state_slot *const slots = var->state_slots;
assert(var->state_slots != NULL);
+ const struct glsl_type *type = glsl_without_array(var->type);
for (unsigned int i = 0; i < var->num_state_slots; i++) {
- _mesa_add_state_reference(prog->Parameters,
- slots[i].tokens);
+ unsigned comps;
+ if (glsl_type_is_struct(type)) {
+ /* Builtin structs require special handling; for now we just
+ * make all members vec4. See st_nir_lower_builtin.
+ */
+ comps = 4;
+ } else {
+ comps = glsl_get_vector_elements(type);
+ }
+
+ if (st->ctx->Const.PackedDriverUniformStorage) {
+ _mesa_add_sized_state_reference(prog->Parameters,
+ slots[i].tokens,
+ comps, false);
+ } else {
+ _mesa_add_state_reference(prog->Parameters,
+ slots[i].tokens);
+ }
}
}
}
NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
- st_nir_assign_uniform_locations(prog, shader_program,
+ st_nir_assign_uniform_locations(st->ctx, prog, shader_program,
&nir->uniforms, &nir->num_uniforms);
+ /* Below is a quick hack so that uniform lowering only runs on radeonsi
+ * (the only NIR backend that currently supports tess). Once we enable
+ * uniform packing support, we will just use
+ * ctx->Const.PackedDriverUniformStorage for this check.
+ */
+ if (screen->get_shader_param(screen, PIPE_SHADER_TESS_CTRL,
+ PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
+ NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, type_size,
+ (nir_lower_io_options)0);
+ NIR_PASS_V(nir, st_nir_lower_uniforms_to_ubo);
+ }
+
if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
NIR_PASS_V(nir, nir_lower_samplers_as_deref, shader_program);
else