* IN THE SOFTWARE.
*/
-#include "brw_shader.h"
-#include "brw_nir.h"
-#include "glsl/ir.h"
-#include "glsl/ir_uniform.h"
+#include "compiler/brw_nir.h"
+#include "compiler/glsl/ir_uniform.h"
/* Set up stage_prog_data->param entries for a built-in (gl_*) uniform by
 * walking the variable's state slots.  NOTE(review): this is a diff hunk —
 * the loop interior (declaration of swiz/last_swiz and the slot lookup) is
 * elided by the patch context; comments below are hedged accordingly.
 */
static void
brw_nir_setup_glsl_builtin_uniform(nir_variable *var,
const struct gl_program *prog,
struct brw_stage_prog_data *stage_prog_data,
- unsigned comps_per_unit)
+ bool is_scalar)
{
const nir_state_slot *const slots = var->state_slots;
assert(var->state_slots != NULL);
- unsigned uniform_index = var->data.driver_location * comps_per_unit;
+ /* driver_location is now in bytes; /4 converts to a param-slot index
+ * (presumably dword-sized slots — TODO confirm against caller). */
+ unsigned uniform_index = var->data.driver_location / 4;
for (unsigned int i = 0; i < var->num_state_slots; i++) {
/* This state reference has already been setup by ir_to_mesa, but we'll
* get the same index back here.
* and move on to the next one. In vec4, we need to continue and pad
* it out to 4 components.
*/
- if (swiz == last_swiz && comps_per_unit == 1)
+ /* Scalar mode: stop once the swizzle repeats; vec4 mode keeps going
+ * so the slot is padded to a full vec4 (per the comment above). */
+ if (swiz == last_swiz && is_scalar)
break;
last_swiz = swiz;
}
}
+/* Point the first n entries of a vec4-aligned param slot at the caller's
+ * constant values and pad the remaining entries (up to 4) with a shared
+ * zero constant, so the slot always covers a full vec4.
+ */
+static void
+setup_vec4_uniform_value(const gl_constant_value **params,
+ const gl_constant_value *values,
+ unsigned n)
+{
+ /* Shared zero; safe because params holds const pointers. */
+ static const gl_constant_value zero = { 0 };
+
+ for (unsigned i = 0; i < n; ++i)
+ params[i] = &values[i];
+
+ /* Zero-fill the unused tail of the vec4. */
+ for (unsigned i = n; i < 4; ++i)
+ params[i] = &zero;
+}
+
+/* Upload one brw_image_param block per array element of an image uniform
+ * into stage_prog_data->param, starting at param_start_index, and mark the
+ * corresponding binding-table surfaces as used.  Each element consumes
+ * BRW_IMAGE_PARAM_SIZE param slots.
+ */
+static void
+brw_setup_image_uniform_values(gl_shader_stage stage,
+ struct brw_stage_prog_data *stage_prog_data,
+ unsigned param_start_index,
+ const gl_uniform_storage *storage)
+{
+ const gl_constant_value **param =
+ &stage_prog_data->param[param_start_index];
+
+ /* array_elements is 0 for non-arrays, hence the MAX2 with 1. */
+ for (unsigned i = 0; i < MAX2(storage->array_elements, 1); i++) {
+ const unsigned image_idx = storage->opaque[stage].index + i;
+ const brw_image_param *image_param =
+ &stage_prog_data->image_param[image_idx];
+
+ /* Upload the brw_image_param structure. The order is expected to match
+ * the BRW_IMAGE_PARAM_*_OFFSET defines.
+ */
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
+ (const gl_constant_value *)&image_param->surface_idx, 1);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
+ (const gl_constant_value *)image_param->offset, 2);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
+ (const gl_constant_value *)image_param->size, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
+ (const gl_constant_value *)image_param->stride, 4);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
+ (const gl_constant_value *)image_param->tiling, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
+ (const gl_constant_value *)image_param->swizzling, 2);
+ /* Advance past this element's full param block. */
+ param += BRW_IMAGE_PARAM_SIZE;
+
+ brw_mark_surface_used(
+ stage_prog_data,
+ stage_prog_data->binding_table.image_start + image_idx);
+ }
+}
+
/* Fill stage_prog_data->param for one non-builtin GLSL uniform variable by
 * scanning the program's gl_uniform_storage list for entries whose name
 * matches (or is a component of) this variable.  NOTE(review): this is a
 * diff hunk — several interior lines (the rest of the strncmp condition at
 * the dangling '||', and the body of the copy loop) are elided by patch
 * context, so comments below are hedged.
 */
static void
brw_nir_setup_glsl_uniform(gl_shader_stage stage, nir_variable *var,
- struct gl_shader_program *shader_prog,
+ const struct gl_program *prog,
struct brw_stage_prog_data *stage_prog_data,
- unsigned comps_per_unit)
+ bool is_scalar)
{
int namelen = strlen(var->name);
/* The data for our (non-builtin) uniforms is stored in a series of
- * gl_uniform_driver_storage structs for each subcomponent that
+ * gl_uniform_storage structs for each subcomponent that
* glGetUniformLocation() could name. We know it's been set up in the same
* order we'd walk the type, so walk the list of storage and find anything
* with our name, or the prefix of a component that starts with our name.
*/
- unsigned uniform_index = var->data.driver_location * comps_per_unit;
- for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
- struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
+ /* driver_location is now byte-based; /4 yields the param-slot index
+ * (presumably dword slots — TODO confirm against linker). */
+ unsigned uniform_index = var->data.driver_location / 4;
+ for (unsigned u = 0; u < prog->sh.data->NumUniformStorage; u++) {
+ struct gl_uniform_storage *storage =
+ &prog->sh.data->UniformStorage[u];
- if (storage->builtin)
+ /* Samplers are now skipped along with builtins: they take no space
+ * in the uniform file. */
+ if (storage->builtin || storage->type->is_sampler())
continue;
if (strncmp(var->name, storage->name, namelen) != 0 ||
if (storage->type->is_image()) {
brw_setup_image_uniform_values(stage, stage_prog_data,
uniform_index, storage);
+ /* Keep uniform_index in step with the params the image consumed. */
+ uniform_index +=
+ BRW_IMAGE_PARAM_SIZE * MAX2(storage->array_elements, 1);
} else {
gl_constant_value *components = storage->storage;
unsigned vector_count = (MAX2(storage->array_elements, 1) *
storage->type->matrix_columns);
unsigned vector_size = storage->type->vector_elements;
+ /* 64-bit types occupy two 32-bit components each; a dvec3/dvec4
+ * therefore spans two vec4 slots, hence max_vector_size = 8. */
+ unsigned max_vector_size = 4;
+ if (storage->type->base_type == GLSL_TYPE_DOUBLE ||
+ storage->type->base_type == GLSL_TYPE_UINT64 ||
+ storage->type->base_type == GLSL_TYPE_INT64) {
+ vector_size *= 2;
+ if (vector_size > 4)
+ max_vector_size = 8;
+ }
for (unsigned s = 0; s < vector_count; s++) {
unsigned i;
stage_prog_data->param[uniform_index++] = components++;
}
- /* Pad out with zeros if needed (only needed for vec4) */
- for (; i < comps_per_unit; i++) {
- static const gl_constant_value zero = { 0.0 };
- stage_prog_data->param[uniform_index++] = &zero;
+ /* vec4 backends pad each vector to a full (possibly double-wide)
+ * slot; scalar backends pack tightly and need no padding. */
+ if (!is_scalar) {
+ /* Pad out with zeros if needed (only needed for vec4) */
+ for (; i < max_vector_size; i++) {
+ static const gl_constant_value zero = { 0.0 };
+ stage_prog_data->param[uniform_index++] = &zero;
+ }
}
}
}
}
/* Entry point for GLSL programs: walk every uniform variable in the NIR
 * shader and populate stage_prog_data->param, dispatching builtins
 * (gl_* names) and ordinary uniforms to their respective helpers.
 * is_scalar selects scalar (FS-style packed) vs vec4 param layout.
 */
void
-brw_nir_setup_glsl_uniforms(nir_shader *shader,
- struct gl_shader_program *shader_prog,
- const struct gl_program *prog,
+brw_nir_setup_glsl_uniforms(nir_shader *shader, const struct gl_program *prog,
struct brw_stage_prog_data *stage_prog_data,
bool is_scalar)
{
- unsigned comps_per_unit = is_scalar ? 1 : 4;
-
- foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
+ nir_foreach_variable(var, &shader->uniforms) {
/* UBO's, atomics and samplers don't take up space in the
uniform file */
if (var->interface_type != NULL || var->type->contains_atomic())
if (strncmp(var->name, "gl_", 3) == 0) {
brw_nir_setup_glsl_builtin_uniform(var, prog, stage_prog_data,
- comps_per_unit);
+ is_scalar);
} else {
- brw_nir_setup_glsl_uniform(shader->stage, var, shader_prog,
- stage_prog_data, comps_per_unit);
+ brw_nir_setup_glsl_uniform(shader->stage, var, prog, stage_prog_data,
+ is_scalar);
}
}
}
{
struct gl_program_parameter_list *plist = prog->Parameters;
-#ifndef NDEBUG
- if (!shader->uniforms.is_empty()) {
- /* For ARB programs, only a single "parameters" variable is generated to
- * support uniform data.
- */
- assert(shader->uniforms.length() == 1);
- nir_variable *var = (nir_variable *) shader->uniforms.get_head();
- assert(strcmp(var->name, "parameters") == 0);
- assert(var->type->array_size() == (int)plist->NumParameters);
- }
-#endif
+ /* For ARB programs, prog_to_nir generates a single "parameters" variable
+ * for all uniform data. nir_lower_wpos_ytransform may also create an
+ * additional variable.
+ */
+ assert(shader->uniforms.length() <= 2);
for (unsigned p = 0; p < plist->NumParameters; p++) {
/* Parameters should be either vec4 uniforms or single component