if (layout == NULL)
goto out;
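+ /* Storage images are handed to the shader through push constants.  Make
+ * sure this stage's push constant block has room for the image params,
+ * then mark the push constants dirty so they get re-emitted.
+ */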
+ if (layout->stage[stage].image_count > 0) {
+ VkResult result =
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
+ if (result != VK_SUCCESS)
+ return result;
+
+ cmd_buffer->state.push_constants_dirty |= 1 << stage;
+ }
+
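+ /* Index of the next brw_image_param to fill in this stage's push constants. */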
+ uint32_t image = 0;
for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
struct anv_pipeline_binding *binding =
&layout->stage[stage].surface_to_descriptor[s];
bo_offset = desc->image_view->offset;
break;
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ surface_state = desc->image_view->storage_surface_state;
+ bo = desc->image_view->bo;
+ bo_offset = desc->image_view->offset;
+
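+ /* Fill out the brw_image_param for this image in the push constant block
+ * and record which binding table slot its surface state occupies.
+ */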
+ struct brw_image_param *image_param =
+ &cmd_buffer->state.push_constants[stage]->images[image++];
+
+ anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
+ image_param);
+ image_param->surface_idx = bias + s;
+ break;
+ }
+
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
assert(!"Unsupported descriptor type");
bt_map[bias + s] = surface_state.offset + state_offset;
add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
}
+ assert(image == layout->stage[stage].image_count);
out:
if (!cmd_buffer->device->info.has_llc)
return sampler_index;
}
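+/* Translate a (set, binding) pair into a flat index into the stage's image
+ * params: the set's per-stage image_start plus the binding's per-stage
+ * image_index.
+ */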
+static uint32_t
+get_image_index(unsigned set, unsigned binding,
+ struct apply_pipeline_layout_state *state)
+{
+ assert(set < state->layout->num_sets);
+ struct anv_descriptor_set_layout *set_layout =
+ state->layout->set[set].layout;
+
+ assert(binding < set_layout->binding_count);
+
+ gl_shader_stage stage = state->shader->stage;
+
+ assert(set_layout->binding[binding].stage[stage].image_index >= 0);
+
+ uint32_t image_index =
+ state->layout->set[set].stage[stage].image_start +
+ set_layout->binding[binding].stage[stage].image_index;
+
+ assert(image_index < state->layout->stage[stage].image_count);
+
+ return image_index;
+}
+
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
struct apply_pipeline_layout_state *state)
return true;
}
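+/* Point 'n' consecutive uniform param slots at 'values' and pad the rest of
+ * the vec4 with a shared zero constant.
+ */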
+static void
+setup_vec4_uniform_value(const gl_constant_value **params,
+ const gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
+
+ for (unsigned i = 0; i < n; ++i)
+ params[i] = &values[i];
+
+ for (unsigned i = n; i < 4; ++i)
+ params[i] = &zero;
+}
+
bool
anv_nir_apply_pipeline_layout(nir_shader *shader,
+ struct brw_stage_prog_data *prog_data,
const struct anv_pipeline_layout *layout)
{
struct apply_pipeline_layout_state state = {
}
}
+ if (layout->stage[shader->stage].image_count > 0) {
+ nir_foreach_variable(var, &shader->uniforms) {
+ if (glsl_type_is_image(var->type) ||
+ (glsl_type_is_array(var->type) &&
+ glsl_type_is_image(glsl_get_array_element(var->type)))) {
+ /* Images are represented as uniform push constants and the actual
+ * information required for reading/writing to/from the image is
+ * stored in the uniform.
+ */
+ unsigned image_index = get_image_index(var->data.descriptor_set,
+ var->data.binding, &state);
+
+ var->data.driver_location = shader->num_uniforms +
+ image_index * BRW_IMAGE_PARAM_SIZE;
+ }
+ }
+
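+ /* The NULL anv_push_constants pointer is only used to compute offsets:
+ * the addresses stored in prog_data->param are offsets into the stage's
+ * push constant data rather than real pointers.
+ */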
+ struct anv_push_constants *null_data = NULL;
+ const gl_constant_value **param = prog_data->param + shader->num_uniforms;
+ const struct brw_image_param *image_param = null_data->images;
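+ /* For each image, point the uniform params at the surface index, offset,
+ * size, stride, tiling and swizzling fields of its brw_image_param.
+ */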
+ for (uint32_t i = 0; i < layout->stage[shader->stage].image_count; i++) {
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
+ (const gl_constant_value *)&image_param->surface_idx, 1);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
+ (const gl_constant_value *)image_param->offset, 2);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
+ (const gl_constant_value *)image_param->size, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
+ (const gl_constant_value *)image_param->stride, 4);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
+ (const gl_constant_value *)image_param->tiling, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
+ (const gl_constant_value *)image_param->swizzling, 2);
+
+ param += BRW_IMAGE_PARAM_SIZE;
+ image_param++;
+ }
+
+ shader->num_uniforms += layout->stage[shader->stage].image_count *
+ BRW_IMAGE_PARAM_SIZE;
+ }
+
return state.progress;
}
if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
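+
+ /* Each storage image needs BRW_IMAGE_PARAM_SIZE uniform slots for its
+ * brw_image_param.
+ */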
+ if (pipeline->layout && pipeline->layout->stage[stage].image_count > 0)
+ prog_data->nr_params += pipeline->layout->stage[stage].image_count *
+ BRW_IMAGE_PARAM_SIZE;
+
if (prog_data->nr_params > 0) {
/* XXX: I think we're leaking this */
prog_data->param = (const gl_constant_value **)
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
if (pipeline->layout)
- anv_nir_apply_pipeline_layout(nir, pipeline->layout);
+ anv_nir_apply_pipeline_layout(nir, prog_data, pipeline->layout);
/* All binding table offsets provided by apply_pipeline_layout() are
* relative to the start of the binding table (plus MAX_RTS for VS).