state.offset + dword * 4, bo, offset);
}
+/* Fill a buffer surface state for a UBO/SSBO descriptor.
+ *
+ * Uniform buffers are described as typed RGBA32F surfaces; the element
+ * stride depends on whether the consuming stage compiles with the scalar
+ * backend (4-byte dword elements) or the vec4 backend (16-byte elements),
+ * as determined by anv_is_scalar_shader_stage().
+ *
+ * Storage buffers use VK_FORMAT_UNDEFINED with a stride of 1 — presumably
+ * producing a raw, byte-addressed surface; confirm against the gen7/gen8
+ * fill_buffer_surface_state implementations.
+ */
+static void
+fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
+ VkShaderStage stage, VkDescriptorType type,
+ uint32_t offset, uint32_t range)
+{
+ VkFormat format;
+ uint32_t stride;
+
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ /* Scalar stages read 4-byte elements; vec4 stages read 16-byte ones. */
+ if (anv_is_scalar_shader_stage(device->instance->physicalDevice.compiler,
+ stage)) {
+ stride = 4;
+ } else {
+ stride = 16;
+ }
+ format = VK_FORMAT_R32G32B32A32_SFLOAT;
+ break;
+
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ /* Untyped surface with byte granularity for raw buffer access. */
+ stride = 1;
+ format = VK_FORMAT_UNDEFINED;
+ break;
+
+ default:
+ unreachable("Invalid descriptor type");
+ }
+
+ anv_fill_buffer_surface_state(device, state,
+ anv_format_for_vk_format(format),
+ offset, range, stride);
+}
+
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
VkShaderStage stage, struct anv_state *bt_state)
surface_state =
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
- anv_fill_buffer_surface_state(cmd_buffer->device, surface_state.map,
- anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT),
- bo_offset, desc->range);
+
+ fill_descriptor_buffer_surface_state(cmd_buffer->device,
+ surface_state.map,
+ stage, desc->type,
+ bo_offset, desc->range);
break;
}
void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
const struct anv_format *format,
- uint32_t offset, uint32_t range)
+ uint32_t offset, uint32_t range, uint32_t stride)
{
switch (device->info.gen) {
case 7:
- gen7_fill_buffer_surface_state(state, format, offset, range);
+ gen7_fill_buffer_surface_state(state, format, offset, range, stride);
break;
case 8:
- gen8_fill_buffer_surface_state(state, format, offset, range);
+ gen8_fill_buffer_surface_state(state, format, offset, range, stride);
break;
default:
unreachable("unsupported gen\n");
void anv_fill_buffer_surface_state(struct anv_device *device, void *state,
const struct anv_format *format,
- uint32_t offset, uint32_t range);
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
void gen7_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range);
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
void gen8_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range);
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
struct anv_sampler {
uint32_t state[4];
void
gen7_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range)
+ uint32_t offset, uint32_t range, uint32_t stride)
{
- /* This assumes RGBA float format. */
-
- uint32_t stride = 16; /* Depends on whether accessing shader is simd8 or
- * vec4. Will need one of each for buffers that are
- * used in both vec4 and simd8. */
-
uint32_t num_elements = range / stride;
struct GEN7_RENDER_SURFACE_STATE surface_state = {
void
gen8_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range)
+ uint32_t offset, uint32_t range, uint32_t stride)
{
- /* This assumes RGBA float format. */
- uint32_t stride = 4;
uint32_t num_elements = range / stride;
struct GEN8_RENDER_SURFACE_STATE surface_state = {