const struct glsl_type *vertex_type = glsl_vec4_type();
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");
nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vertex_type, "a_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
+ b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_fs");
const struct glsl_type *color_type = glsl_vec4_type();
glsl_vec4_type(), "v_attr");
tex_pos_in->data.location = VARYING_SLOT_VAR0;
+ /* Swizzle the array index which comes in as Z coordinate into the right
+ * position.
+ */
+ unsigned swz[] = { 0, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 1), 2 };
+ nir_ssa_def *const tex_pos =
+ nir_swizzle(&b, nir_load_var(&b, tex_pos_in), swz,
+ (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 3), false);
+
const struct glsl_type *sampler_type =
- glsl_sampler_type(tex_dim, false, false, glsl_get_base_type(color_type));
+ glsl_sampler_type(tex_dim, false, tex_dim != GLSL_SAMPLER_DIM_3D,
+ glsl_get_base_type(color_type));
nir_variable *sampler = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
sampler->data.descriptor_set = 0;
tex->sampler_dim = tex_dim;
tex->op = nir_texop_tex;
tex->src[0].src_type = nir_tex_src_coord;
- tex->src[0].src = nir_src_for_ssa(nir_load_var(&b, tex_pos_in));
+ tex->src[0].src = nir_src_for_ssa(tex_pos);
tex->dest_type = nir_type_float; /* TODO */
-
- if (tex_dim != GLSL_SAMPLER_DIM_3D)
- tex->is_array = true;
-
- tex->coord_components = 3;
-
+ tex->is_array = glsl_sampler_type_is_array(sampler_type);
+ tex->coord_components = tex_pos->num_components;
tex->sampler = nir_deref_var_create(tex, sampler);
nir_ssa_dest_init(&tex->instr, &tex->dest, 4, "tex");
cmd_buffer->state.vb_dirty |= (1 << ANV_META_VERTEX_BINDING_COUNT) - 1;
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_VERTEX_BIT;
+ cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
anv_dynamic_state_copy(&cmd_buffer->state.dynamic, &state->dynamic,
state->dynamic_mask);
cmd_buffer->state.dirty |= state->dynamic_mask;
+
+ /* Since we've used the pipeline with the VS disabled, set
+ * need_query_wa. See CmdBeginQuery.
+ */
+ cmd_buffer->state.need_query_wa = true;
}
VkImageViewType
.layout = VK_IMAGE_LAYOUT_GENERAL,
},
.preserveAttachmentCount = 1,
- .pPreserveAttachments = &(VkAttachmentReference) {
- .attachment = 0,
- .layout = VK_IMAGE_LAYOUT_GENERAL,
- },
+ .pPreserveAttachments = (uint32_t[]) { 0 },
},
.dependencyCount = 0,
- }, NULL, &device->meta_state.blit.render_pass);
+ }, &device->meta_state.alloc, &device->meta_state.blit.render_pass);
if (result != VK_SUCCESS)
goto fail;
VkDescriptorSetLayoutCreateInfo ds_layout_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 1,
- .pBinding = (VkDescriptorSetLayoutBinding[]) {
+ .pBindings = (VkDescriptorSetLayoutBinding[]) {
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
}
};
result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
- &ds_layout_info, NULL,
+ &ds_layout_info,
+ &device->meta_state.alloc,
&device->meta_state.blit.ds_layout);
if (result != VK_SUCCESS)
goto fail_render_pass;
.setLayoutCount = 1,
.pSetLayouts = &device->meta_state.blit.ds_layout,
},
- NULL, &device->meta_state.blit.pipeline_layout);
+ &device->meta_state.alloc, &device->meta_state.blit.pipeline_layout);
if (result != VK_SUCCESS)
goto fail_descriptor_set_layout;
};
const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
+ .color_attachment_count = -1,
.use_repclear = false,
.disable_viewport = true,
.disable_scissor = true,
result = anv_graphics_pipeline_create(anv_device_to_handle(device),
VK_NULL_HANDLE,
&vk_pipeline_info, &anv_pipeline_info,
- NULL, &device->meta_state.blit.pipeline_1d_src);
+ &device->meta_state.alloc, &device->meta_state.blit.pipeline_1d_src);
if (result != VK_SUCCESS)
goto fail_pipeline_layout;
result = anv_graphics_pipeline_create(anv_device_to_handle(device),
VK_NULL_HANDLE,
&vk_pipeline_info, &anv_pipeline_info,
- NULL, &device->meta_state.blit.pipeline_2d_src);
+ &device->meta_state.alloc, &device->meta_state.blit.pipeline_2d_src);
if (result != VK_SUCCESS)
goto fail_pipeline_1d;
result = anv_graphics_pipeline_create(anv_device_to_handle(device),
VK_NULL_HANDLE,
&vk_pipeline_info, &anv_pipeline_info,
- NULL, &device->meta_state.blit.pipeline_3d_src);
+ &device->meta_state.alloc, &device->meta_state.blit.pipeline_3d_src);
if (result != VK_SUCCESS)
goto fail_pipeline_2d;
fail_pipeline_2d:
anv_DestroyPipeline(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_2d_src, NULL);
+ device->meta_state.blit.pipeline_2d_src,
+ &device->meta_state.alloc);
fail_pipeline_1d:
anv_DestroyPipeline(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_1d_src, NULL);
+ device->meta_state.blit.pipeline_1d_src,
+ &device->meta_state.alloc);
fail_pipeline_layout:
anv_DestroyPipelineLayout(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_layout, NULL);
+ device->meta_state.blit.pipeline_layout,
+ &device->meta_state.alloc);
fail_descriptor_set_layout:
anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
- device->meta_state.blit.ds_layout, NULL);
+ device->meta_state.blit.ds_layout,
+ &device->meta_state.alloc);
fail_render_pass:
anv_DestroyRenderPass(anv_device_to_handle(device),
- device->meta_state.blit.render_pass, NULL);
+ device->meta_state.blit.render_pass,
+ &device->meta_state.alloc);
ralloc_free(vs.nir);
ralloc_free(fs_1d.nir);
float tex_coord[3];
} *vb_data;
+ assert(src_image->samples == dest_image->samples);
+
unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);
struct anv_state vb_state =
&(VkDescriptorSetAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.descriptorPool = dummy_desc_pool,
- .setLayoutCount = 1,
+ .descriptorSetCount = 1,
.pSetLayouts = &device->meta_state.blit.ds_layout
}, &set);
anv_UpdateDescriptorSets(anv_device_to_handle(device),
VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
}
- anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 1,
+ anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
&(VkViewport) {
.x = 0.0f,
.y = 0.0f,
/* First, we make a bunch of max-sized copies */
uint64_t max_copy_size = max_surface_dim * max_surface_dim * bs;
- while (copy_size > max_copy_size) {
+ while (copy_size >= max_copy_size) {
do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
dest_buffer->bo, dest_offset,
max_surface_dim, max_surface_dim, copy_format);
meta_finish_blit(cmd_buffer, &saved_state);
}
+/* Implements vkCmdUpdateBuffer by staging the host data in the dynamic
+ * state stream and blitting it into the destination buffer in
+ * max_update_size chunks.  The spec requires dstOffset and dataSize to be
+ * multiples of 4, which the format selection below relies on.
+ */
+void anv_CmdUpdateBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize dataSize,
+    const uint32_t* pData)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
+   struct anv_meta_saved_state saved_state;
+
+   meta_prepare_blit(cmd_buffer, &saved_state);
+
+   /* We can't quite grab a full block because the state stream needs a
+    * little data at the top to build its linked list.
+    */
+   const uint32_t max_update_size =
+      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
+
+   assert(max_update_size < (1 << 14) * 4);
+
+   while (dataSize) {
+      const uint32_t copy_size = MIN2(dataSize, max_update_size);
+
+      /* Stage the source words in GPU-visible dynamic state. */
+      struct anv_state tmp_data =
+         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
+
+      memcpy(tmp_data.map, pData, copy_size);
+
+      /* Pick the widest texel format whose size divides both the copy
+       * size and the destination offset; R32_UINT is always legal since
+       * both are required to be multiples of 4 (asserted below).
+       */
+      VkFormat format;
+      int bs;
+      if ((copy_size & 15) == 0 && (dstOffset & 15) == 0) {
+         format = VK_FORMAT_R32G32B32A32_UINT;
+         bs = 16;
+      } else if ((copy_size & 7) == 0 && (dstOffset & 7) == 0) {
+         format = VK_FORMAT_R32G32_UINT;
+         bs = 8;
+      } else {
+         assert((copy_size & 3) == 0 && (dstOffset & 3) == 0);
+         format = VK_FORMAT_R32_UINT;
+         bs = 4;
+      }
+
+      do_buffer_copy(cmd_buffer,
+                     &cmd_buffer->device->dynamic_state_block_pool.bo,
+                     tmp_data.offset,
+                     dst_buffer->bo, dst_buffer->offset + dstOffset,
+                     copy_size / bs, 1, format);
+
+      dataSize -= copy_size;
+      dstOffset += copy_size;
+      /* copy_size is a multiple of 4 in every branch above, so advance
+       * the uint32_t pointer directly.  The previous
+       * "pData = (void *)pData + copy_size" relied on GNU void-pointer
+       * arithmetic and cast away const.
+       */
+      pData += copy_size / 4;
+   }
+
+   /* Restore the state meta_prepare_blit() saved; without this the
+    * caller's pipeline/descriptor/dynamic state stays clobbered.
+    */
+   meta_finish_blit(cmd_buffer, &saved_state);
+}
+
static VkFormat
choose_iview_format(struct anv_image *image, VkImageAspectFlagBits aspect)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
ANV_FROM_HANDLE(anv_image, dest_image, destImage);
-
struct anv_meta_saved_state saved_state;
+ /* From the Vulkan 1.0 spec:
+ *
+ * vkCmdCopyImage can be used to copy image data between multisample
+ * images, but both images must have the same number of samples.
+ */
+ assert(src_image->samples == dest_image->samples);
+
meta_prepare_blit(cmd_buffer, &saved_state);
for (unsigned r = 0; r < regionCount; r++) {
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
ANV_FROM_HANDLE(anv_image, dest_image, destImage);
-
struct anv_meta_saved_state saved_state;
+ /* From the Vulkan 1.0 spec:
+ *
+ * vkCmdBlitImage must not be used for multisampled source or
+ * destination images. Use vkCmdResolveImage for this purpose.
+ */
+ assert(src_image->samples == 1);
+ assert(dest_image->samples == 1);
+
anv_finishme("respect VkFilter");
meta_prepare_blit(cmd_buffer, &saved_state);
cmd_buffer);
const VkOffset3D dest_offset = {
- .x = pRegions[r].dstOffset.x,
- .y = pRegions[r].dstOffset.y,
+ .x = pRegions[r].dstOffsets[0].x,
+ .y = pRegions[r].dstOffsets[0].y,
.z = 0,
};
+ if (pRegions[r].dstOffsets[1].x < pRegions[r].dstOffsets[0].x ||
+ pRegions[r].dstOffsets[1].y < pRegions[r].dstOffsets[0].y ||
+ pRegions[r].srcOffsets[1].x < pRegions[r].srcOffsets[0].x ||
+ pRegions[r].srcOffsets[1].y < pRegions[r].srcOffsets[0].y)
+ anv_finishme("FINISHME: Allow flipping in blits");
+
+ const VkExtent3D dest_extent = {
+ .width = pRegions[r].dstOffsets[1].x - pRegions[r].dstOffsets[0].x,
+ .height = pRegions[r].dstOffsets[1].y - pRegions[r].dstOffsets[0].y,
+ };
+
+ const VkExtent3D src_extent = {
+ .width = pRegions[r].srcOffsets[1].x - pRegions[r].srcOffsets[0].x,
+ .height = pRegions[r].srcOffsets[1].y - pRegions[r].srcOffsets[0].y,
+ };
+
const uint32_t dest_array_slice =
meta_blit_get_dest_view_base_array_slice(dest_image,
&pRegions[r].dstSubresource,
- &pRegions[r].dstOffset);
+ &pRegions[r].dstOffsets[0]);
if (pRegions[r].srcSubresource.layerCount > 1)
anv_finishme("FINISHME: copy multiple array layers");
- if (pRegions[r].dstExtent.depth > 1)
+ if (pRegions[r].srcOffsets[0].z + 1 != pRegions[r].srcOffsets[1].z ||
+ pRegions[r].dstOffsets[0].z + 1 != pRegions[r].dstOffsets[1].z)
anv_finishme("FINISHME: copy multiple depth layers");
struct anv_image_view dest_iview;
meta_emit_blit(cmd_buffer,
src_image, &src_iview,
- pRegions[r].srcOffset,
- pRegions[r].srcExtent,
+ pRegions[r].srcOffsets[0], src_extent,
dest_image, &dest_iview,
- dest_offset,
- pRegions[r].dstExtent,
+ dest_offset, dest_extent,
filter);
}
VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
struct anv_meta_saved_state saved_state;
+ /* The Vulkan 1.0 spec says "dstImage must have a sample count equal to
+ * VK_SAMPLE_COUNT_1_BIT."
+ */
+ assert(dest_image->samples == 1);
+
meta_prepare_blit(cmd_buffer, &saved_state);
for (unsigned r = 0; r < regionCount; r++) {
VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
struct anv_meta_saved_state saved_state;
+
+ /* The Vulkan 1.0 spec says "srcImage must have a sample count equal to
+ * VK_SAMPLE_COUNT_1_BIT."
+ */
+ assert(src_image->samples == 1);
+
meta_prepare_blit(cmd_buffer, &saved_state);
for (unsigned r = 0; r < regionCount; r++) {
meta_finish_blit(cmd_buffer, &saved_state);
}
-void anv_CmdUpdateBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer destBuffer,
- VkDeviceSize destOffset,
- VkDeviceSize dataSize,
- const uint32_t* pData)
-{
- stub();
-}
-
-void anv_CmdFillBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer destBuffer,
- VkDeviceSize destOffset,
- VkDeviceSize fillSize,
- uint32_t data)
-{
- stub();
-}
-
void anv_CmdResolveImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
stub();
}
+/* VkAllocationCallbacks pfnAllocation thunk that forwards to the device's
+ * application-supplied allocator.  The incoming allocationScope is ignored
+ * and VK_SYSTEM_ALLOCATION_SCOPE_DEVICE is passed instead -- meta objects
+ * are owned by the device, not by the command that triggered the
+ * allocation.
+ */
+static void *
+meta_alloc(void* _device, size_t size, size_t alignment,
+           VkSystemAllocationScope allocationScope)
+{
+   struct anv_device *device = _device;
+   return device->alloc.pfnAllocation(device->alloc.pUserData, size, alignment,
+                                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+}
+
+/* VkAllocationCallbacks pfnReallocation thunk; like meta_alloc(), it
+ * forwards to the device's allocator and forces
+ * VK_SYSTEM_ALLOCATION_SCOPE_DEVICE regardless of the caller's scope.
+ */
+static void *
+meta_realloc(void* _device, void *original, size_t size, size_t alignment,
+             VkSystemAllocationScope allocationScope)
+{
+   struct anv_device *device = _device;
+   return device->alloc.pfnReallocation(device->alloc.pUserData, original,
+                                        size, alignment,
+                                        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+}
+
+/* VkAllocationCallbacks pfnFree thunk forwarding to the device's
+ * allocator.
+ */
+static void
+meta_free(void* _device, void *data)
+{
+   struct anv_device *device = _device;
+   /* pfnFree returns void; "return expr;" in a void function is a
+    * constraint violation in strict ISO C (C11 6.8.6.4), so just call it.
+    */
+   device->alloc.pfnFree(device->alloc.pUserData, data);
+}
+
VkResult
anv_device_init_meta(struct anv_device *device)
{
+   /* Route every meta-object allocation through the device's allocator
+    * (see meta_alloc/meta_realloc/meta_free) so meta state is accounted
+    * to the device rather than to any per-call allocator.
+    */
+   device->meta_state.alloc = (VkAllocationCallbacks) {
+      .pUserData = device,
+      .pfnAllocation = meta_alloc,
+      .pfnReallocation = meta_realloc,
+      .pfnFree = meta_free,
+   };
+
   VkResult result;
   result = anv_device_init_meta_clear_state(device);
   if (result != VK_SUCCESS)
      return result;
   result = anv_device_init_meta_blit_state(device);
-   if (result != VK_SUCCESS)
+   if (result != VK_SUCCESS) {
+      /* Unwind the already-initialized clear state so a failed blit-state
+       * init doesn't leak it.
+       */
+      anv_device_finish_meta_clear_state(device);
      return result;
+   }
   return VK_SUCCESS;
}
/* Blit */
anv_DestroyRenderPass(anv_device_to_handle(device),
- device->meta_state.blit.render_pass, NULL);
+ device->meta_state.blit.render_pass,
+ &device->meta_state.alloc);
anv_DestroyPipeline(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_1d_src, NULL);
+ device->meta_state.blit.pipeline_1d_src,
+ &device->meta_state.alloc);
anv_DestroyPipeline(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_2d_src, NULL);
+ device->meta_state.blit.pipeline_2d_src,
+ &device->meta_state.alloc);
anv_DestroyPipeline(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_3d_src, NULL);
+ device->meta_state.blit.pipeline_3d_src,
+ &device->meta_state.alloc);
anv_DestroyPipelineLayout(anv_device_to_handle(device),
- device->meta_state.blit.pipeline_layout, NULL);
+ device->meta_state.blit.pipeline_layout,
+ &device->meta_state.alloc);
anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
- device->meta_state.blit.ds_layout, NULL);
+ device->meta_state.blit.ds_layout,
+ &device->meta_state.alloc);
}