#include "anv_private.h"
static bool
-lookup_blorp_shader(struct blorp_context *blorp,
+lookup_blorp_shader(struct blorp_batch *batch,
const void *key, uint32_t key_size,
uint32_t *kernel_out, void *prog_data_out)
{
+ struct blorp_context *blorp = batch->blorp;
struct anv_device *device = blorp->driver_ctx;
/* The default cache must be a real cache */
}
static bool
-upload_blorp_shader(struct blorp_context *blorp,
+upload_blorp_shader(struct blorp_batch *batch,
const void *key, uint32_t key_size,
const void *kernel, uint32_t kernel_size,
const struct brw_stage_prog_data *prog_data,
uint32_t prog_data_size,
uint32_t *kernel_out, void *prog_data_out)
{
+ struct blorp_context *blorp = batch->blorp;
struct anv_device *device = blorp->driver_ctx;
/* The blorp cache must be a real cache */
anv_pipeline_cache_upload_kernel(&device->default_pipeline_cache,
key, key_size, kernel, kernel_size,
NULL, 0,
- prog_data, prog_data_size, &bind_map);
+ prog_data, prog_data_size,
+ NULL, 0, NULL, &bind_map);
if (!bin)
return false;
case 11:
device->blorp.exec = gen11_blorp_exec;
break;
+ case 12:
+ device->blorp.exec = gen12_blorp_exec;
+ break;
default:
unreachable("Unknown hardware generation");
}
.addr = {
.buffer = buffer->address.bo,
.offset = buffer->address.offset + offset,
- .mocs = device->default_mocs,
+ .mocs = anv_mocs_for_bo(device, buffer->address.bo),
},
};
.addr = {
.buffer = image->planes[plane].address.bo,
.offset = image->planes[plane].address.offset + surface->offset,
- .mocs = device->default_mocs,
+ .mocs = anv_mocs_for_bo(device, image->planes[plane].address.bo),
},
};
blorp_surf->aux_addr = (struct blorp_address) {
.buffer = image->planes[plane].address.bo,
.offset = image->planes[plane].address.offset + aux_surface->offset,
- .mocs = device->default_mocs,
+ .mocs = anv_mocs_for_bo(device, image->planes[plane].address.bo),
};
blorp_surf->aux_usage = aux_usage;
}
}
+/* Describe the shadow surface of @image's plane for @aspect as a
+ * blorp_surf in *blorp_surf.
+ *
+ * Returns false (leaving *blorp_surf untouched) when the selected plane
+ * has no shadow surface, i.e. shadow_surface.isl.size_B == 0.
+ *
+ * NOTE(review): shadow surfaces appear to be secondary copies kept in
+ * sync with the main surface (see anv_image_copy_to_shadow and the
+ * stencil-shadow clears elsewhere in this patch) — confirm against
+ * anv_image.c for the exact rationale.
+ */
+static bool
+get_blorp_surf_for_anv_shadow_image(const struct anv_device *device,
+ const struct anv_image *image,
+ VkImageAspectFlags aspect,
+ struct blorp_surf *blorp_surf)
+{
+
+ uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
+ if (image->planes[plane].shadow_surface.isl.size_B == 0)
+ return false;
+
+ /* The shadow surface lives in the same BO as the main surface, at its
+ * own offset within the plane.
+ */
+ *blorp_surf = (struct blorp_surf) {
+ .surf = &image->planes[plane].shadow_surface.isl,
+ .addr = {
+ .buffer = image->planes[plane].address.bo,
+ .offset = image->planes[plane].address.offset +
+ image->planes[plane].shadow_surface.offset,
+ .mocs = anv_mocs_for_bo(device, image->planes[plane].address.bo),
+ },
+ };
+
+ return true;
+}
+
void anv_CmdCopyImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
dstOffset.x, dstOffset.y,
extent.width, extent.height);
}
+
+ struct blorp_surf dst_shadow_surf;
+ if (get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
+ dst_image,
+ 1UL << aspect_bit,
+ &dst_shadow_surf)) {
+ for (unsigned i = 0; i < layer_count; i++) {
+ blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
+ &dst_shadow_surf, dst_level, dst_base_layer + i,
+ srcOffset.x, srcOffset.y,
+ dstOffset.x, dstOffset.y,
+ extent.width, extent.height);
+ }
+ }
}
} else {
struct blorp_surf src_surf, dst_surf;
dstOffset.x, dstOffset.y,
extent.width, extent.height);
}
+
+ struct blorp_surf dst_shadow_surf;
+ if (get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
+ dst_image, dst_mask,
+ &dst_shadow_surf)) {
+ for (unsigned i = 0; i < layer_count; i++) {
+ blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
+ &dst_shadow_surf, dst_level, dst_base_layer + i,
+ srcOffset.x, srcOffset.y,
+ dstOffset.x, dstOffset.y,
+ extent.width, extent.height);
+ }
+ }
}
}
buffer_row_pitch, buffer_format,
&buffer.surf, &buffer_isl_surf);
+ bool dst_has_shadow = false;
+ struct blorp_surf dst_shadow_surf;
if (&image == dst) {
anv_cmd_buffer_mark_image_written(cmd_buffer, anv_image,
aspect, dst->surf.aux_usage,
dst->level,
dst->offset.z, extent.depth);
+
+ dst_has_shadow =
+ get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
+ anv_image, aspect,
+ &dst_shadow_surf);
}
for (unsigned z = 0; z < extent.depth; z++) {
src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
extent.width, extent.height);
+ if (dst_has_shadow) {
+ blorp_copy(&batch, &src->surf, src->level, src->offset.z,
+ &dst_shadow_surf, dst->level, dst->offset.z,
+ src->offset.x, src->offset.y,
+ dst->offset.x, dst->offset.y,
+ extent.width, extent.height);
+ }
+
image.offset.z++;
buffer.surf.addr.offset += buffer_layer_stride;
}
copy_buffer_to_image(cmd_buffer, dst_buffer, src_image, srcImageLayout,
regionCount, pRegions, false);
+
+ cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}
static bool
struct blorp_address src = {
.buffer = src_buffer->address.bo,
.offset = src_buffer->address.offset + pRegions[r].srcOffset,
- .mocs = cmd_buffer->device->default_mocs,
+ .mocs = anv_mocs_for_bo(cmd_buffer->device, src_buffer->address.bo),
};
struct blorp_address dst = {
.buffer = dst_buffer->address.bo,
.offset = dst_buffer->address.offset + pRegions[r].dstOffset,
- .mocs = cmd_buffer->device->default_mocs,
+ .mocs = anv_mocs_for_bo(cmd_buffer->device, dst_buffer->address.bo),
};
blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
}
blorp_batch_finish(&batch);
+
+ cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}
void anv_CmdUpdateBuffer(
memcpy(tmp_data.map, pData, copy_size);
- anv_state_flush(cmd_buffer->device, tmp_data);
-
struct blorp_address src = {
- .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = tmp_data.offset,
.mocs = cmd_buffer->device->default_mocs,
};
struct blorp_address dst = {
.buffer = dst_buffer->address.bo,
.offset = dst_buffer->address.offset + dstOffset,
- .mocs = cmd_buffer->device->default_mocs,
+ .mocs = anv_mocs_for_bo(cmd_buffer->device, dst_buffer->address.bo),
};
blorp_buffer_copy(&batch, src, dst, copy_size);
}
blorp_batch_finish(&batch);
+
+ cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}
void anv_CmdFillBuffer(
}
blorp_batch_finish(&batch);
+
+ cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}
void anv_CmdClearColorImage(
struct blorp_batch batch;
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
- struct blorp_surf depth, stencil;
+ struct blorp_surf depth, stencil, stencil_shadow;
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
get_blorp_surf_for_anv_image(cmd_buffer->device,
image, VK_IMAGE_ASPECT_DEPTH_BIT,
memset(&depth, 0, sizeof(depth));
}
+ bool has_stencil_shadow = false;
if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
get_blorp_surf_for_anv_image(cmd_buffer->device,
image, VK_IMAGE_ASPECT_STENCIL_BIT,
imageLayout, ISL_AUX_USAGE_NONE, &stencil);
+
+ has_stencil_shadow =
+ get_blorp_surf_for_anv_shadow_image(cmd_buffer->device, image,
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ &stencil_shadow);
} else {
memset(&stencil, 0, sizeof(stencil));
}
clear_depth, pDepthStencil->depth,
clear_stencil ? 0xff : 0,
pDepthStencil->stencil);
+
+ if (clear_stencil && has_stencil_shadow) {
+ union isl_color_value stencil_color = {
+ .u32 = { pDepthStencil->stencil, },
+ };
+ blorp_clear(&batch, &stencil_shadow,
+ ISL_FORMAT_R8_UINT, ISL_SWIZZLE_IDENTITY,
+ level, base_layer, layer_count,
+ 0, 0, level_width, level_height,
+ stencil_color, NULL);
+ }
}
}
{
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
const uint32_t color_att = attachment->colorAttachment;
+ assert(color_att < subpass->color_count);
const uint32_t att_idx = subpass->color_attachments[color_att].attachment;
if (att_idx == VK_ATTACHMENT_UNUSED)
{
static const union isl_color_value color_value = { .u32 = { 0, } };
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
- const uint32_t att_idx = subpass->depth_stencil_attachment->attachment;
-
- if (att_idx == VK_ATTACHMENT_UNUSED)
+ if (!subpass->depth_stencil_attachment)
return;
+ const uint32_t att_idx = subpass->depth_stencil_attachment->attachment;
+ assert(att_idx != VK_ATTACHMENT_UNUSED);
struct anv_render_pass_attachment *pass_att =
&cmd_buffer->state.pass->attachments[att_idx];
* trash our depth and stencil buffers.
*/
struct blorp_batch batch;
- blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
- BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
+ enum blorp_batch_flags flags = BLORP_BATCH_NO_EMIT_DEPTH_STENCIL;
+ if (cmd_buffer->state.conditional_render_enabled) {
+ anv_cmd_emit_conditional_render_predicate(cmd_buffer);
+ flags |= BLORP_BATCH_PREDICATE_ENABLE;
+ }
+ blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, flags);
for (uint32_t a = 0; a < attachmentCount; ++a) {
if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
SUBPASS_STAGE_RESOLVE,
};
-static void
-resolve_surface(struct blorp_batch *batch,
- struct blorp_surf *src_surf,
- uint32_t src_level, uint32_t src_layer,
- struct blorp_surf *dst_surf,
- uint32_t dst_level, uint32_t dst_layer,
- uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
- uint32_t width, uint32_t height,
- enum blorp_filter filter)
-{
- blorp_blit(batch,
- src_surf, src_level, src_layer,
- ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
- dst_surf, dst_level, dst_layer,
- ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
- src_x, src_y, src_x + width, src_y + height,
- dst_x, dst_y, dst_x + width, dst_y + height,
- filter, false, false);
-}
-
-static void
-resolve_image(struct anv_device *device,
- struct blorp_batch *batch,
- const struct anv_image *src_image,
- VkImageLayout src_image_layout,
- uint32_t src_level, uint32_t src_layer,
- const struct anv_image *dst_image,
- VkImageLayout dst_image_layout,
- uint32_t dst_level, uint32_t dst_layer,
- VkImageAspectFlags aspect_mask,
- uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
- uint32_t width, uint32_t height)
+void
+anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *src_image,
+ enum isl_aux_usage src_aux_usage,
+ uint32_t src_level, uint32_t src_base_layer,
+ const struct anv_image *dst_image,
+ enum isl_aux_usage dst_aux_usage,
+ uint32_t dst_level, uint32_t dst_base_layer,
+ VkImageAspectFlagBits aspect,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t dst_x, uint32_t dst_y,
+ uint32_t width, uint32_t height,
+ uint32_t layer_count,
+ enum blorp_filter filter)
{
- struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
+ struct blorp_batch batch;
+ blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
assert(src_image->type == VK_IMAGE_TYPE_2D);
assert(src_image->samples > 1);
assert(dst_image->type == VK_IMAGE_TYPE_2D);
assert(dst_image->samples == 1);
assert(src_image->n_planes == dst_image->n_planes);
+ assert(!src_image->format->can_ycbcr);
+ assert(!dst_image->format->can_ycbcr);
- uint32_t aspect_bit;
-
- anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
- struct blorp_surf src_surf, dst_surf;
- get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
- src_image_layout, ISL_AUX_USAGE_NONE,
- &src_surf);
- get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
- dst_image_layout, ISL_AUX_USAGE_NONE,
- &dst_surf);
- anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
- 1UL << aspect_bit,
- dst_surf.aux_usage,
- dst_level, dst_layer, 1);
-
- enum blorp_filter filter;
+ struct blorp_surf src_surf, dst_surf;
+ get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, aspect,
+ ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
+ src_aux_usage, &src_surf);
+ if (src_aux_usage == ISL_AUX_USAGE_MCS) {
+ src_surf.clear_color_addr = anv_to_blorp_address(
+ anv_image_get_clear_color_addr(cmd_buffer->device, src_image,
+ VK_IMAGE_ASPECT_COLOR_BIT));
+ }
+ get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, aspect,
+ ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
+ dst_aux_usage, &dst_surf);
+ anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
+ aspect, dst_aux_usage,
+ dst_level, dst_base_layer, layer_count);
+
+ if (filter == BLORP_FILTER_NONE) {
+ /* If no explicit filter is provided, then it's implied by the type of
+ * the source image.
+ */
if ((src_surf.surf->usage & ISL_SURF_USAGE_DEPTH_BIT) ||
(src_surf.surf->usage & ISL_SURF_USAGE_STENCIL_BIT) ||
isl_format_has_int_channel(src_surf.surf->format)) {
} else {
filter = BLORP_FILTER_AVERAGE;
}
+ }
- assert(!src_image->format->can_ycbcr);
- assert(!dst_image->format->can_ycbcr);
-
- resolve_surface(batch,
- &src_surf, src_level, src_layer,
- &dst_surf, dst_level, dst_layer,
- src_x, src_y, dst_x, dst_y, width, height, filter);
+ for (uint32_t l = 0; l < layer_count; l++) {
+ blorp_blit(&batch,
+ &src_surf, src_level, src_base_layer + l,
+ ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
+ &dst_surf, dst_level, dst_base_layer + l,
+ ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
+ src_x, src_y, src_x + width, src_y + height,
+ dst_x, dst_y, dst_x + width, dst_y + height,
+ filter, false, false);
}
+
+ blorp_batch_finish(&batch);
}
void anv_CmdResolveImage(
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
- struct blorp_batch batch;
- blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
+ assert(!src_image->format->can_ycbcr);
for (uint32_t r = 0; r < regionCount; r++) {
assert(pRegions[r].srcSubresource.aspectMask ==
const uint32_t layer_count =
anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
- VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
- dst_mask = pRegions[r].dstSubresource.aspectMask;
-
- assert(anv_image_aspects_compatible(src_mask, dst_mask));
-
- for (uint32_t layer = 0; layer < layer_count; layer++) {
- resolve_image(cmd_buffer->device, &batch,
- src_image, srcImageLayout,
- pRegions[r].srcSubresource.mipLevel,
- pRegions[r].srcSubresource.baseArrayLayer + layer,
- dst_image, dstImageLayout,
- pRegions[r].dstSubresource.mipLevel,
- pRegions[r].dstSubresource.baseArrayLayer + layer,
- pRegions[r].dstSubresource.aspectMask,
- pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
- pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
- pRegions[r].extent.width, pRegions[r].extent.height);
+ uint32_t aspect_bit;
+ anv_foreach_image_aspect_bit(aspect_bit, src_image,
+ pRegions[r].srcSubresource.aspectMask) {
+ enum isl_aux_usage src_aux_usage =
+ anv_layout_to_aux_usage(&cmd_buffer->device->info, src_image,
+ (1 << aspect_bit), srcImageLayout);
+ enum isl_aux_usage dst_aux_usage =
+ anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_image,
+ (1 << aspect_bit), dstImageLayout);
+
+ anv_image_msaa_resolve(cmd_buffer,
+ src_image, src_aux_usage,
+ pRegions[r].srcSubresource.mipLevel,
+ pRegions[r].srcSubresource.baseArrayLayer,
+ dst_image, dst_aux_usage,
+ pRegions[r].dstSubresource.mipLevel,
+ pRegions[r].dstSubresource.baseArrayLayer,
+ (1 << aspect_bit),
+ pRegions[r].srcOffset.x,
+ pRegions[r].srcOffset.y,
+ pRegions[r].dstOffset.x,
+ pRegions[r].dstOffset.y,
+ pRegions[r].extent.width,
+ pRegions[r].extent.height,
+ layer_count, BLORP_FILTER_NONE);
}
}
-
- blorp_batch_finish(&batch);
}
static enum isl_aux_usage
return image->planes[plane].aux_usage;
}
-void
-anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
-{
- struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- struct anv_subpass *subpass = cmd_buffer->state.subpass;
-
- if (subpass->has_resolve) {
- struct blorp_batch batch;
- blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
-
- /* We are about to do some MSAA resolves. We need to flush so that the
- * result of writes to the MSAA color attachments show up in the sampler
- * when we blit to the single-sampled resolve target.
- */
- cmd_buffer->state.pending_pipe_bits |=
- ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
- ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
-
- for (uint32_t i = 0; i < subpass->color_count; ++i) {
- uint32_t src_att = subpass->color_attachments[i].attachment;
- uint32_t dst_att = subpass->resolve_attachments[i].attachment;
-
- if (dst_att == VK_ATTACHMENT_UNUSED)
- continue;
-
- assert(src_att < cmd_buffer->state.pass->attachment_count);
- assert(dst_att < cmd_buffer->state.pass->attachment_count);
-
- if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
- /* From the Vulkan 1.0 spec:
- *
- * If the first use of an attachment in a render pass is as a
- * resolve attachment, then the loadOp is effectively ignored
- * as the resolve is guaranteed to overwrite all pixels in the
- * render area.
- */
- cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
- }
-
- struct anv_image_view *src_iview = fb->attachments[src_att];
- struct anv_image_view *dst_iview = fb->attachments[dst_att];
-
- enum isl_aux_usage src_aux_usage =
- cmd_buffer->state.attachments[src_att].aux_usage;
- enum isl_aux_usage dst_aux_usage =
- cmd_buffer->state.attachments[dst_att].aux_usage;
-
- const VkRect2D render_area = cmd_buffer->state.render_area;
-
- assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
- dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);
-
- enum blorp_filter filter;
- if (isl_format_has_int_channel(src_iview->planes[0].isl.format)) {
- filter = BLORP_FILTER_SAMPLE_0;
- } else {
- filter = BLORP_FILTER_AVERAGE;
- }
-
- struct blorp_surf src_surf, dst_surf;
- get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
- VK_IMAGE_ASPECT_COLOR_BIT,
- ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
- src_aux_usage, &src_surf);
- if (src_aux_usage == ISL_AUX_USAGE_MCS) {
- src_surf.clear_color_addr = anv_to_blorp_address(
- anv_image_get_clear_color_addr(cmd_buffer->device,
- src_iview->image,
- VK_IMAGE_ASPECT_COLOR_BIT));
- }
- get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
- VK_IMAGE_ASPECT_COLOR_BIT,
- ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
- dst_aux_usage, &dst_surf);
-
- uint32_t base_src_layer = src_iview->planes[0].isl.base_array_layer;
- uint32_t base_dst_layer = dst_iview->planes[0].isl.base_array_layer;
-
- assert(src_iview->planes[0].isl.array_len >= fb->layers);
- assert(dst_iview->planes[0].isl.array_len >= fb->layers);
-
- anv_cmd_buffer_mark_image_written(cmd_buffer, dst_iview->image,
- VK_IMAGE_ASPECT_COLOR_BIT,
- dst_surf.aux_usage,
- dst_iview->planes[0].isl.base_level,
- base_dst_layer, fb->layers);
-
- assert(!src_iview->image->format->can_ycbcr);
- assert(!dst_iview->image->format->can_ycbcr);
-
- for (uint32_t i = 0; i < fb->layers; i++) {
- resolve_surface(&batch,
- &src_surf,
- src_iview->planes[0].isl.base_level,
- base_src_layer + i,
- &dst_surf,
- dst_iview->planes[0].isl.base_level,
- base_dst_layer + i,
- render_area.offset.x, render_area.offset.y,
- render_area.offset.x, render_area.offset.y,
- render_area.extent.width, render_area.extent.height,
- filter);
- }
- }
-
- blorp_batch_finish(&batch);
- }
-}
-
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
uint32_t base_level, uint32_t level_count,
uint32_t base_layer, uint32_t layer_count)
{
struct blorp_batch batch;
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
- assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);
+ /* We don't know who touched the main surface last so flush a bunch of
+ * caches to ensure we get good data.
+ */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
+ ANV_PIPE_DATA_CACHE_FLUSH_BIT |
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
+ ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
struct blorp_surf surf;
get_blorp_surf_for_anv_image(cmd_buffer->device,
- image, VK_IMAGE_ASPECT_COLOR_BIT,
+ image, aspect,
VK_IMAGE_LAYOUT_GENERAL,
ISL_AUX_USAGE_NONE, &surf);
assert(surf.aux_usage == ISL_AUX_USAGE_NONE);
- struct blorp_surf shadow_surf = {
- .surf = &image->planes[0].shadow_surface.isl,
- .addr = {
- .buffer = image->planes[0].address.bo,
- .offset = image->planes[0].address.offset +
- image->planes[0].shadow_surface.offset,
- .mocs = cmd_buffer->device->default_mocs,
- },
- };
+ struct blorp_surf shadow_surf;
+ get_blorp_surf_for_anv_shadow_image(cmd_buffer->device,
+ image, aspect, &shadow_surf);
for (uint32_t l = 0; l < level_count; l++) {
const uint32_t level = base_level + l;
}
}
+ /* We just wrote to the buffer with the render cache. Flush it. */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
+
blorp_batch_finish(&batch);
}
ISL_AUX_USAGE_NONE, &stencil);
}
+ /* Blorp may choose to clear stencil using RGBA32_UINT for better
+ * performance. If it does this, we need to flush it out of the depth
+ * cache before rendering to it.
+ */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+
blorp_clear_depth_stencil(&batch, &depth, &stencil,
level, base_layer, layer_count,
area.offset.x, area.offset.y,
(aspects & VK_IMAGE_ASPECT_STENCIL_BIT) ? 0xff : 0,
stencil_value);
+ /* Blorp may choose to clear stencil using RGBA32_UINT for better
+ * performance. If it does this, we need to flush it out of the render
+ * cache before someone starts trying to do stencil on it.
+ */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+
+ struct blorp_surf stencil_shadow;
+ if ((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
+ get_blorp_surf_for_anv_shadow_image(cmd_buffer->device, image,
+ VK_IMAGE_ASPECT_STENCIL_BIT,
+ &stencil_shadow)) {
+ union isl_color_value stencil_color = {
+ .u32 = { stencil_value },
+ };
+ blorp_clear(&batch, &stencil_shadow,
+ ISL_FORMAT_R8_UINT, ISL_SWIZZLE_IDENTITY,
+ level, base_layer, layer_count,
+ area.offset.x, area.offset.y,
+ area.offset.x + area.extent.width,
+ area.offset.y + area.extent.height,
+ stencil_color, NULL);
+ }
+
blorp_batch_finish(&batch);
}
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
+ enum isl_format format,
VkImageAspectFlagBits aspect,
uint32_t base_layer, uint32_t layer_count,
enum isl_aux_op mcs_op, union isl_color_value *clear_value,
switch (mcs_op) {
case ISL_AUX_OP_FAST_CLEAR:
- blorp_fast_clear(&batch, &surf, surf.surf->format,
+ blorp_fast_clear(&batch, &surf, format,
0, base_layer, layer_count,
0, 0, image->extent.width, image->extent.height);
break;
case ISL_AUX_OP_PARTIAL_RESOLVE:
- blorp_mcs_partial_resolve(&batch, &surf, surf.surf->format,
+ blorp_mcs_partial_resolve(&batch, &surf, format,
base_layer, layer_count);
break;
case ISL_AUX_OP_FULL_RESOLVE:
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
+ enum isl_format format,
VkImageAspectFlagBits aspect, uint32_t level,
uint32_t base_layer, uint32_t layer_count,
enum isl_aux_op ccs_op, union isl_color_value *clear_value,
switch (ccs_op) {
case ISL_AUX_OP_FAST_CLEAR:
- blorp_fast_clear(&batch, &surf, surf.surf->format,
+ blorp_fast_clear(&batch, &surf, format,
level, base_layer, layer_count,
0, 0, level_width, level_height);
break;
case ISL_AUX_OP_FULL_RESOLVE:
case ISL_AUX_OP_PARTIAL_RESOLVE:
blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
- surf.surf->format, ccs_op);
+ format, ccs_op);
break;
case ISL_AUX_OP_AMBIGUATE:
for (uint32_t a = 0; a < layer_count; a++) {