#include "anv_private.h"
#include "vk_format_info.h"
+#include "vk_util.h"
#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
}
}
+#if GEN_IS_HASWELL || GEN_GEN >= 8
+static void
+emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
+ lrr.SourceRegisterAddress = src;
+ lrr.DestinationRegisterAddress = dst;
+ }
+}
+#endif
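+
+/* Usage sketch (illustrative): mirror one MMIO register into another, e.g.
+ * copy the MI_PREDICATE_RESULT register (0x2418) into MI_PREDICATE_SRC0
+ * (0x2400):
+ *
+ *    emit_lrr(&cmd_buffer->batch, 0x2400, 0x2418);
+ */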
+
void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddress =
- (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
+ (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
sba.DynamicStateBaseAddressModifyEnable = true;
sba.IndirectObjectBaseAddressModifyEnable = true;
sba.InstructionBaseAddress =
- (struct anv_address) { &device->instruction_block_pool.bo, 0 };
+ (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
sba.InstructionMemoryObjectControlState = GENX(MOCS);
sba.InstructionBaseAddressModifyEnable = true;
static void
add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
- const struct anv_image_view *iview,
- enum isl_aux_usage aux_usage,
- struct anv_state state)
+ const struct anv_image_view *image_view,
+ const uint32_t plane,
+ struct anv_surface_state state)
{
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
+ const struct anv_image *image = image_view->image;
+ uint32_t image_plane = image_view->planes[plane].image_plane;
- add_surface_state_reloc(cmd_buffer, state, iview->bo, iview->offset);
-
- if (aux_usage != ISL_AUX_USAGE_NONE) {
- uint32_t aux_offset = iview->offset + iview->image->aux_surface.offset;
-
- /* On gen7 and prior, the bottom 12 bits of the MCS base address are
- * used to store other information. This should be ok, however, because
- * surface buffer addresses are always 4K page alinged.
- */
- assert((aux_offset & 0xfff) == 0);
- uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset;
- aux_offset += *aux_addr_dw & 0xfff;
+ add_surface_state_reloc(cmd_buffer, state.state,
+ image->planes[image_plane].bo, state.address);
+ if (state.aux_address) {
VkResult result =
anv_reloc_list_add(&cmd_buffer->surface_relocs,
&cmd_buffer->pool->alloc,
- state.offset + isl_dev->ss.aux_addr_offset,
- iview->bo, aux_offset);
+ state.state.offset + isl_dev->ss.aux_addr_offset,
+ image->planes[image_plane].bo, state.aux_address);
if (result != VK_SUCCESS)
anv_batch_set_error(&cmd_buffer->batch, result);
}
}
static void
-color_attachment_compute_aux_usage(struct anv_device *device,
- struct anv_attachment_state *att_state,
- struct anv_image_view *iview,
- VkRect2D render_area,
+color_attachment_compute_aux_usage(struct anv_device *device,
+ struct anv_cmd_state *cmd_state,
+ uint32_t att, VkRect2D render_area,
union isl_color_value *fast_clear_color)
{
- if (iview->image->aux_surface.isl.size == 0) {
+ struct anv_attachment_state *att_state = &cmd_state->attachments[att];
+ struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];
+
+ assert(iview->n_planes == 1);
+
+ if (iview->planes[0].isl.base_array_layer >=
+ anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level)) {
+ /* There is no aux buffer which corresponds to the level and layer(s)
+ * being accessed.
+ */
att_state->aux_usage = ISL_AUX_USAGE_NONE;
att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
att_state->fast_clear = false;
return;
- } else if (iview->image->aux_usage == ISL_AUX_USAGE_MCS) {
+ } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_MCS) {
att_state->aux_usage = ISL_AUX_USAGE_MCS;
att_state->input_aux_usage = ISL_AUX_USAGE_MCS;
att_state->fast_clear = false;
return;
+ } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E) {
+ att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
+ att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
+ } else {
+ att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
+ /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
+ *
+ * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
+ * setting is only allowed if Surface Format supported for Fast
+ * Clear. In addition, if the surface is bound to the sampling
+ * engine, Surface Format must be supported for Render Target
+ * Compression for surfaces bound to the sampling engine."
+ *
+ * In other words, we can only sample from a fast-cleared image if it
+ * also supports color compression.
+ */
+ if (isl_format_supports_ccs_e(&device->info, iview->planes[0].isl.format)) {
+ att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
+
+ /* While fast-clear resolves and partial resolves are fairly cheap in the
+ * case where you render to most of the pixels, full resolves are not
+ * because they potentially involve reading and writing the entire
+ * framebuffer. If we can't texture with CCS_E, we should leave it off and
+ * limit ourselves to fast clears.
+ */
+ if (cmd_state->pass->attachments[att].first_subpass_layout ==
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
+ anv_perf_warn(device->instance, iview->image,
+ "Not temporarily enabling CCS_E.");
+ }
+ } else {
+ att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
+ }
}
- assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);
+ assert(iview->image->planes[0].aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);
att_state->clear_color_is_zero_one =
- color_is_zero_one(att_state->clear_value.color, iview->isl.format);
+ color_is_zero_one(att_state->clear_value.color, iview->planes[0].isl.format);
+ att_state->clear_color_is_zero =
+ att_state->clear_value.color.uint32[0] == 0 &&
+ att_state->clear_value.color.uint32[1] == 0 &&
+ att_state->clear_value.color.uint32[2] == 0 &&
+ att_state->clear_value.color.uint32[3] == 0;
if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
/* Start off assuming fast clears are possible */
render_area.extent.height != iview->extent.height)
att_state->fast_clear = false;
- if (GEN_GEN <= 7) {
- /* On gen7, we can't do multi-LOD or multi-layer fast-clears. We
- * technically can, but it comes with crazy restrictions that we
- * don't want to deal with now.
- */
- if (iview->isl.base_level > 0 ||
- iview->isl.base_array_layer > 0 ||
- iview->isl.array_len > 1)
- att_state->fast_clear = false;
- }
-
/* On Broadwell and earlier, we can only handle 0/1 clear colors */
if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one)
att_state->fast_clear = false;
+ /* We allow fast clears when all aux layers of the miplevel are targeted.
+ * See add_fast_clear_state_buffer() for more information. Also, because
+ * we only either do a fast clear or a normal clear and not both, this
+ * complies with the gen7 restriction of not fast-clearing multiple
+ * layers.
+ */
+ if (cmd_state->framebuffer->layers !=
+ anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level)) {
+ att_state->fast_clear = false;
+ if (GEN_GEN == 7) {
+ anv_perf_warn(device->instance, iview->image,
+ "Not fast-clearing the first layer in "
+ "a multi-layer fast clear.");
+ }
+ }
+
+ /* We only allow fast clears in the GENERAL layout if the auxiliary
+ * buffer is always enabled and the fast-clear value is all 0's. See
+ * add_fast_clear_state_buffer() for more information.
+ */
+ if (cmd_state->pass->attachments[att].first_subpass_layout ==
+ VK_IMAGE_LAYOUT_GENERAL &&
+ (!att_state->clear_color_is_zero ||
+ iview->image->planes[0].aux_usage == ISL_AUX_USAGE_NONE)) {
+ att_state->fast_clear = false;
+ }
+
if (att_state->fast_clear) {
memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32,
sizeof(fast_clear_color->u32));
} else {
att_state->fast_clear = false;
}
-
- /**
- * TODO: Consider using a heuristic to determine if temporarily enabling
- * CCS_E for this image view would be beneficial.
- *
- * While fast-clear resolves and partial resolves are fairly cheap in the
- * case where you render to most of the pixels, full resolves are not
- * because they potentially involve reading and writing the entire
- * framebuffer. If we can't texture with CCS_E, we should leave it off and
- * limit ourselves to fast clears.
- */
- if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E) {
- att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
- att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
- } else if (att_state->fast_clear) {
- att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
- if (GEN_GEN >= 9 &&
- !isl_format_supports_ccs_e(&device->info, iview->isl.format)) {
- /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
- *
- * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
- * setting is only allowed if Surface Format supported for Fast
- * Clear. In addition, if the surface is bound to the sampling
- * engine, Surface Format must be supported for Render Target
- * Compression for surfaces bound to the sampling engine."
- *
- * In other words, we can't sample from a fast-cleared image if it
- * doesn't also support color compression.
- */
- att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
- } else if (GEN_GEN >= 8) {
- /* Broadwell/Skylake can sample from fast-cleared images */
- att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
- } else {
- /* Ivy Bridge and Haswell cannot */
- att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
- }
- } else {
- att_state->aux_usage = ISL_AUX_USAGE_NONE;
- att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
- }
}
static bool
* The undefined layout indicates that the user doesn't care about the data
* that's currently in the buffer. Therefore, a data-preserving resolve
* operation is not needed.
- *
- * The pre-initialized layout is equivalent to the undefined layout for
- * optimally-tiled images. Anv only exposes support for optimally-tiled
- * depth buffers.
*/
- if (image->aux_usage != ISL_AUX_USAGE_HIZ ||
- initial_layout == final_layout ||
- initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
- initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)
+ if (image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
return;
const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
- anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
- initial_layout);
+ anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
+ VK_IMAGE_ASPECT_DEPTH_BIT, initial_layout);
const bool enable_hiz = ISL_AUX_USAGE_HIZ ==
- anv_layout_to_aux_usage(&cmd_buffer->device->info, image, image->aspects,
- final_layout);
+ anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
+ VK_IMAGE_ASPECT_DEPTH_BIT, final_layout);
enum blorp_hiz_op hiz_op;
if (hiz_enabled && !enable_hiz) {
anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
}
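+/* MMIO offsets of the two 64-bit MI_PREDICATE source registers. */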
+#define MI_PREDICATE_SRC0 0x2400
+#define MI_PREDICATE_SRC1 0x2408
+
+/* Manages the state of a color image subresource to ensure resolves are
+ * performed properly.
+ */
+static void
+genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ unsigned level, bool needs_resolve)
+{
+ assert(cmd_buffer && image);
+ assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
+ assert(level < anv_image_aux_levels(image, aspect));
+
+ /* The HW docs say that there is no way to guarantee the completion of
+ * the following command. We use it nevertheless because it shows no
+ * issues in testing and is currently being used in the GL driver.
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
+ sdi.Address = anv_image_get_needs_resolve_addr(cmd_buffer->device,
+ image, aspect, level);
+ sdi.ImmediateData = needs_resolve;
+ }
+}
+
+static void
+genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ unsigned level)
+{
+ assert(cmd_buffer && image);
+ assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
+ assert(level < anv_image_aux_levels(image, aspect));
+
+ const struct anv_address resolve_flag_addr =
+ anv_image_get_needs_resolve_addr(cmd_buffer->device,
+ image, aspect, level);
+
+ /* Make the pending predicated resolve a no-op if one is not needed.
+ * predicate = do_resolve = resolve_flag != 0;
+ */
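+ /* MI_PREDICATE below computes this as follows (a sketch of the hardware
+ * semantics as we understand them): COMPARE_SRCS_EQUAL tests
+ * SRC0 == SRC1, and LOAD_LOADINV loads the inverted result, so with
+ * SRC1 == 0 the predicate becomes (resolve_flag != 0).
+ */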
+ emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 , 0);
+ emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 + 4, 0);
+ emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC0 , 0);
+ emit_lrm(&cmd_buffer->batch, MI_PREDICATE_SRC0 + 4,
+ resolve_flag_addr.bo, resolve_flag_addr.offset);
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
+ mip.LoadOperation = LOAD_LOADINV;
+ mip.CombineOperation = COMBINE_SET;
+ mip.CompareOperation = COMPARE_SRCS_EQUAL;
+ }
+}
+
+static void
+init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ unsigned level)
+{
+ assert(cmd_buffer && image);
+ assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
+ assert(level < anv_image_aux_levels(image, aspect));
+
+ uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
+ enum isl_aux_usage aux_usage = image->planes[plane].aux_usage;
+
+ /* The resolve flag should be updated to signify that fast-clear/compression
+ * data needs to be removed when leaving the undefined layout. Such data
+ * may need to be removed if it would cause accesses to the color buffer
+ * to return incorrect data. The fast clear data in CCS_D buffers should
+ * be removed because CCS_D isn't enabled all the time.
+ */
+ genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level,
+ aux_usage == ISL_AUX_USAGE_NONE);
+
+ /* The fast clear value dword(s) will be copied into a surface state object.
+ * Ensure that the restrictions of the fields in the dword(s) are followed.
+ *
+ * CCS buffers on SKL+ can have any value set for the clear colors.
+ */
+ if (image->samples == 1 && GEN_GEN >= 9)
+ return;
+
+ /* Other combinations of auxiliary buffers and platforms require specific
+ * values in the clear value dword(s).
+ */
+ struct anv_address addr =
+ anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level);
+ unsigned i = 0;
+ for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
+ sdi.Address = addr;
+
+ if (GEN_GEN >= 9) {
+ /* MCS buffers on SKL+ can only have 1/0 clear colors. */
+ assert(aux_usage == ISL_AUX_USAGE_MCS);
+ sdi.ImmediateData = 0;
+ } else if (GEN_VERSIONx10 >= 75) {
+ /* Pre-SKL, the dword containing the clear values also contains
+ * other fields, so we need to initialize those fields to match the
+ * values that would be in a color attachment.
+ */
+ assert(i == 0);
+ sdi.ImmediateData = ISL_CHANNEL_SELECT_RED << 25 |
+ ISL_CHANNEL_SELECT_GREEN << 22 |
+ ISL_CHANNEL_SELECT_BLUE << 19 |
+ ISL_CHANNEL_SELECT_ALPHA << 16;
+ } else if (GEN_VERSIONx10 == 70) {
+ /* On IVB, the dword containing the clear values also contains
+ * other fields that must be zero or can be zero.
+ */
+ assert(i == 0);
+ sdi.ImmediateData = 0;
+ }
+ }
+
+ addr.offset += 4;
+ }
+}
+
+/* Copy the fast-clear value dword(s) between a surface state object and an
+ * image's fast clear state buffer.
+ */
+static void
+genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_state surface_state,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ unsigned level,
+ bool copy_from_surface_state)
+{
+ assert(cmd_buffer && image);
+ assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
+ assert(level < anv_image_aux_levels(image, aspect));
+
+ struct anv_bo *ss_bo =
+ &cmd_buffer->device->surface_state_pool.block_pool.bo;
+ uint32_t ss_clear_offset = surface_state.offset +
+ cmd_buffer->device->isl_dev.ss.clear_value_offset;
+ const struct anv_address entry_addr =
+ anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level);
+ unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
+
+ if (copy_from_surface_state) {
+ genX(cmd_buffer_mi_memcpy)(cmd_buffer, entry_addr.bo, entry_addr.offset,
+ ss_bo, ss_clear_offset, copy_size);
+ } else {
+ genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_bo, ss_clear_offset,
+ entry_addr.bo, entry_addr.offset, copy_size);
+
+ /* Updating a surface state object may require that the state cache be
+ * invalidated. From the SKL PRM, Shared Functions -> State -> State
+ * Caching:
+ *
+ * Whenever the RENDER_SURFACE_STATE object in memory pointed to by
+ * the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
+ * modified [...], the L1 state cache must be invalidated to ensure
+ * the new surface or sampler state is fetched from system memory.
+ *
+ * In testing, SKL doesn't actually seem to need this, but HSW does.
+ */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
+ }
+}
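+
+/* Usage sketch (illustrative): after recording a fast clear, the clear color
+ * can be copied from the image's fast clear state buffer into an
+ * attachment's surface state by passing copy_from_surface_state = false:
+ *
+ *    genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
+ *                                 image, VK_IMAGE_ASPECT_COLOR_BIT,
+ *                                 level, false);
+ */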
+
+/**
+ * @brief Transitions a color buffer from one layout to another.
+ *
+ * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
+ * more information.
+ *
+ * @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
+ * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
+ * this represents the maximum layers to transition at each
+ * specified miplevel.
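+ *
+ * Illustrative call (hypothetical values): transition all array layers of
+ * miplevels 0..2 of a color image from UNDEFINED to
+ * COLOR_ATTACHMENT_OPTIMAL:
+ *
+ *    transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
+ *                            0, 3, 0, image->array_size,
+ *                            VK_IMAGE_LAYOUT_UNDEFINED,
+ *                            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);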
+ */
+static void
+transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ const uint32_t base_level, uint32_t level_count,
+ uint32_t base_layer, uint32_t layer_count,
+ VkImageLayout initial_layout,
+ VkImageLayout final_layout)
+{
+ /* Validate the inputs. */
+ assert(cmd_buffer);
+ assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
+ /* These values aren't supported for simplicity's sake. */
+ assert(level_count != VK_REMAINING_MIP_LEVELS &&
+ layer_count != VK_REMAINING_ARRAY_LAYERS);
+ /* Ensure the subresource range is valid. */
+ uint64_t last_level_num = base_level + level_count;
+ const uint32_t max_depth = anv_minify(image->extent.depth, base_level);
+ UNUSED const uint32_t image_layers = MAX2(image->array_size, max_depth);
+ assert((uint64_t)base_layer + layer_count <= image_layers);
+ assert(last_level_num <= image->levels);
+ /* The spec disallows these final layouts. */
+ assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
+ final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED);
+
+ /* No work is necessary if the layout stays the same or if this subresource
+ * range lacks auxiliary data.
+ */
+ if (initial_layout == final_layout)
+ return;
+
+ uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
+
+ if (image->planes[plane].shadow_surface.isl.size > 0 &&
+ final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
+ /* This surface is a linear compressed image with a tiled shadow surface
+ * for texturing. The client is about to use it in READ_ONLY_OPTIMAL so
+ * we need to ensure the shadow copy is up-to-date.
+ */
+ assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
+ assert(image->planes[plane].surface.isl.tiling == ISL_TILING_LINEAR);
+ assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
+ assert(isl_format_is_compressed(image->planes[plane].surface.isl.format));
+ assert(plane == 0);
+ anv_image_copy_to_shadow(cmd_buffer, image,
+ base_level, level_count,
+ base_layer, layer_count);
+ }
+
+ if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
+ return;
+
+ /* A transition of a 3D subresource works on all slices at once. */
+ if (image->type == VK_IMAGE_TYPE_3D) {
+ base_layer = 0;
+ layer_count = anv_minify(image->extent.depth, base_level);
+ }
+
+ /* We're interested in the subresource range subset that has aux data. */
+ level_count = MIN2(level_count, anv_image_aux_levels(image, aspect) - base_level);
+ layer_count = MIN2(layer_count,
+ anv_image_aux_layers(image, aspect, base_level) - base_layer);
+ last_level_num = base_level + level_count;
+
+ /* Record whether or not the layout is undefined. Pre-initialized images
+ * with auxiliary buffers must be tiled (non-linear), so PREINITIALIZED is
+ * equivalent to UNDEFINED for them.
+ */
+ assert(image->tiling == VK_IMAGE_TILING_OPTIMAL);
+ const bool undef_layout = initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
+ initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED;
+
+ /* Do preparatory work before the resolve operation or return early if no
+ * resolve is actually needed.
+ */
+ if (undef_layout) {
+ /* A subresource in the undefined layout may have been aliased and
+ * populated with any arrangement of bits. Therefore, we must initialize
+ * the related aux buffer and clear buffer entry with desirable values.
+ *
+ * Initialize the relevant clear buffer entries.
+ */
+ for (unsigned level = base_level; level < last_level_num; level++)
+ init_fast_clear_state_entry(cmd_buffer, image, aspect, level);
+
+ /* Initialize the aux buffers to enable correct rendering. This operation
+ * requires up to two steps: one to rid the aux buffer of data that may
+ * cause GPU hangs, and another to ensure that writes done without aux
+ * will be visible to reads done with aux.
+ *
+ * Having an aux buffer with invalid data is possible for CCS buffers on
+ * SKL+ and for MCS buffers with certain sample counts (2x and 8x). One
+ * easy way to get to a valid state is to fast-clear the specified range.
+ *
+ * Even for MCS buffers that have sample counts that don't require
+ * certain bits to be reserved (4x and 16x), we're unsure if the hardware
+ * will be okay with the sample mappings given by the undefined buffer.
+ * We don't have any data to show that this is a problem, but we want to
+ * avoid causing difficult-to-debug problems.
+ */
+ if ((GEN_GEN >= 9 && image->samples == 1) || image->samples > 1) {
+ if (image->samples == 4 || image->samples == 16) {
+ anv_perf_warn(cmd_buffer->device->instance, image,
+ "Doing a potentially unnecessary fast-clear to "
+ "define an MCS buffer.");
+ }
+
+ if (image->samples == 1) {
+ for (uint32_t l = 0; l < level_count; l++) {
+ const uint32_t level = base_level + l;
+ const uint32_t level_layer_count =
+ MIN2(layer_count, anv_image_aux_layers(image, aspect, level));
+ anv_image_ccs_op(cmd_buffer, image, aspect, level,
+ base_layer, level_layer_count,
+ ISL_AUX_OP_FAST_CLEAR, false);
+ }
+ } else {
+ assert(image->samples > 1);
+ assert(base_level == 0 && level_count == 1);
+ anv_image_mcs_op(cmd_buffer, image, aspect,
+ base_layer, layer_count,
+ ISL_AUX_OP_FAST_CLEAR, false);
+ }
+ }
+ /* At this point, some elements of the CCS buffer may have the fast-clear
+ * bit-arrangement. As the user writes to a subresource, we need to have
+ * the associated CCS elements enter the ambiguated state. This enables
+ * reads (implicit or explicit) to reflect the user-written data instead
+ * of the clear color. The only time such elements will not change their
+ * state as described above, is in a final layout that doesn't have CCS
+ * enabled. In this case, we must force the associated CCS buffers of the
+ * specified range to enter the ambiguated state in advance.
+ */
+ if (image->samples == 1 &&
+ image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E &&
+ final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
+ /* The CCS_D buffer may not be enabled in the final layout. Continue
+ * executing this function to perform a resolve.
+ */
+ anv_perf_warn(cmd_buffer->device->instance, image,
+ "Performing an additional resolve for CCS_D layout "
+ "transition. Consider always leaving it on or "
+ "performing an ambiguation pass.");
+ } else {
+ /* Writes in the final layout will be aware of the auxiliary buffer.
+ * In addition, the clear buffer entries and the auxiliary buffers
+ * have been populated with values that will result in correct
+ * rendering.
+ */
+ return;
+ }
+ } else if (initial_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
+ /* Resolves are only necessary if the subresource may contain blocks
+ * fast-cleared to values unsupported in other layouts. This only occurs
+ * if the initial layout is COLOR_ATTACHMENT_OPTIMAL.
+ */
+ return;
+ } else if (image->samples > 1) {
+ /* MCS buffers don't need resolving. */
+ return;
+ }
+
+ /* Perform a resolve to synchronize data between the main and aux buffer.
+ * Before we begin, we must satisfy the cache flushing requirement specified
+ * in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
+ *
+ * Any transition from any value in {Clear, Render, Resolve} to a
+ * different value in {Clear, Render, Resolve} requires end of pipe
+ * synchronization.
+ *
+ * We perform a flush of the write cache before and after the clear and
+ * resolve operations to meet this requirement.
+ *
+ * Unlike other drawing, fast clear operations are not properly
+ * synchronized. The first PIPE_CONTROL here likely ensures that the
+ * contents of the previous render or clear hit the render target before we
+ * resolve and the second likely ensures that the resolve is complete before
+ * we do any more rendering or clearing.
+ */
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+
+ for (uint32_t level = base_level; level < last_level_num; level++) {
+
+ /* The number of layers changes at each 3D miplevel. */
+ if (image->type == VK_IMAGE_TYPE_3D) {
+ layer_count = MIN2(layer_count, anv_image_aux_layers(image, aspect, level));
+ }
+
+ genX(load_needs_resolve_predicate)(cmd_buffer, image, aspect, level);
+
+ anv_image_ccs_op(cmd_buffer, image, aspect, level,
+ base_layer, layer_count,
+ image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E ?
+ ISL_AUX_OP_PARTIAL_RESOLVE : ISL_AUX_OP_FULL_RESOLVE,
+ true);
+
+ genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level, false);
+ }
+
+ cmd_buffer->state.pending_pipe_bits |=
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+}
/**
* Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
vk_free(&cmd_buffer->pool->alloc, state->attachments);
- if (pass->attachment_count == 0) {
+ if (pass->attachment_count > 0) {
+ state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
+ pass->attachment_count *
+ sizeof(state->attachments[0]),
+ 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (state->attachments == NULL) {
+ /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
+ return anv_batch_set_error(&cmd_buffer->batch,
+ VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ } else {
state->attachments = NULL;
- return VK_SUCCESS;
}
- state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
- pass->attachment_count *
- sizeof(state->attachments[0]),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (state->attachments == NULL) {
- /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
- return anv_batch_set_error(&cmd_buffer->batch,
- VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- bool need_null_state = false;
- unsigned num_states = 0;
+ /* Reserve one for the NULL state. */
+ unsigned num_states = 1;
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
- if (vk_format_is_color(pass->attachments[i].format)) {
+ if (vk_format_is_color(pass->attachments[i].format))
num_states++;
- } else {
- /* We need a null state for any depth-stencil-only subpasses.
- * Importantly, this includes depth/stencil clears so we create one
- * whenever we have depth or stencil
- */
- need_null_state = true;
- }
if (need_input_attachment_state(&pass->attachments[i]))
num_states++;
}
- num_states += need_null_state;
const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
state->render_pass_states =
struct anv_state next_state = state->render_pass_states;
next_state.alloc_size = isl_dev->ss.size;
- if (need_null_state) {
- state->null_surface_state = next_state;
- next_state.offset += ss_stride;
- next_state.map += ss_stride;
- }
+ state->null_surface_state = next_state;
+ next_state.offset += ss_stride;
+ next_state.map += ss_stride;
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
if (vk_format_is_color(pass->attachments[i].format)) {
- state->attachments[i].color_rt_state = next_state;
+ state->attachments[i].color.state = next_state;
next_state.offset += ss_stride;
next_state.map += ss_stride;
}
if (need_input_attachment_state(&pass->attachments[i])) {
- state->attachments[i].input_att_state = next_state;
+ state->attachments[i].input.state = next_state;
next_state.offset += ss_stride;
next_state.map += ss_stride;
}
ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
assert(pass->attachment_count == framebuffer->attachment_count);
- if (need_null_state) {
- struct GENX(RENDER_SURFACE_STATE) null_ss = {
- .SurfaceType = SURFTYPE_NULL,
- .SurfaceArray = framebuffer->layers > 0,
- .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
-#if GEN_GEN >= 8
- .TileMode = YMAJOR,
-#else
- .TiledSurface = true,
-#endif
- .Width = framebuffer->width - 1,
- .Height = framebuffer->height - 1,
- .Depth = framebuffer->layers - 1,
- .RenderTargetViewExtent = framebuffer->layers - 1,
- };
- GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
- &null_ss);
- }
+ isl_null_fill_state(isl_dev, state->null_surface_state.map,
+ isl_extent3d(framebuffer->width,
+ framebuffer->height,
+ framebuffer->layers));
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
struct anv_render_pass_attachment *att = &pass->attachments[i];
VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
VkImageAspectFlags clear_aspects = 0;
- if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
+ if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
/* color attachment */
if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
struct anv_image_view *iview = framebuffer->attachments[i];
anv_assert(iview->vk_format == att->format);
+ anv_assert(iview->n_planes == 1);
union isl_color_value clear_color = { .u32 = { 0, } };
- if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
+ if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
+ assert(att_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
color_attachment_compute_aux_usage(cmd_buffer->device,
- &state->attachments[i],
- iview, begin->renderArea,
+ state, i, begin->renderArea,
&clear_color);
- struct isl_view view = iview->isl;
- view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
- view.swizzle = anv_swizzle_for_render(view.swizzle);
- isl_surf_fill_state(isl_dev,
- state->attachments[i].color_rt_state.map,
- .surf = &iview->image->color_surface.isl,
- .view = &view,
- .aux_surf = &iview->image->aux_surface.isl,
- .aux_usage = state->attachments[i].aux_usage,
- .clear_color = clear_color,
- .mocs = cmd_buffer->device->default_mocs);
-
- add_image_view_relocs(cmd_buffer, iview,
- state->attachments[i].aux_usage,
- state->attachments[i].color_rt_state);
+ anv_image_fill_surface_state(cmd_buffer->device,
+ iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ &iview->planes[0].isl,
+ ISL_SURF_USAGE_RENDER_TARGET_BIT,
+ state->attachments[i].aux_usage,
+ &clear_color,
+ 0,
+ &state->attachments[i].color,
+ NULL);
+
+ add_image_view_relocs(cmd_buffer, iview, 0,
+ state->attachments[i].color);
} else {
/* This field will be initialized after the first subpass
* transition.
}
if (need_input_attachment_state(&pass->attachments[i])) {
- struct isl_view view = iview->isl;
- view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
- isl_surf_fill_state(isl_dev,
- state->attachments[i].input_att_state.map,
- .surf = &iview->image->color_surface.isl,
- .view = &view,
- .aux_surf = &iview->image->aux_surface.isl,
- .aux_usage = state->attachments[i].input_aux_usage,
- .clear_color = clear_color,
- .mocs = cmd_buffer->device->default_mocs);
-
- add_image_view_relocs(cmd_buffer, iview,
- state->attachments[i].input_aux_usage,
- state->attachments[i].input_att_state);
+ anv_image_fill_surface_state(cmd_buffer->device,
+ iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ &iview->planes[0].isl,
+ ISL_SURF_USAGE_TEXTURE_BIT,
+ state->attachments[i].input_aux_usage,
+ &clear_color,
+ 0,
+ &state->attachments[i].input,
+ NULL);
+
+ add_image_view_relocs(cmd_buffer, iview, 0,
+ state->attachments[i].input);
}
}
-
- anv_state_flush(cmd_buffer->device, state->render_pass_states);
}
return VK_SUCCESS;
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
+ /* We sometimes store vertex data in the dynamic state buffer for blorp
+ * operations and our dynamic state stream may re-use data from previous
+ * command buffers. In order to prevent stale cache data, we flush the VF
+ * cache. We could do this on every blorp call but that's not really
+ * needed as all of the data will get written by the CPU prior to the GPU
+ * executing anything. The chances are fairly high that they will use
+ * blorp at least once per primary command buffer so it shouldn't be
+ * wasted.
+ */
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
+
+ /* We send an "Indirect State Pointers Disable" packet at
+ * EndCommandBuffer, so all push contant packets are ignored during a
+ * context restore. Documentation says after that command, we need to
+ * emit push constants again before any rendering operation. So we
+ * flag them dirty here to make sure they get emitted.
+ */
+ cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
+
VkResult result = VK_SUCCESS;
if (cmd_buffer->usage_flags &
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.pass =
anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
cmd_buffer->state.subpass =
&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
- cmd_buffer->state.framebuffer = NULL;
+
+ /* This is optional in the inheritance info. */
+ cmd_buffer->state.framebuffer =
+ anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
result = genX(cmd_buffer_setup_attachments)(cmd_buffer,
cmd_buffer->state.pass, NULL);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+ /* Record that HiZ is enabled if we can. */
+ if (cmd_buffer->state.framebuffer) {
+ const struct anv_image_view * const iview =
+ anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+
+ if (iview) {
+ VkImageLayout layout =
+ cmd_buffer->state.subpass->depth_stencil_attachment.layout;
+
+ enum isl_aux_usage aux_usage =
+ anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
+ VK_IMAGE_ASPECT_DEPTH_BIT, layout);
+
+ cmd_buffer->state.hiz_enabled = aux_usage == ISL_AUX_USAGE_HIZ;
+ }
+ }
+
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
}
return result;
}
+/* From the PRM, Volume 2a:
+ *
+ * "Indirect State Pointers Disable
+ *
+ * At the completion of the post-sync operation associated with this pipe
+ * control packet, the indirect state pointers in the hardware are
+ * considered invalid; the indirect pointers are not saved in the context.
+ * If any new indirect state commands are executed in the command stream
+ * while the pipe control is pending, the new indirect state commands are
+ * preserved.
+ *
+ * [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context
+ * restoring of Push Constant (3DSTATE_CONSTANT_*) commands. Push Constant
+ * commands are only considered as Indirect State Pointers. Once ISP is
+ * issued in a context, SW must initialize by programming push constant
+ * commands for all the shaders (at least to zero length) before attempting
+ * any rendering operation for the same context."
+ *
+ * 3DSTATE_CONSTANT_* packets are restored during a context restore,
+ * even though they point to a BO that has already been unreferenced at
+ * the end of the previous batch buffer. This has been fine so far since
+ * we are protected by the scratch page (every address not covered by
+ * a BO should be pointing to the scratch page). But on CNL, it is
+ * causing a GPU hang during context restore at the 3DSTATE_CONSTANT_*
+ * instruction.
+ *
+ * The flag "Indirect State Pointers Disable" in PIPE_CONTROL tells the
+ * hardware to ignore previous 3DSTATE_CONSTANT_* packets during a
+ * context restore, so the mentioned hang doesn't happen. However,
+ * software must program push constant commands for all stages prior to
+ * rendering anything. So we flag them dirty in BeginCommandBuffer.
+ */
+static void
+emit_isp_disable(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.IndirectStatePointersDisable = true;
+ pc.CommandStreamerStallEnable = true;
+ }
+}
+
VkResult
genX(EndCommandBuffer)(
VkCommandBuffer commandBuffer)
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+ emit_isp_disable(cmd_buffer);
+
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
return VK_SUCCESS;
*/
genX(cmd_buffer_enable_pma_fix)(primary, false);
+ /* Secondary command buffers don't know which textures etc. have been
+ * flushed prior to their execution. Apply those flushes now.
+ */
+ genX(cmd_buffer_apply_pipe_flushes)(primary);
+
for (uint32_t i = 0; i < commandBufferCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
* copy the surface states for the current subpass into the storage
* we allocated for them in BeginCommandBuffer.
*/
- struct anv_bo *ss_bo = &primary->device->surface_state_block_pool.bo;
+ struct anv_bo *ss_bo =
+ &primary->device->surface_state_pool.block_pool.bo;
struct anv_state src_state = primary->state.render_pass_states;
struct anv_state dst_state = secondary->state.render_pass_states;
assert(src_state.alloc_size == dst_state.alloc_size);
- genX(cmd_buffer_gpu_memcpy)(primary, ss_bo, dst_state.offset,
- ss_bo, src_state.offset,
- src_state.alloc_size);
+ genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset,
+ ss_bo, src_state.offset,
+ src_state.alloc_size);
}
anv_cmd_buffer_add_secondary(primary, secondary);
}
+ /* The secondary may have selected a different pipeline (3D or compute) and
+ * may have changed the current L3$ configuration. Reset our tracking
+ * variables to invalid values to ensure that we re-emit these in the case
+ * where we do any draws or compute dispatches from the primary after the
+ * secondary has returned.
+ */
+ primary->state.current_pipeline = UINT32_MAX;
+ primary->state.current_l3_config = NULL;
+
/* Each of the secondary command buffers will use its own state base
* address. We need to re-emit state base address for the primary after
* all of the secondaries are done.
return;
if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
- fprintf(stderr, "L3 config transition: ");
+ intel_logd("L3 config transition: ");
gen_dump_l3_config(cfg, stderr);
}
anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
.SLMEnable = has_slm,
.URBLowBandwidth = urb_low_bw,
- .URBAllocation = cfg->n[GEN_L3P_URB],
+ .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
#if !GEN_IS_HASWELL
.ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- uint32_t b;
/* XXX: Right now, we're really dumb and just flush whatever categories
* the app asks for. One of these days we may make this a bit better
src_flags |= pImageMemoryBarriers[i].srcAccessMask;
dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
- if (pImageMemoryBarriers[i].subresourceRange.aspectMask &
- VK_IMAGE_ASPECT_DEPTH_BIT) {
+ const VkImageSubresourceRange *range =
+ &pImageMemoryBarriers[i].subresourceRange;
+
+ if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
transition_depth_buffer(cmd_buffer, image,
pImageMemoryBarriers[i].oldLayout,
pImageMemoryBarriers[i].newLayout);
+ } else if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
+ VkImageAspectFlags color_aspects =
+ anv_image_expand_aspects(image, range->aspectMask);
+ uint32_t aspect_bit;
+
+ anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
+ transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
+ range->baseMipLevel,
+ anv_get_levelCount(image, range),
+ range->baseArrayLayer,
+ anv_get_layerCount(image, range),
+ pImageMemoryBarriers[i].oldLayout,
+ pImageMemoryBarriers[i].newLayout);
+ }
}
}
- enum anv_pipe_bits pipe_bits = 0;
-
- for_each_bit(b, src_flags) {
- switch ((VkAccessFlagBits)(1 << b)) {
- case VK_ACCESS_SHADER_WRITE_BIT:
- pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
- break;
- case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
- break;
- case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
- break;
- case VK_ACCESS_TRANSFER_WRITE_BIT:
- pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
- pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
- break;
- default:
- break; /* Nothing to do */
- }
- }
-
- for_each_bit(b, dst_flags) {
- switch ((VkAccessFlagBits)(1 << b)) {
- case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
- case VK_ACCESS_INDEX_READ_BIT:
- case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
- pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
- break;
- case VK_ACCESS_UNIFORM_READ_BIT:
- pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
- pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
- break;
- case VK_ACCESS_SHADER_READ_BIT:
- case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
- case VK_ACCESS_TRANSFER_READ_BIT:
- pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
- break;
- default:
- break; /* Nothing to do */
- }
- }
-
- cmd_buffer->state.pending_pipe_bits |= pipe_bits;
+ cmd_buffer->state.pending_pipe_bits |=
+ anv_pipe_flush_bits_for_access_flags(src_flags) |
+ anv_pipe_invalidate_bits_for_access_flags(dst_flags);
}
static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
- VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;
+ VkShaderStageFlags stages =
+ cmd_buffer->state.gfx.base.pipeline->active_stages;
/* In order to avoid thrash, we assume that vertex and fragment stages
* always exist. In the rare case where one is missing *and* the other
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}
+static const struct anv_descriptor *
+anv_descriptor_for_binding(const struct anv_cmd_pipeline_state *pipe_state,
+ const struct anv_pipeline_binding *binding)
+{
+ assert(binding->set < MAX_SETS);
+ const struct anv_descriptor_set *set =
+ pipe_state->descriptors[binding->set];
+ const uint32_t offset =
+ set->layout->binding[binding->binding].descriptor_index;
+ return &set->descriptors[offset + binding->index];
+}
+
+static uint32_t
+dynamic_offset_for_binding(const struct anv_cmd_pipeline_state *pipe_state,
+ const struct anv_pipeline_binding *binding)
+{
+ assert(binding->set < MAX_SETS);
+ const struct anv_descriptor_set *set =
+ pipe_state->descriptors[binding->set];
+
+ uint32_t dynamic_offset_idx =
+ pipe_state->layout->set[binding->set].dynamic_offset_start +
+ set->layout->binding[binding->binding].dynamic_offset_index +
+ binding->index;
+
+ return pipe_state->dynamic_offsets[dynamic_offset_idx];
+}
+
static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage,
struct anv_state *bt_state)
{
struct anv_subpass *subpass = cmd_buffer->state.subpass;
+ struct anv_cmd_pipeline_state *pipe_state;
struct anv_pipeline *pipeline;
uint32_t bias, state_offset;
switch (stage) {
case MESA_SHADER_COMPUTE:
- pipeline = cmd_buffer->state.compute_pipeline;
+ pipe_state = &cmd_buffer->state.compute.base;
bias = 1;
break;
default:
- pipeline = cmd_buffer->state.pipeline;
+ pipe_state = &cmd_buffer->state.gfx.base;
bias = 0;
break;
}
+ pipeline = pipe_state->pipeline;
if (!anv_pipeline_has_stage(pipeline, stage)) {
*bt_state = (struct anv_state) { 0, };
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
if (stage == MESA_SHADER_COMPUTE &&
- get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
- struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
- uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
+ get_cs_prog_data(pipeline)->uses_num_work_groups) {
+ struct anv_bo *bo = cmd_buffer->state.compute.num_workgroups.bo;
+ uint32_t bo_offset = cmd_buffer->state.compute.num_workgroups.offset;
struct anv_state surface_state;
surface_state =
assert(stage == MESA_SHADER_FRAGMENT);
assert(binding->binding == 0);
if (binding->index < subpass->color_count) {
- const unsigned att = subpass->color_attachments[binding->index].attachment;
- surface_state = cmd_buffer->state.attachments[att].color_rt_state;
+ const unsigned att =
+ subpass->color_attachments[binding->index].attachment;
+
+ /* From the Vulkan 1.0.46 spec:
+ *
+ * "If any color or depth/stencil attachments are
+ * VK_ATTACHMENT_UNUSED, then no writes occur for those
+ * attachments."
+ */
+ if (att == VK_ATTACHMENT_UNUSED) {
+ surface_state = cmd_buffer->state.null_surface_state;
+ } else {
+ surface_state = cmd_buffer->state.attachments[att].color.state;
+ }
} else {
surface_state = cmd_buffer->state.null_surface_state;
}
continue;
}
- struct anv_descriptor_set *set =
- cmd_buffer->state.descriptors[binding->set];
- uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
- struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
+ const struct anv_descriptor *desc =
+ anv_descriptor_for_binding(pipe_state, binding);
switch (desc->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
continue;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- surface_state = desc->aux_usage == ISL_AUX_USAGE_NONE ?
- desc->image_view->no_aux_sampler_surface_state :
- desc->image_view->sampler_surface_state;
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
+ struct anv_surface_state sstate =
+ (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
+ desc->image_view->planes[binding->plane].general_sampler_surface_state :
+ desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
+ surface_state = sstate.state;
assert(surface_state.alloc_size);
add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->aux_usage, surface_state);
+ binding->plane, sstate);
break;
+ }
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
assert(stage == MESA_SHADER_FRAGMENT);
- if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
+ if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) {
/* For depth and stencil input attachments, we treat it like any
* old texture that a user may have bound.
*/
- surface_state = desc->aux_usage == ISL_AUX_USAGE_NONE ?
- desc->image_view->no_aux_sampler_surface_state :
- desc->image_view->sampler_surface_state;
+ struct anv_surface_state sstate =
+ (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
+ desc->image_view->planes[binding->plane].general_sampler_surface_state :
+ desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
+ surface_state = sstate.state;
assert(surface_state.alloc_size);
add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->aux_usage, surface_state);
+ binding->plane, sstate);
} else {
/* For color input attachments, we create the surface state at
* vkBeginRenderPass time so that we can include aux and clear
assert(binding->input_attachment_index < subpass->input_count);
const unsigned subpass_att = binding->input_attachment_index;
const unsigned att = subpass->input_attachments[subpass_att].attachment;
- surface_state = cmd_buffer->state.attachments[att].input_att_state;
+ surface_state = cmd_buffer->state.attachments[att].input.state;
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
- surface_state = (binding->write_only)
- ? desc->image_view->writeonly_storage_surface_state
- : desc->image_view->storage_surface_state;
+ struct anv_surface_state sstate = (binding->write_only)
+ ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
+ : desc->image_view->planes[binding->plane].storage_surface_state;
+ surface_state = sstate.state;
assert(surface_state.alloc_size);
add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->image_view->image->aux_usage,
- surface_state);
+ binding->plane, sstate);
struct brw_image_param *image_param =
&cmd_buffer->state.push_constants[stage]->images[image++];
- *image_param = desc->image_view->storage_image_param;
+ *image_param = desc->image_view->planes[binding->plane].storage_image_param;
image_param->surface_idx = bias + s;
break;
}
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
- uint32_t dynamic_offset_idx =
- pipeline->layout->set[binding->set].dynamic_offset_start +
- set->layout->binding[binding->binding].dynamic_offset_index +
- binding->index;
-
/* Compute the offset within the buffer */
- uint64_t offset = desc->offset +
- cmd_buffer->state.dynamic_offsets[dynamic_offset_idx];
+ uint32_t dynamic_offset =
+ dynamic_offset_for_binding(pipe_state, binding);
+ uint64_t offset = desc->offset + dynamic_offset;
/* Clamp to the buffer size */
offset = MIN2(offset, desc->buffer->size);
/* Clamp the range to the buffer size */
gl_shader_stage stage,
struct anv_state *state)
{
- struct anv_pipeline *pipeline;
-
- if (stage == MESA_SHADER_COMPUTE)
- pipeline = cmd_buffer->state.compute_pipeline;
- else
- pipeline = cmd_buffer->state.pipeline;
+ struct anv_cmd_pipeline_state *pipe_state =
+ stage == MESA_SHADER_COMPUTE ? &cmd_buffer->state.compute.base :
+ &cmd_buffer->state.gfx.base;
+ struct anv_pipeline *pipeline = pipe_state->pipeline;
if (!anv_pipeline_has_stage(pipeline, stage)) {
*state = (struct anv_state) { 0, };
for (uint32_t s = 0; s < map->sampler_count; s++) {
struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
- struct anv_descriptor_set *set =
- cmd_buffer->state.descriptors[binding->set];
- uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
- struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
+ const struct anv_descriptor *desc =
+ anv_descriptor_for_binding(pipe_state, binding);
if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
continue;
memcpy(state->map + (s * 16),
- sampler->state, sizeof(sampler->state));
+ sampler->state[binding->plane], sizeof(sampler->state[0]));
}
anv_state_flush(cmd_buffer->device, *state);
static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+
VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
- cmd_buffer->state.pipeline->active_stages;
+ pipeline->active_stages;
VkResult result = VK_SUCCESS;
anv_foreach_stage(s, dirty) {
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
/* Re-emit all active binding tables */
- dirty |= cmd_buffer->state.pipeline->active_stages;
+ dirty |= pipeline->active_stages;
anv_foreach_stage(s, dirty) {
result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
if (result != VK_SUCCESS) {
};
anv_foreach_stage(s, stages) {
+ assert(s < ARRAY_SIZE(binding_table_opcodes));
+ assert(binding_table_opcodes[s] > 0);
+
if (cmd_buffer->state.samplers[s].alloc_size > 0) {
anv_batch_emit(&cmd_buffer->batch,
GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
}
}
-static uint32_t
-cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
+static void
+cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
+ VkShaderStageFlags dirty_stages)
{
+ const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
+ const struct anv_pipeline *pipeline = gfx_state->base.pipeline;
+
static const uint32_t push_constant_opcodes[] = {
[MESA_SHADER_VERTEX] = 21,
[MESA_SHADER_TESS_CTRL] = 25, /* HS */
VkShaderStageFlags flushed = 0;
- anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
- if (stage == MESA_SHADER_COMPUTE)
- continue;
+ anv_foreach_stage(stage, dirty_stages) {
+ assert(stage < ARRAY_SIZE(push_constant_opcodes));
+ assert(push_constant_opcodes[stage] > 0);
- struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
+ c._3DCommandSubOpcode = push_constant_opcodes[stage];
- if (state.offset == 0) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
- c._3DCommandSubOpcode = push_constant_opcodes[stage];
- } else {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
- c._3DCommandSubOpcode = push_constant_opcodes[stage],
- c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
-#if GEN_GEN >= 9
- .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
- .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+ if (anv_pipeline_has_stage(pipeline, stage)) {
+#if GEN_GEN >= 8 || GEN_IS_HASWELL
+ const struct brw_stage_prog_data *prog_data =
+ pipeline->shaders[stage]->prog_data;
+ const struct anv_pipeline_bind_map *bind_map =
+ &pipeline->shaders[stage]->bind_map;
+
+ /* The Skylake PRM contains the following restriction:
+ *
+ * "The driver must ensure The following case does not occur
+ * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
+ * buffer 3 read length equal to zero committed followed by a
+ * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
+ * zero committed."
+ *
+ * To avoid this, we program the buffers in the highest slots.
+ * This way, slot 0 is only used if slot 3 is also used.
+ */
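+
+ /* Illustrative layout (hypothetical counts): with two pushable UBO
+ * ranges plus regular push constants, the constant body ends up as
+ *
+ * Buffer[3] = UBO range A, Buffer[2] = UBO range B,
+ * Buffer[1] = push constants, Buffer[0] = unused.
+ */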
+ int n = 3;
+
+ for (int i = 3; i >= 0; i--) {
+ const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
+ if (range->length == 0)
+ continue;
+
+ const unsigned surface =
+ prog_data->binding_table.ubo_start + range->block;
+
+ assert(surface < bind_map->surface_count);
+ const struct anv_pipeline_binding *binding =
+ &bind_map->surface_to_descriptor[surface];
+
+ const struct anv_descriptor *desc =
+ anv_descriptor_for_binding(&gfx_state->base, binding);
+
+ struct anv_address read_addr;
+ uint32_t read_len;
+ if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ read_len = MIN2(range->length,
+ DIV_ROUND_UP(desc->buffer_view->range, 32) - range->start);
+ read_addr = (struct anv_address) {
+ .bo = desc->buffer_view->bo,
+ .offset = desc->buffer_view->offset +
+ range->start * 32,
+ };
+ } else {
+ assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
+
+ uint32_t dynamic_offset =
+ dynamic_offset_for_binding(&gfx_state->base, binding);
+ uint32_t buf_offset =
+ MIN2(desc->offset + dynamic_offset, desc->buffer->size);
+ uint32_t buf_range =
+ MIN2(desc->range, desc->buffer->size - buf_offset);
+
+ read_len = MIN2(range->length,
+ DIV_ROUND_UP(buf_range, 32) - range->start);
+ read_addr = (struct anv_address) {
+ .bo = desc->buffer->bo,
+ .offset = desc->buffer->offset + buf_offset +
+ range->start * 32,
+ };
+ }
+
+ if (read_len > 0) {
+ c.ConstantBody.Buffer[n] = read_addr;
+ c.ConstantBody.ReadLength[n] = read_len;
+ n--;
+ }
+ }
+
+ struct anv_state state =
+ anv_cmd_buffer_push_constants(cmd_buffer, stage);
+
+ if (state.alloc_size > 0) {
+ c.ConstantBody.Buffer[n] = (struct anv_address) {
+ .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .offset = state.offset,
+ };
+ c.ConstantBody.ReadLength[n] =
+ DIV_ROUND_UP(state.alloc_size, 32);
+ }
#else
- .PointerToConstantBuffer0 = { .offset = state.offset },
- .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+ /* For Ivy Bridge, the push constant packets have a different
+ * rule that would require us to iterate in the other direction
+ * and possibly mess around with dynamic state base address.
+ * Don't bother; just emit regular push constants at n = 0.
+ */
+ struct anv_state state =
+ anv_cmd_buffer_push_constants(cmd_buffer, stage);
+
+ if (state.alloc_size > 0) {
+ c.ConstantBody.Buffer[0].offset = state.offset;
+ c.ConstantBody.ReadLength[0] =
+ DIV_ROUND_UP(state.alloc_size, 32);
+ }
#endif
- };
}
}
flushed |= mesa_to_vk_shader_stage(stage);
}
- cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
-
- return flushed;
+ cmd_buffer->state.push_constants_dirty &= ~flushed;
}
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
uint32_t *p;
- uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
+ uint32_t vb_emit = cmd_buffer->state.gfx.vb_dirty & pipeline->vb_used;
assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
.MemoryObjectControlState = GENX(MOCS),
#else
.BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
- .InstanceDataStepRate = 1,
+ /* Our implementation of VK_KHR_multiview uses instancing to draw
+ * the different views. If the client asks for instancing, we
+ * need to use the Instance Data Step Rate to ensure that we
+ * repeat the client's per-instance data once for each view.
+ */
+ .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass),
.VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif
}
}
- cmd_buffer->state.vb_dirty &= ~vb_emit;
+ cmd_buffer->state.gfx.vb_dirty &= ~vb_emit;
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
/* The exact descriptor layout is pulled from the pipeline, so we need
* to re-emit binding tables on every pipeline change.
*/
- cmd_buffer->state.descriptors_dirty |=
- cmd_buffer->state.pipeline->active_stages;
+ cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
/* If the pipeline changed, we may need to re-allocate push constant
* space in the URB.
#endif
/* Render targets live in the same binding table as fragment descriptors */
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
/* We emit the binding tables and sampler tables first, then emit push
if (cmd_buffer->state.descriptors_dirty)
dirty = flush_descriptor_sets(cmd_buffer);
- if (cmd_buffer->state.push_constants_dirty) {
-#if GEN_GEN >= 9
- /* On Sky Lake and later, the binding table pointers commands are
- * what actually flush the changes to push constant state so we need
- * to dirty them so they get re-emitted below.
+ if (dirty || cmd_buffer->state.push_constants_dirty) {
+ /* Because we're pushing UBOs, we have to push whenever either the
+ * descriptors or the push constants are dirty.
*/
- dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
-#else
- cmd_buffer_flush_push_constants(cmd_buffer);
-#endif
+ dirty |= cmd_buffer->state.push_constants_dirty;
+ dirty &= ANV_STAGE_MASK & VK_SHADER_STAGE_ALL_GRAPHICS;
+ cmd_buffer_flush_push_constants(cmd_buffer, dirty);
}
if (dirty)
cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
gen8_cmd_buffer_emit_viewport(cmd_buffer);
- if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
+ if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
ANV_CMD_DIRTY_PIPELINE)) {
gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
pipeline->depth_clamp_enable);
}
- if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
+ if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
gen7_cmd_buffer_emit_scissor(cmd_buffer);
genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
anv_state_flush(cmd_buffer->device, id_state);
emit_base_vertex_instance_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
}
static void
anv_state_flush(cmd_buffer->device, state);
emit_vertex_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
state.offset, 4, ANV_DRAWID_VB_INDEX);
}
uint32_t firstInstance)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, 0);
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. We need to multiply instanceCount by the view count.
+ */
+ instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
+
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = SEQUENTIAL;
prim.PrimitiveTopologyType = pipeline->topology;
uint32_t firstInstance)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, 0);
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. We need to multiply instanceCount by the view count.
+ */
+ instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
+
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = RANDOM;
prim.PrimitiveTopologyType = pipeline->topology;
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+/* MI_MATH only exists on Haswell+ */
+#if GEN_IS_HASWELL || GEN_GEN >= 8
+
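+/* Pack a single MI_MATH ALU instruction into its dword encoding. */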
+static uint32_t
+mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2)
+{
+ struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
+ .ALUOpcode = opcode,
+ .Operand1 = op1,
+ .Operand2 = op2,
+ };
+
+ uint32_t dw;
+ GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
+
+ return dw;
+}
+
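+/* MMIO offset of command streamer general-purpose register n; each GPR is
+ * 64 bits (two dwords) wide.
+ */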
+#define CS_GPR(n) (0x2600 + (n) * 8)
+
+/* Emit dwords to multiply GPR0 by N */
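+/* This is standard MSB-first shift-and-add. For example, with N = 6
+ * (binary 110, top_bit = 2) and an initial value x in GPR0: iteration
+ * i = 1 doubles x and adds x (leaving 3x in GPR1), then iteration i = 0
+ * doubles that and writes 6x back to GPR0.
+ */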
+static void
+build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N)
+{
+ VK_OUTARRAY_MAKE(out, dw, dw_count);
+
+#define append_alu(opcode, operand1, operand2) \
+ vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2)
+
+ assert(N > 0);
+ unsigned top_bit = 31 - __builtin_clz(N);
+ for (int i = top_bit - 1; i >= 0; i--) {
+ /* We get our initial data in GPR0 and we write the final data out to
+ * GPR0 but we use GPR1 as our scratch register.
+ */
+ unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1;
+ unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1;
+
+ /* Shift the current value left by 1 */
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg);
+ append_alu(MI_ALU_ADD, 0, 0);
+
+ if (N & (1 << i)) {
+ /* Store ACCU to R1 and add R0 to R1 */
+ append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
+ append_alu(MI_ALU_ADD, 0, 0);
+ }
+
+ append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
+ }
+
+#undef append_alu
+}
+
+static void
+emit_mul_gpr0(struct anv_batch *batch, uint32_t N)
+{
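+ /* Two-pass emission: the first call, with a NULL dword buffer, only
+ * counts the ALU dwords so we can size the MI_MATH packet; the second
+ * call packs them into the batch.
+ */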
+ uint32_t num_dwords;
+ build_alu_multiply_gpr0(NULL, &num_dwords, N);
+
+ uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH));
+ build_alu_multiply_gpr0(dw + 1, &num_dwords, N);
+}
+
+#endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
+
+static void
+load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_buffer *buffer, uint64_t offset,
+ bool indexed)
+{
+ struct anv_batch *batch = &cmd_buffer->batch;
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
+
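+ /* The offsets below follow the layouts of VkDrawIndirectCommand
+ * (vertexCount, instanceCount, firstVertex, firstInstance) and
+ * VkDrawIndexedIndirectCommand (indexCount, instanceCount, firstIndex,
+ * vertexOffset, firstInstance); each field is a tightly packed uint32_t.
+ */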
+ emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+
+ unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
+ if (view_count > 1) {
+#if GEN_IS_HASWELL || GEN_GEN >= 8
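+ /* Load instanceCount from the indirect buffer into GPR0, multiply it by
+ * the view count on the command streamer, and move the product into the
+ * 3DPRIM instance count register.
+ */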
+ emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
+ emit_mul_gpr0(batch, view_count);
+ emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
+#else
+ anv_finishme("Multiview + indirect draw requires MI_MATH; "
+ "MI_MATH is not supported on Ivy Bridge");
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+#endif
+ } else {
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ }
+
+ emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+
+ if (indexed) {
+ emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+ } else {
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
+ emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+ }
+}
+
void genX(CmdDrawIndirect)(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
if (anv_batch_has_error(&cmd_buffer->batch))
return;
genX(cmd_buffer_flush_state)(cmd_buffer);
- if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
- if (vs_prog_data->uses_drawid)
- emit_draw_index(cmd_buffer, 0);
+ for (uint32_t i = 0; i < drawCount; i++) {
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
- emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+ emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, i);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
- prim.IndirectParameterEnable = true;
- prim.VertexAccessType = SEQUENTIAL;
- prim.PrimitiveTopologyType = pipeline->topology;
+ load_indirect_parameters(cmd_buffer, buffer, offset, false);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = SEQUENTIAL;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
+
+ offset += stride;
}
}
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
if (anv_batch_has_error(&cmd_buffer->batch))
return;
genX(cmd_buffer_flush_state)(cmd_buffer);
- /* TODO: We need to stomp base vertex to 0 somehow */
- if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
- if (vs_prog_data->uses_drawid)
- emit_draw_index(cmd_buffer, 0);
+ for (uint32_t i = 0; i < drawCount; i++) {
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+ /* TODO: We need to stomp base vertex to 0 somehow */
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+ emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, i);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
- prim.IndirectParameterEnable = true;
- prim.VertexAccessType = RANDOM;
- prim.PrimitiveTopologyType = pipeline->topology;
+ load_indirect_parameters(cmd_buffer, buffer, offset, true);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = RANDOM;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
+
+ offset += stride;
}
}
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
struct anv_state surfaces = { 0, }, samplers = { 0, };
VkResult result;
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
MAYBE_UNUSED VkResult result;
assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
genX(flush_pipeline_select_gpgpu)(cmd_buffer);
- if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
+ if (cmd_buffer->state.compute.pipeline_dirty) {
/* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
*
* "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
}
if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
- (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
+ cmd_buffer->state.compute.pipeline_dirty) {
/* FIXME: figure out descriptors for gen7 */
result = flush_compute_descriptor_set(cmd_buffer);
if (result != VK_SUCCESS)
}
}
- cmd_buffer->state.compute_dirty = 0;
+ cmd_buffer->state.compute.pipeline_dirty = false;
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
const char *function)
{
if (device->instance->physicalDevice.cmd_parser_version < required_version) {
- return vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
+ return vk_errorf(device->instance, device->instance,
+ VK_ERROR_FEATURE_NOT_PRESENT,
"cmd parser version %d is required for %s",
required_version, function);
} else {
uint32_t z)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
sizes[1] = y;
sizes[2] = z;
anv_state_flush(cmd_buffer->device, state);
- cmd_buffer->state.num_workgroups_offset = state.offset;
- cmd_buffer->state.num_workgroups_bo =
- &cmd_buffer->device->dynamic_state_block_pool.bo;
+ cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
+ .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .offset = state.offset,
+ };
}
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508
-#define MI_PREDICATE_SRC0 0x2400
-#define MI_PREDICATE_SRC1 0x2408
-
void genX(CmdDispatchIndirect)(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
#endif
if (prog_data->uses_num_work_groups) {
- cmd_buffer->state.num_workgroups_offset = bo_offset;
- cmd_buffer->state.num_workgroups_bo = bo;
+ cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
+ .bo = bo,
+ .offset = bo_offset,
+ };
}
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
}
static void
-flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
- uint32_t pipeline)
+genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t pipeline)
{
+ UNUSED const struct gen_device_info *devinfo = &cmd_buffer->device->info;
+
+ if (cmd_buffer->state.current_pipeline == pipeline)
+ return;
+
#if GEN_GEN >= 8 && GEN_GEN < 10
/* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
*
*/
if (pipeline == GPGPU)
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
-#elif GEN_GEN <= 7
- /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
- * PIPELINE_SELECT [DevBWR+]":
- *
- * Project: DEVSNB+
+#endif
+
+ /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+ * PIPELINE_SELECT [DevBWR+]":
+ *
+ * Project: DEVSNB+
+ *
+ * Software must ensure all the write caches are flushed through a
+ * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+ * command to invalidate read only caches prior to programming
+ * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.RenderTargetCacheFlushEnable = true;
+ pc.DepthCacheFlushEnable = true;
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.StateCacheInvalidationEnable = true;
+ pc.InstructionCacheInvalidateEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+#if GEN_GEN >= 9
+ ps.MaskBits = 3;
+#endif
+ ps.PipelineSelection = pipeline;
+ }
+
+#if GEN_GEN == 9
+ if (devinfo->is_geminilake) {
+ /* Project: DevGLK
*
- * Software must ensure all the write caches are flushed through a
- * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
- * command to invalidate read only caches prior to programming
- * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
+ * "This chicken bit works around a hardware issue with barrier logic
+ * encountered when switching between GPGPU and 3D pipelines. To
+ * workaround the issue, this mode bit should be set after a pipeline
+ * is selected."
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.RenderTargetCacheFlushEnable = true;
- pc.DepthCacheFlushEnable = true;
- pc.DCFlushEnable = true;
- pc.PostSyncOperation = NoWrite;
- pc.CommandStreamerStallEnable = true;
- }
-
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.TextureCacheInvalidationEnable = true;
- pc.ConstantCacheInvalidationEnable = true;
- pc.StateCacheInvalidationEnable = true;
- pc.InstructionCacheInvalidateEnable = true;
- pc.PostSyncOperation = NoWrite;
- }
+ uint32_t scec;
+ anv_pack_struct(&scec, GENX(SLICE_COMMON_ECO_CHICKEN1),
+ .GLKBarrierMode =
+ pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU
+ : GLK_BARRIER_MODE_3D_HULL,
+ .GLKBarrierModeMask = 1);
+ emit_lri(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1_num), scec);
+ }
#endif
+
+ cmd_buffer->state.current_pipeline = pipeline;
}
void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
- if (cmd_buffer->state.current_pipeline != _3D) {
- flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
-
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
-#if GEN_GEN >= 9
- ps.MaskBits = 3;
-#endif
- ps.PipelineSelection = _3D;
- }
-
- cmd_buffer->state.current_pipeline = _3D;
- }
+ genX(flush_pipeline_select)(cmd_buffer, _3D);
}
void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
- if (cmd_buffer->state.current_pipeline != GPGPU) {
- flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
-
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
-#if GEN_GEN >= 9
- ps.MaskBits = 3;
-#endif
- ps.PipelineSelection = GPGPU;
- }
-
- cmd_buffer->state.current_pipeline = GPGPU;
- }
+ genX(flush_pipeline_select)(cmd_buffer, GPGPU);
}
void
}
}
-static uint32_t
-depth_stencil_surface_type(enum isl_surf_dim dim)
-{
- switch (dim) {
- case ISL_SURF_DIM_1D:
- if (GEN_GEN >= 9) {
- /* From the Sky Lake PRM, 3DSTATAE_DEPTH_BUFFER::SurfaceType
- *
- * Programming Notes:
- * The Surface Type of the depth buffer must be the same as the
- * Surface Type of the render target(s) (defined in
- * SURFACE_STATE), unless either the depth buffer or render
- * targets are SURFTYPE_NULL (see exception below for SKL). 1D
- * surface type not allowed for depth surface and stencil surface.
- *
- * Workaround:
- * If depth/stencil is enabled with 1D render target,
- * depth/stencil surface type needs to be set to 2D surface type
- * and height set to 1. Depth will use (legacy) TileY and stencil
- * will use TileW. For this case only, the Surface Type of the
- * depth buffer can be 2D while the Surface Type of the render
- * target(s) are 1D, representing an exception to a programming
- * note above.
- */
- return SURFTYPE_2D;
- } else {
- return SURFTYPE_1D;
- }
- case ISL_SURF_DIM_2D:
- return SURFTYPE_2D;
- case ISL_SURF_DIM_3D:
- if (GEN_GEN >= 9) {
- /* The Sky Lake docs list the value for 3D as "Reserved". However,
- * they have the exact same layout as 2D arrays on gen9+, so we can
- * just use 2D here.
- */
- return SURFTYPE_2D;
- } else {
- return SURFTYPE_3D;
- }
- default:
- unreachable("Invalid surface dimension");
- }
-}
-
static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_device *device = cmd_buffer->device;
- const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
const struct anv_image_view *iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
const struct anv_image *image = iview ? iview->image : NULL;
- const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
- const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
- const bool has_hiz = image != NULL &&
- cmd_buffer->state.attachments[ds].aux_usage == ISL_AUX_USAGE_HIZ;
- const bool has_stencil =
- image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
-
- cmd_buffer->state.hiz_enabled = has_hiz;
/* FIXME: Width and Height are wrong */
genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
- /* Emit 3DSTATE_DEPTH_BUFFER */
- if (has_depth) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
- db.SurfaceType =
- depth_stencil_surface_type(image->depth_surface.isl.dim);
- db.DepthWriteEnable = true;
- db.StencilWriteEnable = has_stencil;
- db.HierarchicalDepthBufferEnable = has_hiz;
-
- db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
- &image->depth_surface.isl);
-
- db.SurfaceBaseAddress = (struct anv_address) {
- .bo = image->bo,
- .offset = image->offset + image->depth_surface.offset,
- };
- db.DepthBufferObjectControlState = GENX(MOCS);
-
- db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
- db.Height = image->extent.height - 1;
- db.Width = image->extent.width - 1;
- db.LOD = iview->isl.base_level;
- db.MinimumArrayElement = iview->isl.base_array_layer;
+ uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
+ device->isl_dev.ds.size / 4);
+ if (dw == NULL)
+ return;
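+
+ /* Reserve the packet dwords up front so that relocations can be
+ * recorded at the offsets ISL reports (ds.depth_offset, ds.hiz_offset
+ * and ds.stencil_offset) before isl_emit_depth_stencil_hiz_s() packs
+ * the final state over them.
+ */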
- assert(image->depth_surface.isl.dim != ISL_SURF_DIM_3D);
- db.Depth =
- db.RenderTargetViewExtent = iview->isl.array_len - 1;
+ struct isl_depth_stencil_hiz_emit_info info = {
+ .mocs = device->default_mocs,
+ };
-#if GEN_GEN >= 8
- db.SurfaceQPitch =
- isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
-#endif
- }
- } else {
- /* Even when no depth buffer is present, the hardware requires that
- * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
- *
- * If a null depth buffer is bound, the driver must instead bind depth as:
- * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
- * 3DSTATE_DEPTH.Width = 1
- * 3DSTATE_DEPTH.Height = 1
- * 3DSTATE_DEPTH.SuraceFormat = D16_UNORM
- * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
- * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
- * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
- * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
- *
- * The PRM is wrong, though. The width and height must be programmed to
- * actual framebuffer's width and height, even when neither depth buffer
- * nor stencil buffer is present. Also, D16_UNORM is not allowed to
- * be combined with a stencil buffer so we use D32_FLOAT instead.
- */
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
- if (has_stencil) {
- db.SurfaceType =
- depth_stencil_surface_type(image->stencil_surface.isl.dim);
- } else {
- db.SurfaceType = SURFTYPE_2D;
- }
- db.SurfaceFormat = D32_FLOAT;
- db.Width = MAX2(fb->width, 1) - 1;
- db.Height = MAX2(fb->height, 1) - 1;
- db.StencilWriteEnable = has_stencil;
+ if (iview)
+ info.view = &iview->planes[0].isl;
+
+ if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
+ uint32_t depth_plane =
+ anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
+ const struct anv_surface *surface = &image->planes[depth_plane].surface;
+
+ info.depth_surf = &surface->isl;
+
+ info.depth_address =
+ anv_batch_emit_reloc(&cmd_buffer->batch,
+ dw + device->isl_dev.ds.depth_offset / 4,
+ image->planes[depth_plane].bo,
+ image->planes[depth_plane].bo_offset +
+ surface->offset);
+
+ const uint32_t ds =
+ cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
+ info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
+ if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
+ info.hiz_surf = &image->planes[depth_plane].aux_surface.isl;
+
+ info.hiz_address =
+ anv_batch_emit_reloc(&cmd_buffer->batch,
+ dw + device->isl_dev.ds.hiz_offset / 4,
+ image->planes[depth_plane].bo,
+ image->planes[depth_plane].bo_offset +
+ image->planes[depth_plane].aux_surface.offset);
+
+ info.depth_clear_value = ANV_HZ_FC_VAL;
}
}
- if (has_hiz) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) {
- hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS);
- hdb.SurfacePitch = image->aux_surface.isl.row_pitch - 1;
- hdb.SurfaceBaseAddress = (struct anv_address) {
- .bo = image->bo,
- .offset = image->offset + image->aux_surface.offset,
- };
-#if GEN_GEN >= 8
- /* From the SKL PRM Vol2a:
- *
- * The interpretation of this field is dependent on Surface Type
- * as follows:
- * - SURFTYPE_1D: distance in pixels between array slices
- * - SURFTYPE_2D/CUBE: distance in rows between array slices
- * - SURFTYPE_3D: distance in rows between R - slices
- *
- * Unfortunately, the docs aren't 100% accurate here. They fail to
- * mention that the 1-D rule only applies to linear 1-D images.
- * Since depth and HiZ buffers are always tiled, they are treated as
- * 2-D images. Prior to Sky Lake, this field is always in rows.
- */
- hdb.SurfaceQPitch =
- isl_surf_get_array_pitch_sa_rows(&image->aux_surface.isl) >> 2;
-#endif
- }
- } else {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb);
- }
+ if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ uint32_t stencil_plane =
+ anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
+ const struct anv_surface *surface = &image->planes[stencil_plane].surface;
- /* Emit 3DSTATE_STENCIL_BUFFER */
- if (has_stencil) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
-#if GEN_GEN >= 8 || GEN_IS_HASWELL
- sb.StencilBufferEnable = true;
-#endif
- sb.StencilBufferObjectControlState = GENX(MOCS);
+ info.stencil_surf = &surface->isl;
- sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1;
-
-#if GEN_GEN >= 8
- sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
-#endif
- sb.SurfaceBaseAddress = (struct anv_address) {
- .bo = image->bo,
- .offset = image->offset + image->stencil_surface.offset,
- };
- }
- } else {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
+ info.stencil_address =
+ anv_batch_emit_reloc(&cmd_buffer->batch,
+ dw + device->isl_dev.ds.stencil_offset / 4,
+ image->planes[stencil_plane].bo,
+ image->planes[stencil_plane].bo_offset + surface->offset);
}
- /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS:
- *
- * 3DSTATE_CLEAR_PARAMS must always be programmed in the along with
- * the other Depth/Stencil state commands(i.e. 3DSTATE_DEPTH_BUFFER,
- * 3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER)
- *
- * Testing also shows that some variant of this restriction may exist HSW+.
- * On BDW+, it is not possible to emit 2 of these packets consecutively when
- * both have DepthClearValueValid set. An analysis of such state programming
- * on SKL showed that the GPU doesn't register the latter packet's clear
- * value.
- */
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
- if (has_hiz) {
- cp.DepthClearValueValid = true;
- cp.DepthClearValue = ANV_HZ_FC_VAL;
- }
- }
+ isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
+
+ cmd_buffer->state.hiz_enabled = info.hiz_usage == ISL_AUX_USAGE_HIZ;
}
* this is not the last use of the buffer. The layout should not have
* changed from the first call and no transition is necessary.
*/
- assert(att_ref->layout == att_state->current_layout);
+ assert(att_state->current_layout == att_ref->layout ||
+ att_state->current_layout ==
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
continue;
}
- /* Get the appropriate target layout for this attachment. */
- const VkImageLayout target_layout = subpass_end ?
- att_desc->final_layout : att_ref->layout;
-
/* The attachment index must be less than the number of attachments
* within the framebuffer.
*/
assert(att_ref->attachment < cmd_state->framebuffer->attachment_count);
- const struct anv_image * const image =
- cmd_state->framebuffer->attachments[att_ref->attachment]->image;
+ const struct anv_image_view * const iview =
+ cmd_state->framebuffer->attachments[att_ref->attachment];
+ const struct anv_image * const image = iview->image;
+
+ /* Get the appropriate target layout for this attachment. */
+ VkImageLayout target_layout;
+
+ /* A resolve is necessary before use as an input attachment if the clear
+ * color or auxiliary buffer usage isn't supported by the sampler.
+ */
+ const bool input_needs_resolve =
+ (att_state->fast_clear && !att_state->clear_color_is_zero_one) ||
+ att_state->input_aux_usage != att_state->aux_usage;
+ if (subpass_end) {
+ target_layout = att_desc->final_layout;
+ } else if (iview->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV &&
+ !input_needs_resolve) {
+ /* Layout transitions before the final layout only help to enable
+ * sampling as an input attachment. If the input attachment supports
+ * sampling using the auxiliary surface, we can skip such transitions by
+ * making the target layout one that is CCS-aware.
+ */
+ target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ } else {
+ target_layout = att_ref->layout;
+ }
/* Perform the layout transition. */
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
att_state->current_layout, target_layout);
att_state->aux_usage =
anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
- image->aspects, target_layout);
+ VK_IMAGE_ASPECT_DEPTH_BIT, target_layout);
+ } else if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
+ assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
+ transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level, 1,
+ iview->planes[0].isl.base_array_layer,
+ iview->planes[0].isl.array_len,
+ att_state->current_layout, target_layout);
}
att_state->current_layout = target_layout;
}
}
+/* Update the clear value dword(s) in surface state objects or the fast clear
+ * state buffer entry for the color attachments used in this subpass.
+ */
+static void
+cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
+{
+ assert(cmd_buffer && cmd_buffer->state.subpass);
+
+ const struct anv_cmd_state *state = &cmd_buffer->state;
+
+ /* Iterate through every color attachment used in this subpass. */
+ for (uint32_t i = 0; i < state->subpass->color_count; ++i) {
+
+ /* The attachment should be one of the attachments described in the
+ * render pass and used in the subpass.
+ */
+ const uint32_t a = state->subpass->color_attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ assert(a < state->pass->attachment_count);
+
+ /* Store some information regarding this attachment. */
+ const struct anv_attachment_state *att_state = &state->attachments[a];
+ const struct anv_image_view *iview = state->framebuffer->attachments[a];
+ const struct anv_render_pass_attachment *rp_att =
+ &state->pass->attachments[a];
+
+ if (att_state->aux_usage == ISL_AUX_USAGE_NONE)
+ continue;
+
+ /* The fast clear state entry must be updated if a fast clear is going to
+ * happen. The surface state must be updated if the clear value from a
+ * prior fast clear may be needed.
+ */
+ if (att_state->pending_clear_aspects && att_state->fast_clear) {
+ /* Update the fast clear state entry. */
+ genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
+ iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level,
+ true /* copy from ss */);
+
+ /* Fast-clears impact whether or not a resolve will be necessary. */
+ if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E &&
+ att_state->clear_color_is_zero) {
+ /* This image always has the auxiliary buffer enabled. We can mark
+ * the subresource as not needing a resolve because the clear color
+ * will match what's in every RENDER_SURFACE_STATE object when it's
+ * being used for sampling.
+ */
+ genX(set_image_needs_resolve)(cmd_buffer, iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level,
+ false);
+ } else {
+ genX(set_image_needs_resolve)(cmd_buffer, iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level,
+ true);
+ }
+ } else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
+ /* The attachment may have been fast-cleared in a previous render
+ * pass and the value is needed now. Update the surface state(s).
+ *
+ * TODO: Do this only once per render pass instead of every subpass.
+ */
+ genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
+ iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level,
+ false /* copy to ss */);
+
+ if (need_input_attachment_state(rp_att) &&
+ att_state->input_aux_usage != ISL_AUX_USAGE_NONE) {
+ genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input.state,
+ iview->image,
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ iview->planes[0].isl.base_level,
+ false /* copy to ss */);
+ }
+ }
+ }
+}
+
static void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
struct anv_subpass *subpass)
{
cmd_buffer->state.subpass = subpass;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. If the client asks for instancing, we need to use the
+ * Instance Data Step Rate to ensure that we repeat the client's
+ * per-instance data once for each view. Since this bit is in
+ * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top
+ * of each subpass.
+ */
+ if (GEN_GEN == 7)
+ cmd_buffer->state.gfx.vb_dirty |= ~0;
+
+ /* It is possible to start a render pass with an old pipeline. Because
+ * the render pass and subpass index are both baked into the pipeline,
+ * this is highly unlikely. Hitting it requires a render pass with a
+ * single subpass, used twice back-to-back, with the same pipeline bound
+ * at the end of the first render pass as at the start of the second. To
+ * avoid unpredictable issues with this edge case, we just dirty the
+ * pipeline at the start of every subpass.
+ */
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
/* Perform transitions to the subpass layout before any writes have
* occurred.
*/
cmd_buffer_subpass_transition_layouts(cmd_buffer, false);
+ /* Update clear values *after* performing automatic layout transitions.
+ * This ensures that transitions from the UNDEFINED layout have had a
+ * chance to populate the clear value buffer with the correct values for
+ * the LOAD_OP_LOAD loadOp, and that fast-clears update the buffer without
+ * that same layout transition overwriting the fast-clear value.
+ */
+ cmd_buffer_subpass_sync_fast_clear_values(cmd_buffer);
+
cmd_buffer_emit_depth_stencil(cmd_buffer);
anv_cmd_buffer_clear_subpass(cmd_buffer);
genX(flush_pipeline_select_3d)(cmd_buffer);
genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
+
+ cmd_buffer->state.pending_pipe_bits |=
+ cmd_buffer->state.pass->subpass_flushes[0];
}
void genX(CmdNextSubpass)(
cmd_buffer_subpass_transition_layouts(cmd_buffer, true);
genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
+
+ uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
+ cmd_buffer->state.pending_pipe_bits |=
+ cmd_buffer->state.pass->subpass_flushes[subpass_id];
}
void genX(CmdEndRenderPass)(
*/
cmd_buffer_subpass_transition_layouts(cmd_buffer, true);
+ cmd_buffer->state.pending_pipe_bits |=
+ cmd_buffer->state.pass->subpass_flushes[cmd_buffer->state.pass->subpass_count];
+
cmd_buffer->state.hiz_enabled = false;
#ifndef NDEBUG