#include "anv_private.h"
#include "vk_format_info.h"
+#include "vk_util.h"
#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
}
}
+#if GEN_IS_HASWELL || GEN_GEN >= 8
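+/* MI_LOAD_REGISTER_REG copies one MMIO register to another; it only exists
+ * on Haswell and later, hence the guard above.
+ */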
+static void
+emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
+ lrr.SourceRegisterAddress = src;
+ lrr.DestinationRegisterAddress = dst;
+ }
+}
+#endif
+
void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddress =
- (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
+ (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
sba.DynamicStateBaseAddressModifyEnable = true;
sba.IndirectObjectBaseAddressModifyEnable = true;
sba.InstructionBaseAddress =
- (struct anv_address) { &device->instruction_block_pool.bo, 0 };
+ (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
sba.InstructionMemoryObjectControlState = GENX(MOCS);
sba.InstructionBaseAddressModifyEnable = true;
}
static void
-add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
- const struct anv_image_view *iview,
- enum isl_aux_usage aux_usage,
- struct anv_state state)
+add_image_relocs(struct anv_cmd_buffer * const cmd_buffer,
+ const struct anv_image * const image,
+ const VkImageAspectFlags aspect_mask,
+ const enum isl_aux_usage aux_usage,
+ const struct anv_state state)
{
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
+ const uint32_t surf_offset = image->offset +
+ anv_image_get_surface_for_aspect_mask(image, aspect_mask)->offset;
- add_surface_state_reloc(cmd_buffer, state, iview->bo, iview->offset);
+ add_surface_state_reloc(cmd_buffer, state, image->bo, surf_offset);
if (aux_usage != ISL_AUX_USAGE_NONE) {
- uint32_t aux_offset = iview->offset + iview->image->aux_surface.offset;
+ uint32_t aux_offset = image->offset + image->aux_surface.offset;
/* On gen7 and prior, the bottom 12 bits of the MCS base address are
* used to store other information. This should be ok, however, because
anv_reloc_list_add(&cmd_buffer->surface_relocs,
&cmd_buffer->pool->alloc,
state.offset + isl_dev->ss.aux_addr_offset,
- iview->bo, aux_offset);
+ image->bo, aux_offset);
if (result != VK_SUCCESS)
anv_batch_set_error(&cmd_buffer->batch, result);
}
att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
} else if (att_state->fast_clear) {
att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
- if (GEN_GEN >= 9 &&
- !isl_format_supports_ccs_e(&device->info, iview->isl.format)) {
- /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
- *
- * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
- * setting is only allowed if Surface Format supported for Fast
- * Clear. In addition, if the surface is bound to the sampling
- * engine, Surface Format must be supported for Render Target
- * Compression for surfaces bound to the sampling engine."
- *
- * In other words, we can't sample from a fast-cleared image if it
- * doesn't also support color compression.
- */
- att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
- } else if (GEN_GEN >= 8) {
- /* Broadwell/Skylake can sample from fast-cleared images */
+ /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
+ *
+ * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
+ * setting is only allowed if Surface Format supported for Fast
+ * Clear. In addition, if the surface is bound to the sampling
+ * engine, Surface Format must be supported for Render Target
+ * Compression for surfaces bound to the sampling engine."
+ *
+ * In other words, we can only sample from a fast-cleared image if it
+ * also supports color compression.
+ */
+ if (isl_format_supports_ccs_e(&device->info, iview->isl.format))
att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
- } else {
- /* Ivy Bridge and Haswell cannot */
+ else
att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
- }
} else {
att_state->aux_usage = ISL_AUX_USAGE_NONE;
att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
* The undefined layout indicates that the user doesn't care about the data
* that's currently in the buffer. Therefore, a data-preserving resolve
* operation is not needed.
- *
- * The pre-initialized layout is equivalent to the undefined layout for
- * optimally-tiled images. Anv only exposes support for optimally-tiled
- * depth buffers.
*/
- if (image->aux_usage != ISL_AUX_USAGE_HIZ ||
- initial_layout == final_layout ||
- initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
- initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)
+ if (image->aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
return;
const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
}
+static void
+transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ const uint32_t base_level, uint32_t level_count,
+ uint32_t base_layer, uint32_t layer_count,
+ VkImageLayout initial_layout,
+ VkImageLayout final_layout)
+{
+ if (image->aux_usage != ISL_AUX_USAGE_CCS_E)
+ return;
+
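+   /* Any layout other than undefined or preinitialized means the CCS
+    * already contains valid data, so there is nothing to initialize here.
+    */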
+ if (initial_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
+ initial_layout != VK_IMAGE_LAYOUT_PREINITIALIZED)
+ return;
+
+   /* A transition of a 3D subresource works on all slices at once. */
+ if (image->type == VK_IMAGE_TYPE_3D) {
+ base_layer = 0;
+ layer_count = anv_minify(image->extent.depth, base_level);
+ }
+
+#if GEN_GEN >= 9
+ /* We're transitioning from an undefined layout so it doesn't really matter
+ * what data ends up in the color buffer. We do, however, need to ensure
+ * that the CCS has valid data in it. One easy way to do that is to
+ * fast-clear the specified range.
+ */
+ anv_image_ccs_clear(cmd_buffer, image, base_level, level_count,
+ base_layer, layer_count);
+#endif
+}
/**
* Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
VK_ERROR_OUT_OF_HOST_MEMORY);
}
- bool need_null_state = false;
- unsigned num_states = 0;
+ /* Reserve one for the NULL state. */
+ unsigned num_states = 1;
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
- if (vk_format_is_color(pass->attachments[i].format)) {
+ if (vk_format_is_color(pass->attachments[i].format))
num_states++;
- } else {
- /* We need a null state for any depth-stencil-only subpasses.
- * Importantly, this includes depth/stencil clears so we create one
- * whenever we have depth or stencil
- */
- need_null_state = true;
- }
if (need_input_attachment_state(&pass->attachments[i]))
num_states++;
}
- num_states += need_null_state;
const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
state->render_pass_states =
struct anv_state next_state = state->render_pass_states;
next_state.alloc_size = isl_dev->ss.size;
- if (need_null_state) {
- state->null_surface_state = next_state;
- next_state.offset += ss_stride;
- next_state.map += ss_stride;
- }
+ state->null_surface_state = next_state;
+ next_state.offset += ss_stride;
+ next_state.map += ss_stride;
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
if (vk_format_is_color(pass->attachments[i].format)) {
ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
assert(pass->attachment_count == framebuffer->attachment_count);
- if (need_null_state) {
- struct GENX(RENDER_SURFACE_STATE) null_ss = {
- .SurfaceType = SURFTYPE_NULL,
- .SurfaceArray = framebuffer->layers > 0,
- .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
+ struct GENX(RENDER_SURFACE_STATE) null_ss = {
+ .SurfaceType = SURFTYPE_NULL,
+ .SurfaceArray = framebuffer->layers > 0,
+ .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
- .TileMode = YMAJOR,
+ .TileMode = YMAJOR,
#else
- .TiledSurface = true,
+ .TiledSurface = true,
#endif
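+      /* Surface dimensions are programmed as "size - 1". */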
- .Width = framebuffer->width - 1,
- .Height = framebuffer->height - 1,
- .Depth = framebuffer->layers - 1,
- .RenderTargetViewExtent = framebuffer->layers - 1,
- };
- GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
- &null_ss);
- }
+ .Width = framebuffer->width - 1,
+ .Height = framebuffer->height - 1,
+ .Depth = framebuffer->layers - 1,
+ .RenderTargetViewExtent = framebuffer->layers - 1,
+ };
+ GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
+ &null_ss);
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
struct anv_render_pass_attachment *att = &pass->attachments[i];
.clear_color = clear_color,
.mocs = cmd_buffer->device->default_mocs);
- add_image_view_relocs(cmd_buffer, iview,
- state->attachments[i].aux_usage,
- state->attachments[i].color_rt_state);
+ add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
+ state->attachments[i].aux_usage,
+ state->attachments[i].color_rt_state);
} else {
/* This field will be initialized after the first subpass
* transition.
.clear_color = clear_color,
.mocs = cmd_buffer->device->default_mocs);
- add_image_view_relocs(cmd_buffer, iview,
- state->attachments[i].input_aux_usage,
- state->attachments[i].input_att_state);
+ add_image_relocs(cmd_buffer, iview->image, iview->aspect_mask,
+ state->attachments[i].input_aux_usage,
+ state->attachments[i].input_att_state);
}
}
* copy the surface states for the current subpass into the storage
* we allocated for them in BeginCommandBuffer.
*/
- struct anv_bo *ss_bo = &primary->device->surface_state_block_pool.bo;
+ struct anv_bo *ss_bo =
+ &primary->device->surface_state_pool.block_pool.bo;
struct anv_state src_state = primary->state.render_pass_states;
struct anv_state dst_state = secondary->state.render_pass_states;
assert(src_state.alloc_size == dst_state.alloc_size);
anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
.SLMEnable = has_slm,
.URBLowBandwidth = urb_low_bw,
- .URBAllocation = cfg->n[GEN_L3P_URB],
+ .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
#if !GEN_IS_HASWELL
.ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
src_flags |= pImageMemoryBarriers[i].srcAccessMask;
dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
- if (pImageMemoryBarriers[i].subresourceRange.aspectMask &
- VK_IMAGE_ASPECT_DEPTH_BIT) {
+ const VkImageSubresourceRange *range =
+ &pImageMemoryBarriers[i].subresourceRange;
+
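+      /* Depth aspects may need a HiZ resolve while color aspects may need
+       * their CCS initialized, so dispatch on the aspect mask.
+       */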
+ if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
transition_depth_buffer(cmd_buffer, image,
pImageMemoryBarriers[i].oldLayout,
pImageMemoryBarriers[i].newLayout);
+ } else if (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ transition_color_buffer(cmd_buffer, image,
+ range->baseMipLevel,
+ anv_get_levelCount(image, range),
+ range->baseArrayLayer,
+ anv_get_layerCount(image, range),
+ pImageMemoryBarriers[i].oldLayout,
+ pImageMemoryBarriers[i].newLayout);
}
}
assert(stage == MESA_SHADER_FRAGMENT);
assert(binding->binding == 0);
if (binding->index < subpass->color_count) {
- const unsigned att = subpass->color_attachments[binding->index].attachment;
- surface_state = cmd_buffer->state.attachments[att].color_rt_state;
+ const unsigned att =
+ subpass->color_attachments[binding->index].attachment;
+
+ /* From the Vulkan 1.0.46 spec:
+ *
+ * "If any color or depth/stencil attachments are
+ * VK_ATTACHMENT_UNUSED, then no writes occur for those
+ * attachments."
+ */
+ if (att == VK_ATTACHMENT_UNUSED) {
+ surface_state = cmd_buffer->state.null_surface_state;
+ } else {
+ surface_state = cmd_buffer->state.attachments[att].color_rt_state;
+ }
} else {
surface_state = cmd_buffer->state.null_surface_state;
}
desc->image_view->no_aux_sampler_surface_state :
desc->image_view->sampler_surface_state;
assert(surface_state.alloc_size);
- add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->aux_usage, surface_state);
+ add_image_relocs(cmd_buffer, desc->image_view->image,
+ desc->image_view->aspect_mask,
+ desc->aux_usage, surface_state);
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
assert(stage == MESA_SHADER_FRAGMENT);
desc->image_view->no_aux_sampler_surface_state :
desc->image_view->sampler_surface_state;
assert(surface_state.alloc_size);
- add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->aux_usage, surface_state);
+ add_image_relocs(cmd_buffer, desc->image_view->image,
+ desc->image_view->aspect_mask,
+ desc->aux_usage, surface_state);
} else {
/* For color input attachments, we create the surface state at
* vkBeginRenderPass time so that we can include aux and clear
? desc->image_view->writeonly_storage_surface_state
: desc->image_view->storage_surface_state;
assert(surface_state.alloc_size);
- add_image_view_relocs(cmd_buffer, desc->image_view,
- desc->image_view->image->aux_usage,
- surface_state);
+ add_image_relocs(cmd_buffer, desc->image_view->image,
+ desc->image_view->aspect_mask,
+ desc->image_view->image->aux_usage, surface_state);
struct brw_image_param *image_param =
&cmd_buffer->state.push_constants[stage]->images[image++];
c._3DCommandSubOpcode = push_constant_opcodes[stage],
c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
- .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
- .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+ .Buffer[2] = { &cmd_buffer->device->dynamic_state_pool.block_pool.bo, state.offset },
+ .ReadLength[2] = DIV_ROUND_UP(state.alloc_size, 32),
#else
- .PointerToConstantBuffer0 = { .offset = state.offset },
- .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+ .Buffer[0] = { .offset = state.offset },
+ .ReadLength[0] = DIV_ROUND_UP(state.alloc_size, 32),
#endif
};
}
.MemoryObjectControlState = GENX(MOCS),
#else
.BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
- .InstanceDataStepRate = 1,
+ /* Our implementation of VK_KHR_multiview uses instancing to draw
+ * the different views. If the client asks for instancing, we
+ * need to use the Instance Data Step Rate to ensure that we
+ * repeat the client's per-instance data once for each view.
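+              *
+              * For example, with two views and client per-instance data
+              * {A, B}, a step rate of 2 causes the hardware to fetch
+              * A, A, B, B across the four hardware instances.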
+ */
+ .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass),
.VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif
anv_state_flush(cmd_buffer->device, id_state);
emit_base_vertex_instance_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
}
static void
anv_state_flush(cmd_buffer->device, state);
emit_vertex_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_block_pool.bo,
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
state.offset, 4, ANV_DRAWID_VB_INDEX);
}
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, 0);
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. We need to multiply instanceCount by the view count.
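+    * With two views, for example, instanceCount 4 becomes 8 hardware
+    * instances.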
+ */
+ instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
+
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = SEQUENTIAL;
prim.PrimitiveTopologyType = pipeline->topology;
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, 0);
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. We need to multiply instanceCount by the view count.
+ */
+ instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
+
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = RANDOM;
prim.PrimitiveTopologyType = pipeline->topology;
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+/* MI_MATH only exists on Haswell+ */
+#if GEN_IS_HASWELL || GEN_GEN >= 8
+
+static uint32_t
+mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2)
+{
+ struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
+ .ALUOpcode = opcode,
+ .Operand1 = op1,
+ .Operand2 = op2,
+ };
+
+ uint32_t dw;
+ GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
+
+ return dw;
+}
+
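+/* The command streamer's general-purpose registers (GPR0..GPR15) start at
+ * MMIO offset 0x2600, and each is 64 bits (8 bytes) wide.
+ */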
+#define CS_GPR(n) (0x2600 + (n) * 8)
+
+/* Emit dwords to multiply GPR0 by N */
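+/* This is a binary shift-and-add multiply: walking N's bits from the top,
+ * we double the running value once per bit position and add the original
+ * value whenever the bit is set.  For N = 5 (0b101), starting from x in
+ * GPR0: double (2x), double (4x), add x (5x).
+ */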
+static void
+build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N)
+{
+ VK_OUTARRAY_MAKE(out, dw, dw_count);
+
+#define append_alu(opcode, operand1, operand2) \
+ vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2)
+
+ assert(N > 0);
+ unsigned top_bit = 31 - __builtin_clz(N);
+ for (int i = top_bit - 1; i >= 0; i--) {
+      /* We get our initial data in GPR0, write the final result back to
+       * GPR0, and use GPR1 as our scratch register.
+       */
+ unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1;
+ unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1;
+
+ /* Shift the current value left by 1 */
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg);
+ append_alu(MI_ALU_ADD, 0, 0);
+
+ if (N & (1 << i)) {
+ /* Store ACCU to R1 and add R0 to R1 */
+ append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
+ append_alu(MI_ALU_ADD, 0, 0);
+ }
+
+ append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
+ }
+
+#undef append_alu
+}
+
+static void
+emit_mul_gpr0(struct anv_batch *batch, uint32_t N)
+{
+ uint32_t num_dwords;
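+   /* First pass with dw == NULL only counts the dwords; the second pass
+    * below fills in the MI_MATH payload after the command header.
+    */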
+ build_alu_multiply_gpr0(NULL, &num_dwords, N);
+
+ uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH));
+ build_alu_multiply_gpr0(dw + 1, &num_dwords, N);
+}
+
+#endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
+
+static void
+load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_buffer *buffer, uint64_t offset,
+ bool indexed)
+{
+ struct anv_batch *batch = &cmd_buffer->batch;
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
+
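+   /* The indirect buffer holds a VkDrawIndirectCommand (vertexCount,
+    * instanceCount, firstVertex, firstInstance) or, when indexed, a
+    * VkDrawIndexedIndirectCommand (indexCount, instanceCount, firstIndex,
+    * vertexOffset, firstInstance).  Load each field into the matching
+    * 3DPRIM register.
+    */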
+ emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+
+ unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
+ if (view_count > 1) {
+#if GEN_IS_HASWELL || GEN_GEN >= 8
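+      /* Load instanceCount into GPR0, multiply by the view count with
+       * MI_MATH, and copy the product into 3DPRIM_INSTANCE_COUNT.
+       */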
+ emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
+ emit_mul_gpr0(batch, view_count);
+ emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
+#else
+ anv_finishme("Multiview + indirect draw requires MI_MATH\n"
+ "MI_MATH is not supported on Ivy Bridge");
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+#endif
+ } else {
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ }
+
+ emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+
+ if (indexed) {
+ emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+ } else {
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
+ emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+ }
+}
+
void genX(CmdDrawIndirect)(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
if (anv_batch_has_error(&cmd_buffer->batch))
return;
genX(cmd_buffer_flush_state)(cmd_buffer);
- if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
- if (vs_prog_data->uses_drawid)
- emit_draw_index(cmd_buffer, 0);
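+   /* These gens have no native multi-draw indirect, so we emit one
+    * 3DPRIMITIVE per draw record, stepping by the client-provided stride.
+    */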
+ for (uint32_t i = 0; i < drawCount; i++) {
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
- emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+ emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, i);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
- prim.IndirectParameterEnable = true;
- prim.VertexAccessType = SEQUENTIAL;
- prim.PrimitiveTopologyType = pipeline->topology;
+ load_indirect_parameters(cmd_buffer, buffer, offset, false);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = SEQUENTIAL;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
+
+ offset += stride;
}
}
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
if (anv_batch_has_error(&cmd_buffer->batch))
return;
genX(cmd_buffer_flush_state)(cmd_buffer);
- /* TODO: We need to stomp base vertex to 0 somehow */
- if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
- if (vs_prog_data->uses_drawid)
- emit_draw_index(cmd_buffer, 0);
+ for (uint32_t i = 0; i < drawCount; i++) {
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
- emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+ /* TODO: We need to stomp base vertex to 0 somehow */
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
+ emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, i);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
- prim.IndirectParameterEnable = true;
- prim.VertexAccessType = RANDOM;
- prim.PrimitiveTopologyType = pipeline->topology;
+ load_indirect_parameters(cmd_buffer, buffer, offset, true);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = RANDOM;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
+
+ offset += stride;
}
}
anv_state_flush(cmd_buffer->device, state);
cmd_buffer->state.num_workgroups_offset = state.offset;
cmd_buffer->state.num_workgroups_bo =
- &cmd_buffer->device->dynamic_state_block_pool.bo;
+ &cmd_buffer->device->dynamic_state_pool.block_pool.bo;
}
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
*/
assert(att_ref->attachment < cmd_state->framebuffer->attachment_count);
- const struct anv_image * const image =
- cmd_state->framebuffer->attachments[att_ref->attachment]->image;
+ const struct anv_image_view * const iview =
+ cmd_state->framebuffer->attachments[att_ref->attachment];
+ const struct anv_image * const image = iview->image;
/* Perform the layout transition. */
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
att_state->aux_usage =
anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
image->aspects, target_layout);
+ } else if (image->aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
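+      /* Only the layers and the single mip level that this attachment's
+       * view covers need the transition.
+       */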
+ transition_color_buffer(cmd_buffer, image,
+ iview->isl.base_level, 1,
+ iview->isl.base_array_layer,
+ iview->isl.array_len,
+ att_state->current_layout, target_layout);
}
att_state->current_layout = target_layout;
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
+ /* Our implementation of VK_KHR_multiview uses instancing to draw the
+ * different views. If the client asks for instancing, we need to use the
+ * Instance Data Step Rate to ensure that we repeat the client's
+ * per-instance data once for each view. Since this bit is in
+ * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top
+ * of each subpass.
+ */
+ if (GEN_GEN == 7)
+ cmd_buffer->state.vb_dirty |= ~0;
+
/* Perform transitions to the subpass layout before any writes have
* occurred.
*/