bbo->relocs.num_relocs = 0;
}
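+/* Point 'batch' back at an existing batch_bo so emission resumes at
+ * bbo->length, keeping batch_padding bytes free at the end of the bo
+ * (room for a chaining MI_BATCH_BUFFER_START).
+ */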
+static void
+anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
+                      size_t batch_padding)
+{
+   batch->start = bbo->bo.map;
+   batch->next = bbo->bo.map + bbo->length;
+   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
+   batch->relocs = &bbo->relocs;
+}
+
static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
struct anv_batch_bo *surface_bbo =
anv_cmd_buffer_current_surface_bbo(cmd_buffer);
-   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);
+   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
+      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);
-   /* Round batch up to an even number of dwords. */
-   if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
-      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);
+      /* Round batch up to an even number of dwords. */
+      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
+         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);
+   }
anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
surface_bbo->length = cmd_buffer->surface_next;
}
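+/* Record every batch_bo on 'list' in the command buffer's seen_bbos vector
+ * so it is picked up when the validation list is built at submit time.
+ */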
+static inline VkResult
+anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
+                             struct list_head *list)
+{
+   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
+      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
+      if (bbo_ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      *bbo_ptr = bbo;
+   }
+
+   return VK_SUCCESS;
+}
+
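+/* Fold a recorded secondary into the primary.  Small, single-bbo
+ * secondaries are copied straight into the primary's batch; anything
+ * larger gets its bbo chain cloned and chained into instead.
+ */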
+void
+anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
+                             struct anv_cmd_buffer *secondary)
+{
+   if ((secondary->batch_bos.next == secondary->batch_bos.prev) &&
+       anv_cmd_buffer_current_batch_bo(secondary)->length <
+          ANV_CMD_BUFFER_BATCH_SIZE / 2) {
+      /* If the secondary has exactly one batch buffer in its list *and*
+       * that batch buffer is less than half of the maximum size, we're
+       * probably better off simply copying it into our batch.
+       */
+      anv_batch_emit_batch(&primary->batch, &secondary->batch);
+   } else {
+      struct list_head copy_list;
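+      /* Clone the secondary's whole bbo chain; the primary chains into
+       * and continues after the copy, which leaves the secondary itself
+       * untouched and reusable.
+       */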
+      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
+                                                secondary->device,
+                                                &copy_list);
+      if (result != VK_SUCCESS)
+         return; /* FIXME */
+
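+      /* Track the copies so they end up in the primary's validation
+       * list at submit time.
+       */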
+      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
+
+      struct anv_batch_bo *first_bbo =
+         list_first_entry(&copy_list, struct anv_batch_bo, link);
+      struct anv_batch_bo *last_bbo =
+         list_last_entry(&copy_list, struct anv_batch_bo, link);
+
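+      /* Jump from the primary's current batch into the first copied bbo. */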
+      cmd_buffer_chain_to_batch_bo(primary, first_bbo);
+
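+      /* The copies now belong to the primary; put them on its bbo list. */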
+      list_splicetail(&copy_list, &primary->batch_bos);
+
+      anv_batch_bo_continue(last_bbo, &primary->batch,
+                            GEN8_MI_BATCH_BUFFER_START_length * 4);
+
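+      /* The secondary programmed its own state base addresses when it
+       * began recording; switch back to the primary's.
+       */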
+      anv_cmd_buffer_emit_state_base_address(primary);
+   }
+
+   /* Mark the surface buffer from the secondary as seen */
+   anv_cmd_buffer_add_seen_bbos(primary, &secondary->surface_bos);
+}
+
static VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
struct anv_bo *bo,
struct anv_cmd_buffer *cmd_buffer;
VkResult result;
- assert(pCreateInfo->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
-
cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (cmd_buffer == NULL)
anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
&device->dynamic_state_block_pool);
+   cmd_buffer->level = pCreateInfo->level;
+   cmd_buffer->opt_flags = 0;
+
anv_cmd_state_init(&cmd_buffer->state);
*pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
+   cmd_buffer->opt_flags = pBeginInfo->flags;
+
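+   /* A secondary picks up the render pass and framebuffer it will execute
+    * inside directly from pBeginInfo.
+    */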
+   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
+      cmd_buffer->state.framebuffer =
+         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
+      cmd_buffer->state.pass =
+         anv_render_pass_from_handle(pBeginInfo->renderPass);
+
+      /* FIXME: We shouldn't be starting on the first subpass */
+      anv_cmd_buffer_begin_subpass(cmd_buffer,
+                                   &cmd_buffer->state.pass->subpasses[0]);
+   }
+
anv_cmd_buffer_emit_state_base_address(cmd_buffer);
cmd_buffer->state.current_pipeline = UINT32_MAX;
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
-   /* The algorithm used to compute the validate list is not threadsafe as
-    * it uses the bo->index field. We have to lock the device around it.
-    * Fortunately, the chances for contention here are probably very low.
-    */
-   pthread_mutex_lock(&device->mutex);
-   anv_cmd_buffer_prepare_execbuf(cmd_buffer);
-   pthread_mutex_unlock(&device->mutex);
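+   /* Only primaries can be submitted to a queue, so secondaries skip
+    * execbuf preparation here.
+    */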
+   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
+      /* The algorithm used to compute the validate list is not threadsafe
+       * as it uses the bo->index field. We have to lock the device around
+       * it. Fortunately, the chances for contention here are probably very
+       * low.
+       */
+      pthread_mutex_lock(&device->mutex);
+      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
+      pthread_mutex_unlock(&device->mutex);
+   }
return VK_SUCCESS;
}
ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
-   assert(contents == VK_RENDER_PASS_CONTENTS_INLINE);
-
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
-   assert(contents == VK_RENDER_PASS_CONTENTS_INLINE);
+   assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
anv_cmd_buffer_begin_subpass(cmd_buffer, cmd_buffer->state.subpass + 1);
}
uint32_t cmdBuffersCount,
const VkCmdBuffer* pCmdBuffers)
{
-   stub();
+   ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);
+
+   assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+
+   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);
+
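+   /* Copy or chain each secondary into the primary's batch. */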
+   for (uint32_t i = 0; i < cmdBuffersCount; i++) {
+      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
+
+      assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);
+
+      anv_cmd_buffer_add_secondary(primary, secondary);
+   }
}
for (uint32_t i = 0; i < cmdBufferCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
+   assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
+
if (device->dump_aub)
anv_cmd_buffer_dump(cmd_buffer);
struct anv_state_stream surface_state_stream;
struct anv_state_stream dynamic_state_stream;
+   VkCmdBufferOptimizeFlags opt_flags;
+   VkCmdBufferLevel level;
+
struct anv_cmd_state state;
};
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
+                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
struct anv_bo *