    return VK_SUCCESS;
 }
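+/* Build an execbuf that points the kernel at the device's trivial batch
+ * (MI_BATCH_BUFFER_END followed by MI_NOOP) for submits that carry no
+ * command buffer.
+ */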
+static void
+setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
+{
+   anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo, NULL, 0,
+                      &device->alloc);
+
+   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) execbuf->objects,
+      .buffer_count = execbuf->bo_count,
+      .batch_start_offset = 0,
+      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+      .rsvd1 = device->context_id,
+      .rsvd2 = 0,
+   };
+}
+
 VkResult
 anv_cmd_buffer_execbuf(struct anv_device *device,
                        struct anv_cmd_buffer *cmd_buffer,
       }
    }
-   result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
-   if (result != VK_SUCCESS)
-      return result;
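+   /* cmd_buffer is NULL for the semaphore-only submits issued by
+    * anv_QueueSubmit; in that case fall back to the trivial batch so GEM
+    * still has something to wait on.
+    */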
+   if (cmd_buffer) {
+      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
+      if (result != VK_SUCCESS)
+         return result;
+   } else {
+      setup_empty_execbuf(&execbuf, device);
+   }
+
    result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
                                                     border_colors);
 }
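+/* Create a small BO holding nothing but MI_BATCH_BUFFER_END and MI_NOOP.
+ * It is submitted via setup_empty_execbuf() whenever a queue submission
+ * contains no command buffers.
+ */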
+static void
+anv_device_init_trivial_batch(struct anv_device *device)
+{
+   anv_bo_init_new(&device->trivial_batch_bo, device, 4096);
+
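+   /* If the kernel supports it, flag the BO as async so execbuf skips
+    * implicit synchronization on it.
+    */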
+   if (device->instance->physicalDevice.has_exec_async)
+      device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;
+
+   void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
+                            0, 4096, 0);
+
+   struct anv_batch batch = {
+      .start = map,
+      .next = map,
+      .end = map + 4096,
+   };
+
+   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
+   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
+
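+   /* On platforms without LLC the GPU does not snoop the CPU cache, so
+    * flush the freshly written commands out to memory.
+    */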
+   if (!device->info.has_llc)
+      gen_clflush_range(map, batch.next - map);
+
+   anv_gem_munmap(map, device->trivial_batch_bo.size);
+}
+
 VkResult anv_CreateDevice(
     VkPhysicalDevice                            physicalDevice,
     const VkDeviceCreateInfo*                   pCreateInfo,
    if (result != VK_SUCCESS)
       goto fail_surface_state_pool;
+   anv_device_init_trivial_batch(device);
+
    anv_scratch_pool_init(device, &device->scratch_pool);
    anv_queue_init(device, &device->queue);
    anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
    anv_gem_close(device, device->workaround_bo.gem_handle);
+   anv_gem_close(device, device->trivial_batch_bo.gem_handle);
+
    anv_state_pool_finish(&device->surface_state_pool);
    anv_state_pool_finish(&device->instruction_state_pool);
    anv_state_pool_finish(&device->dynamic_state_pool);
     struct anv_state_pool                       surface_state_pool;
     struct anv_bo                               workaround_bo;
+    struct anv_bo                               trivial_batch_bo;
     struct anv_pipeline_cache                   blorp_shader_cache;
     struct blorp_context                        blorp;
    pthread_mutex_lock(&device->mutex);
    for (uint32_t i = 0; i < submitCount; i++) {
+      if (pSubmits[i].commandBufferCount == 0) {
+         /* If we don't have any command buffers, we need to submit a dummy
+          * batch to give GEM something to wait on. We could, potentially,
+          * come up with something more efficient but this shouldn't be a
+          * common case.
+          */
+         result = anv_cmd_buffer_execbuf(device, NULL,
+                                         pSubmits[i].pWaitSemaphores,
+                                         pSubmits[i].waitSemaphoreCount,
+                                         pSubmits[i].pSignalSemaphores,
+                                         pSubmits[i].signalSemaphoreCount);
+         if (result != VK_SUCCESS)
+            goto out;
+
+         continue;
+      }
+
       for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
          ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                          pSubmits[i].pCommandBuffers[j]);