+ if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
+ /* If we successfully relocated everything ourselves, tell the kernel
+ * that it can skip doing relocations. The requirements for using
+ * NO_RELOC are:
+ *
+ * 1) The addresses written in the objects must match the corresponding
+ * reloc.presumed_offset which in turn must match the corresponding
+ * execobject.offset.
+ *
+ * 2) To avoid stalling, execobject.offset should match the current
+ * address of that object within the active context.
+ *
+ * In order to satisfy all of the invariants that make userspace
+ * relocations safe (see relocate_cmd_buffer()), we need to further
+ * ensure that the addresses we use match those used by the kernel
+ * for the most recent execbuf2.
+ *
+ * The kernel may still choose to do relocations anyway if something has
+ * moved in the GTT. In this case, the relocation list still needs to be
+ * valid. All relocations on the batch buffers are already valid and
+ * kept up-to-date. For surface state relocations, by applying the
+ * relocations in relocate_cmd_buffer(), we ensured that the address in
+ * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
+ * safe for the kernel to relocate them as needed.
+ */
+ execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
+ } else {
+ /* If we fall back to kernel relocations, we need to ensure that the
+ * relocation list is valid. All relocations on the batch buffers are
+ * already valid and kept up-to-date. Surface states, however, are
+ * shared between command buffers, and since we don't know the order in
+ * which command buffers will be submitted to the kernel, we don't know
+ * what address is actually written in a surface state object at any
+ * given time. The only option is to set a bogus presumed offset and
+ * let the kernel relocate them.
+ */
+ for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
+ cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
+ }
+
+ return VK_SUCCESS;
+}
+
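+/* Submit a command buffer to the kernel. The BOs backing any wait and
+ * signal semaphores are added to the exec object list so that the kernel
+ * can order this submission against them via implicit synchronization.
+ */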
+VkResult
+anv_cmd_buffer_execbuf(struct anv_device *device,
+ struct anv_cmd_buffer *cmd_buffer,
+ const VkSemaphore *in_semaphores,
+ uint32_t num_in_semaphores,
+ const VkSemaphore *out_semaphores,
+ uint32_t num_out_semaphores)
+{
+ struct anv_execbuf execbuf;
+ anv_execbuf_init(&execbuf);
+
+ VkResult result = VK_SUCCESS;
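+ /* Wait semaphores: adding the semaphore's BO to the exec list (without
+ * the write flag) makes the kernel order this submission after any
+ * pending writes to that BO.
+ */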
+ for (uint32_t i = 0; i < num_in_semaphores; i++) {
+ ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
+ assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
+ struct anv_semaphore_impl *impl = &semaphore->permanent;
+
+ switch (impl->type) {
+ case ANV_SEMAPHORE_TYPE_BO:
+ result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
+ 0, &device->alloc);
+ if (result != VK_SUCCESS) {
+ anv_execbuf_finish(&execbuf, &device->alloc);
+ return result;
+ }
+ break;
+ default:
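+ /* Semaphore types that aren't backed by a BO have nothing to add to
+ * the exec list.
+ */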
+ break;
+ }
+ }
+
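+ /* Signal semaphores: EXEC_OBJECT_WRITE marks the semaphore's BO as
+ * written by this submission, so anyone who waits on it afterwards
+ * will be ordered after this batch completes.
+ */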
+ for (uint32_t i = 0; i < num_out_semaphores; i++) {
+ ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
+ assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
+ struct anv_semaphore_impl *impl = &semaphore->permanent;
+
+ switch (impl->type) {
+ case ANV_SEMAPHORE_TYPE_BO:
+ result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
+ EXEC_OBJECT_WRITE, &device->alloc);
+ if (result != VK_SUCCESS) {
+ anv_execbuf_finish(&execbuf, &device->alloc);
+ return result;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
+ if (result != VK_SUCCESS) {
+ anv_execbuf_finish(&execbuf, &device->alloc);
+ return result;
+ }
+
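+ /* Everything is in place; hand the batch off to the kernel. The
+ * execbuf.bos array lets anv_device_execbuf() write the kernel-reported
+ * offsets back into the BOs after the ioctl.
+ */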
+ result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
+
+ anv_execbuf_finish(&execbuf, &device->alloc);
+
+ return result;