X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Firis%2Firis_batch.c;h=66ed3c36278573a77215a2227b111a14c15f6aa7;hb=572fad1e8435e11640fe5f51bfbae58e0fad619f;hp=a7fc7f710ef0342ed63042b2cb96cbf8ad3f8843;hpb=a3a998f19a3699755ce50cbe5480b6f12ce89015;p=mesa.git

diff --git a/src/gallium/drivers/iris/iris_batch.c b/src/gallium/drivers/iris/iris_batch.c
index a7fc7f710ef..66ed3c36278 100644
--- a/src/gallium/drivers/iris/iris_batch.c
+++ b/src/gallium/drivers/iris/iris_batch.c
@@ -2,30 +2,45 @@
  * Copyright © 2017 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file iris_batch.c
+ *
+ * Batchbuffer and command submission module.
+ *
+ * Every API draw call results in a number of GPU commands, which we
+ * collect into a "batch buffer".  Typically, many draw calls are grouped
+ * into a single batch to amortize command submission overhead.
+ *
+ * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
+ * One critical piece of data is the "validation list", which contains a
+ * list of the buffer objects (BOs) which the commands in the GPU need.
+ * The kernel will make sure these are resident and pinned at the correct
+ * virtual memory address before executing our batch.  If a BO is not in
+ * the validation list, it effectively does not exist, so take care.
  */
 
 #include "iris_batch.h"
-#include "iris_binder.h"
 #include "iris_bufmgr.h"
 #include "iris_context.h"
+#include "iris_fence.h"
 
 #include "drm-uapi/i915_drm.h"
 
@@ -38,20 +53,45 @@
 
 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
 
-#define BATCH_SZ (20 * 1024)
-
 /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
  * or 12 bytes for MI_BATCH_BUFFER_START (when chaining).  Plus, we may
  * need an extra 4 bytes to pad out to the nearest QWord.  So reserve 16.
  */
 #define BATCH_RESERVED 16
 
-static void decode_batch(struct iris_batch *batch);
-
 static void
 iris_batch_reset(struct iris_batch *batch);
 
-UNUSED static void
+static unsigned
+num_fences(struct iris_batch *batch)
+{
+   return util_dynarray_num_elements(&batch->exec_fences,
+                                     struct drm_i915_gem_exec_fence);
+}
+
+/**
+ * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
+ */
+static void
+dump_fence_list(struct iris_batch *batch)
+{
+   fprintf(stderr, "Fence list (length %u):      ", num_fences(batch));
+
+   util_dynarray_foreach(&batch->exec_fences,
+                         struct drm_i915_gem_exec_fence, f) {
+      fprintf(stderr, "%s%u%s ",
+              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
+              f->handle,
+              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
+   }
+
+   fprintf(stderr, "\n");
+}
+
+/**
+ * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
+ */
+static void
 dump_validation_list(struct iris_batch *batch)
 {
    fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);
@@ -60,18 +100,20 @@ dump_validation_list(struct iris_batch *batch)
       uint64_t flags = batch->validation_list[i].flags;
       assert(batch->validation_list[i].handle ==
              batch->exec_bos[i]->gem_handle);
-      fprintf(stderr, "[%2d]: %2d %-14s %p %-7s @ 0x%016llx (%"PRIu64"B) - %d refs\n",
+      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
               i,
               batch->validation_list[i].handle,
               batch->exec_bos[i]->name,
-              batch->exec_bos[i],
-              (flags & EXEC_OBJECT_WRITE) ? "(write)" : "",
               batch->validation_list[i].offset,
               batch->exec_bos[i]->size,
-              batch->exec_bos[i]->refcount);
+              batch->exec_bos[i]->refcount,
+              (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
    }
 }
 
+/**
+ * Return BO information to the batch decoder (for debugging).
+ */
 static struct gen_batch_decode_bo
 decode_get_bo(void *v_batch, uint64_t address)
 {
@@ -95,6 +137,17 @@ decode_get_bo(void *v_batch, uint64_t address)
    return (struct gen_batch_decode_bo) { };
 }
 
+/**
+ * Decode the current batch.
+ */
+static void
+decode_batch(struct iris_batch *batch)
+{
+   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
+   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
+                   batch->exec_bos[0]->gtt_offset);
+}
+
 static bool
 uint_key_compare(const void *a, const void *b)
 {
@@ -112,16 +165,25 @@ iris_init_batch(struct iris_batch *batch,
                 struct iris_screen *screen,
                 struct iris_vtable *vtbl,
                 struct pipe_debug_callback *dbg,
-                uint8_t ring)
+                struct iris_batch *all_batches,
+                enum iris_batch_name name,
+                uint8_t engine)
 {
    batch->screen = screen;
    batch->vtbl = vtbl;
    batch->dbg = dbg;
+   batch->name = name;
+
+   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
+   assert((engine & ~I915_EXEC_RING_MASK) == 0);
+   assert(util_bitcount(engine) == 1);
+   batch->engine = engine;
 
-   /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
-   assert((ring & ~I915_EXEC_RING_MASK) == 0);
-   assert(util_bitcount(ring) == 1);
-   batch->ring = ring;
+   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
+   assert(batch->hw_ctx_id);
+
+   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
+   util_dynarray_init(&batch->syncpts, ralloc_context(NULL));
 
    batch->exec_count = 0;
    batch->exec_array_size = 100;
@@ -130,12 +192,18 @@ iris_init_batch(struct iris_batch *batch,
    batch->validation_list =
       malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
 
-   batch->binder.bo = NULL;
-
    batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
    batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
+
+   memset(batch->other_batches, 0, sizeof(batch->other_batches));
+
+   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
+      if (&all_batches[i] != batch)
+         batch->other_batches[j++] = &all_batches[i];
+   }
+
    if (unlikely(INTEL_DEBUG)) {
       batch->state_sizes =
          _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
@@ -157,20 +225,87 @@ iris_init_batch(struct iris_batch *batch,
 
 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
 
-static unsigned
-add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
+static struct drm_i915_gem_exec_object2 *
+find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
 {
    unsigned index = READ_ONCE(bo->index);
 
    if (index < batch->exec_count && batch->exec_bos[index] == bo)
-      return index;
+      return &batch->validation_list[index];
 
    /* May have been shared between multiple active batches */
    for (index = 0; index < batch->exec_count; index++) {
       if (batch->exec_bos[index] == bo)
-         return index;
+         return &batch->validation_list[index];
+   }
+
+   return NULL;
+}
+
+/**
+ * Add a buffer to the current batch's validation list.
+ *
+ * You must call this on any BO you wish to use in this batch, to ensure
+ * that it's resident when the GPU commands execute.
+ */
+void
+iris_use_pinned_bo(struct iris_batch *batch,
+                   struct iris_bo *bo,
+                   bool writable)
+{
+   assert(bo->kflags & EXEC_OBJECT_PINNED);
+
+   /* Never mark the workaround BO with EXEC_OBJECT_WRITE.  We don't care
+    * about the order of any writes to that buffer, and marking it writable
+    * would introduce data dependencies between multiple batches which share
+    * the buffer.
+    */
+   if (bo == batch->screen->workaround_bo)
+      writable = false;
+
+   struct drm_i915_gem_exec_object2 *existing_entry =
+      find_validation_entry(batch, bo);
+
+   if (existing_entry) {
+      /* The BO is already in the validation list; mark it writable */
+      if (writable)
+         existing_entry->flags |= EXEC_OBJECT_WRITE;
+
+      return;
+   }
+
+   if (bo != batch->bo) {
+      /* This is the first time our batch has seen this BO.  Before we use it,
+       * we may need to flush and synchronize with other batches.
+       */
+      for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
+         struct drm_i915_gem_exec_object2 *other_entry =
+            find_validation_entry(batch->other_batches[b], bo);
+
+         /* If the buffer is referenced by another batch, and either batch
+          * intends to write it, then flush the other batch and synchronize.
+          *
+          * Consider these cases:
+          *
+          * 1. They read, we read   =>  No synchronization required.
+          * 2. They read, we write  =>  Synchronize (they need the old value)
+          * 3. They write, we read  =>  Synchronize (we need their new value)
+          * 4. They write, we write =>  Synchronize (order writes)
+          *
+          * The read/read case is very common, as multiple batches usually
+          * share a streaming state buffer or shader assembly buffer, and
+          * we want to avoid synchronizing in this case.
+          */
+         if (other_entry &&
+             ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
+            iris_batch_flush(batch->other_batches[b]);
+            iris_batch_add_syncpt(batch, batch->other_batches[b]->last_syncpt,
+                                  I915_EXEC_FENCE_WAIT);
+         }
+      }
    }
 
+   /* Now, take a reference and add it to the validation list. */
    iris_bo_reference(bo);
 
    if (batch->exec_count == batch->exec_array_size) {
@@ -187,14 +322,14 @@ add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
       (struct drm_i915_gem_exec_object2) {
          .handle = bo->gem_handle,
          .offset = bo->gtt_offset,
-         .flags = bo->kflags,
+         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
       };
 
    bo->index = batch->exec_count;
    batch->exec_bos[batch->exec_count] = bo;
    batch->aperture_space += bo->size;
 
-   return batch->exec_count++;
+   batch->exec_count++;
 }
 
 static void
@@ -208,26 +343,25 @@ create_batch(struct iris_batch *batch)
    batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
    batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
    batch->map_next = batch->map;
-   batch->contains_draw = false;
 
-   add_exec_bo(batch, batch->bo);
+   iris_use_pinned_bo(batch, batch->bo, false);
 }
 
 static void
 iris_batch_reset(struct iris_batch *batch)
 {
-   if (batch->last_bo != NULL) {
-      iris_bo_unreference(batch->last_bo);
-      batch->last_bo = NULL;
-   }
-   batch->last_bo = batch->bo;
+   struct iris_screen *screen = batch->screen;
+
+   iris_bo_unreference(batch->bo);
    batch->primary_batch_size = 0;
+   batch->contains_draw = false;
 
    create_batch(batch);
    assert(batch->bo->index == 0);
 
-   iris_destroy_binder(&batch->binder);
-   iris_init_binder(&batch->binder, batch->bo->bufmgr);
+   struct iris_syncpt *syncpt = iris_create_syncpt(screen);
+   iris_batch_add_syncpt(batch, syncpt, I915_EXEC_FENCE_SIGNAL);
+   iris_syncpt_reference(screen, &syncpt, NULL);
 
    if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);
@@ -238,35 +372,39 @@
 void
 iris_batch_free(struct iris_batch *batch)
 {
+   struct iris_screen *screen = batch->screen;
+   struct iris_bufmgr *bufmgr = screen->bufmgr;
+
    for (int i = 0; i < batch->exec_count; i++) {
       iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
+
+   ralloc_free(batch->exec_fences.mem_ctx);
+
+   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
+      iris_syncpt_reference(screen, s, NULL);
+   ralloc_free(batch->syncpts.mem_ctx);
+
+   iris_syncpt_reference(screen, &batch->last_syncpt, NULL);
+
   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;
 
-   iris_bo_unreference(batch->last_bo);
+   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
 
   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);
 
-   iris_destroy_binder(&batch->binder);
-
   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
 }
 
-static unsigned
-batch_bytes_used(struct iris_batch *batch)
-{
-   return batch->map_next - batch->map;
-}
-
 /**
  * If we've chained to a secondary batch, or are getting near to the end,
  * then flush.  This should only be called between draws.
@@ -275,51 +413,29 @@ void
 iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
 {
    if (batch->bo != batch->exec_bos[0] ||
-       batch_bytes_used(batch) + estimate >= BATCH_SZ) {
+       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
       iris_batch_flush(batch);
    }
 }
 
 void
-iris_require_command_space(struct iris_batch *batch, unsigned size)
+iris_chain_to_new_batch(struct iris_batch *batch)
 {
-   const unsigned required_bytes = batch_bytes_used(batch) + size;
-
-   if (required_bytes >= BATCH_SZ) {
-      /* We only support chaining a single time. */
-      assert(batch->bo == batch->exec_bos[0]);
-
-      uint32_t *cmd = batch->map_next;
-      uint64_t *addr = batch->map_next + 4;
-      uint32_t *noop = batch->map_next + 12;
-      batch->map_next += 12;
-
-      /* No longer held by batch->bo, still held by validation list */
-      iris_bo_unreference(batch->bo);
-      batch->primary_batch_size = ALIGN(batch_bytes_used(batch), 8);
-      create_batch(batch);
-
-      /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
-      *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
-      *addr = batch->bo->gtt_offset;
-      *noop = 0;
-   }
-}
+   /* We only support chaining a single time. */
+   assert(batch->bo == batch->exec_bos[0]);
 
-void *
-iris_get_command_space(struct iris_batch *batch, unsigned bytes)
-{
-   iris_require_command_space(batch, bytes);
-   void *map = batch->map_next;
-   batch->map_next += bytes;
-   return map;
-}
+   uint32_t *cmd = batch->map_next;
+   uint64_t *addr = batch->map_next + 4;
+   batch->map_next += 8;
 
-void
-iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
-{
-   void *map = iris_get_command_space(batch, size);
-   memcpy(map, data, size);
+   /* No longer held by batch->bo, still held by validation list */
+   iris_bo_unreference(batch->bo);
+   batch->primary_batch_size = iris_batch_bytes_used(batch);
+   create_batch(batch);
+
+   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
+   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
+   *addr = batch->bo->gtt_offset;
 }
 
 /**
@@ -330,24 +446,22 @@ iris_finish_batch(struct iris_batch *batch)
 {
    // XXX: ISP DIS
 
-   /* Emit MI_BATCH_BUFFER_END to finish our batch.  Note that execbuf2
-    * requires our batch size to be QWord aligned, so we pad it out if
-    * necessary by emitting an extra MI_NOOP after the end.
-    */
-   const bool qword_aligned = (batch_bytes_used(batch) % 8) == 0;
+   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;
 
   map[0] = (0xA << 23);
-   map[1] = 0;
 
-   batch->map_next += qword_aligned ? 8 : 4;
+   batch->map_next += 4;
 
   if (batch->bo == batch->exec_bos[0])
-      batch->primary_batch_size = batch_bytes_used(batch);
+      batch->primary_batch_size = iris_batch_bytes_used(batch);
 }
 
+/**
+ * Submit the batch to the GPU via execbuffer2.
+ */
 static int
-submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
+submit_batch(struct iris_batch *batch)
 {
    iris_bo_unmap(batch->bo);
 
@@ -367,31 +481,30 @@ submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
       .buffers_ptr = (uintptr_t) batch->validation_list,
       .buffer_count = batch->exec_count,
       .batch_start_offset = 0,
-      .batch_len = batch->primary_batch_size,
-      .flags = batch->ring |
+      /* This must be QWord aligned. */
+      .batch_len = ALIGN(batch->primary_batch_size, 8),
+      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
              I915_EXEC_BATCH_FIRST |
              I915_EXEC_HANDLE_LUT,
     .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
  };
 
-   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;
-
-   if (in_fence_fd != -1) {
-      execbuf.rsvd2 = in_fence_fd;
-      execbuf.flags |= I915_EXEC_FENCE_IN;
+   if (num_fences(batch)) {
+      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
+      execbuf.num_cliprects = num_fences(batch);
+      execbuf.cliprects_ptr =
+         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }
 
-   if (out_fence_fd != NULL) {
-      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
-      *out_fence_fd = -1;
-      execbuf.flags |= I915_EXEC_FENCE_OUT;
-   }
-
-   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
+   int ret = drm_ioctl(batch->screen->fd,
+                       DRM_IOCTL_I915_GEM_EXECBUFFER2,
+                       &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
+      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
+      abort();
   } else {
     DBG("execbuf succeeded\n");
  }
@@ -401,47 +514,58 @@
 
       bo->idle = false;
       bo->index = -1;
-   }
 
-   if (ret == 0 && out_fence_fd != NULL)
-      *out_fence_fd = execbuf.rsvd2 >> 32;
+      iris_bo_unreference(bo);
+   }
 
    return ret;
 }
 
+static const char *
+batch_name_to_string(enum iris_batch_name name)
+{
+   const char *names[IRIS_BATCH_COUNT] = {
+      [IRIS_BATCH_RENDER]  = "render",
+      [IRIS_BATCH_COMPUTE] = "compute",
+   };
+   return names[name];
+}
+
 /**
- * The in_fence_fd is ignored if -1.  Otherwise this function takes ownership
- * of the fd.
+ * Flush the batch buffer, submitting it to the GPU and resetting it so
+ * we're ready to emit the next batch.
  *
- * The out_fence_fd is ignored if NULL.  Otherwise, the caller takes ownership
- * of the returned fd.
+ * \param in_fence_fd is ignored if -1.  Otherwise, this function takes
+ * ownership of the fd.
+ *
+ * \param out_fence_fd is ignored if NULL.  Otherwise, the caller must
+ * take ownership of the returned fd.
  */
-int
-_iris_batch_flush_fence(struct iris_batch *batch,
-                        int in_fence_fd, int *out_fence_fd,
-                        const char *file, int line)
+void
+_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
 {
-   if (batch_bytes_used(batch) == 0)
-      return 0;
+   struct iris_screen *screen = batch->screen;
+
+   if (iris_batch_bytes_used(batch) == 0)
+      return;
 
    iris_finish_batch(batch);
 
    if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
-      int bytes_for_commands = batch_bytes_used(batch);
-      int bytes_for_binder = batch->binder.insert_point;
+      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
        second_bytes = bytes_for_commands;
        bytes_for_commands += batch->primary_batch_size;
     }
-      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5d+%5db (%0.1f%%) "
-              "(cmds), %5db (%0.1f%%) (binder), %4d BOs (%0.1fMb aperture)\n",
-              file, line,
+      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
+              "(cmds), %4d BOs (%0.1fMb aperture)\n",
+              file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
             batch->primary_batch_size, second_bytes,
            100.0f * bytes_for_commands / BATCH_SZ,
-              bytes_for_binder, 100.0f * bytes_for_binder / IRIS_BINDER_SIZE,
           batch->exec_count,
           (float) batch->aperture_space / (1024 * 1024));
+      dump_fence_list(batch);
      dump_validation_list(batch);
  }
 
@@ -449,9 +573,7 @@ _iris_batch_flush_fence(struct iris_batch *batch,
       decode_batch(batch);
   }
 
-   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);
-
-   //throttle(iris);
+   int ret = submit_batch(batch);
 
   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
@@ -470,58 +592,30 @@ _iris_batch_flush_fence(struct iris_batch *batch,
 #endif
   }
 
-   /* Clean up after the batch we submitted and prepare for a new one. */
-   for (int i = 0; i < batch->exec_count; i++) {
-      iris_bo_unreference(batch->exec_bos[i]);
-      batch->exec_bos[i] = NULL;
-   }
   batch->exec_count = 0;
  batch->aperture_space = 0;
 
-   /* Start a new batch buffer. */
-   iris_batch_reset(batch);
+   struct iris_syncpt *syncpt =
+      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
+   iris_syncpt_reference(screen, &batch->last_syncpt, syncpt);
 
-   return 0;
-}
+   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
+      iris_syncpt_reference(screen, s, NULL);
+   util_dynarray_clear(&batch->syncpts);
 
-bool
-iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
-{
-   unsigned index = READ_ONCE(bo->index);
-   if (index < batch->exec_count && batch->exec_bos[index] == bo)
-      return true;
+   util_dynarray_clear(&batch->exec_fences);
 
-   for (int i = 0; i < batch->exec_count; i++) {
-      if (batch->exec_bos[i] == bo)
-         return true;
-   }
-   return false;
+   /* Start a new batch buffer. */
+   iris_batch_reset(batch);
 }
 
-/* This is the only way buffers get added to the validate list.
+/**
+ * Does the current batch refer to the given BO?
+ *
+ * (In other words, is the BO in the current batch's validation list?)
  */
-void
-iris_use_pinned_bo(struct iris_batch *batch,
-                   struct iris_bo *bo,
-                   bool writable)
-{
-   assert(bo->kflags & EXEC_OBJECT_PINNED);
-   unsigned index = add_exec_bo(batch, bo);
-   if (writable)
-      batch->validation_list[index].flags |= EXEC_OBJECT_WRITE;
-}
-
-static void
-decode_batch(struct iris_batch *batch)
+bool
+iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
 {
-   //if (batch->bo != batch->exec_bos[0]) {
-      void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
-      gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
-                      batch->exec_bos[0]->gtt_offset);
-
-      //fprintf(stderr, "Secondary batch...\n");
-   //}
-
-   //gen_print_batch(&batch->decoder, batch->map, batch_bytes_used(batch),
-   //                batch->bo->gtt_offset);
+   return find_validation_entry(batch, bo) != NULL;
 }
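
The four-case table in the iris_use_pinned_bo() comment above reduces to a
single predicate: two batches that reference the same BO only need to
synchronize when at least one of them writes it. A minimal standalone sketch
of that rule (hypothetical names; not part of the patch itself):

   #include <stdbool.h>

   /* Cross-batch synchronization rule from iris_use_pinned_bo(): read/read
    * (case 1) needs nothing; any combination involving a write (cases 2-4)
    * must flush the other batch and wait on its syncpoint before this
    * batch may use the BO.
    */
   static bool
   needs_cross_batch_sync(bool other_batch_writes, bool we_write)
   {
      return other_batch_writes || we_write;
   }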