* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file iris_batch.c
+ *
+ * Batchbuffer and command submission module.
+ *
+ * Every API draw call results in a number of GPU commands, which we
+ * collect into a "batch buffer". Typically, many draw calls are grouped
+ * into a single batch to amortize command submission overhead.
+ *
+ * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
+ * One critical piece of data is the "validation list", which contains a
+ * list of the buffer objects (BOs) that the commands in the batch need.
+ * The kernel will make sure these are resident and pinned at the correct
+ * virtual memory address before executing our batch. If a BO is not in
+ * the validation list, it effectively does not exist, so take care.
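+ *
+ * A rough sketch of how a draw interacts with this module (illustrative
+ * pseudocode; worst_case_bytes is a placeholder, not a real variable):
+ *
+ *    iris_batch_maybe_flush(batch, worst_case_bytes);
+ *    iris_use_pinned_bo(batch, bo, writable);
+ *    ... emit commands at batch->map_next ...
+ *    iris_batch_flush(batch);      (submits via I915_GEM_EXECBUFFER2)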
*/
#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
-#include "common/gen_decoder.h"
+#include "iris_fence.h"
#include "drm-uapi/i915_drm.h"
+#include "common/gen_aux_map.h"
+#include "intel/common/gen_gem.h"
#include "util/hash_table.h"
+#include "util/set.h"
+#include "util/u_upload_mgr.h"
#include "main/macros.h"
#include <errno.h>
#include <xf86drm.h>
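+
+/* VG() wraps Valgrind client requests (e.g. VALGRIND_CHECK_MEM_IS_DEFINED
+ * below) so that they compile away when Valgrind support is unavailable.
+ */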
+#if HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
+static void
+iris_batch_reset(struct iris_batch *batch);
+
+static unsigned
+num_fences(struct iris_batch *batch)
+{
+ return util_dynarray_num_elements(&batch->exec_fences,
+ struct drm_i915_gem_exec_fence);
+}
+
/**
- * Target sizes of the batch and state buffers. We create the initial
- * buffers at these sizes, and flush when they're nearly full. If we
- * underestimate how close we are to the end, and suddenly need more space
- * in the middle of a draw, we can grow the buffers, and finish the draw.
- * At that point, we'll be over our target size, so the next operation
- * should flush. Each time we flush the batch, we recreate both buffers
- * at the original target size, so it doesn't grow without bound.
+ * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
*/
-#define BATCH_SZ (20 * 1024)
-#define STATE_SZ (18 * 1024)
+static void
+dump_fence_list(struct iris_batch *batch)
+{
+ fprintf(stderr, "Fence list (length %u): ", num_fences(batch));
+
+ util_dynarray_foreach(&batch->exec_fences,
+ struct drm_i915_gem_exec_fence, f) {
+ fprintf(stderr, "%s%u%s ",
+ (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
+ f->handle,
+ (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
+ }
-static void decode_batch(struct iris_batch *batch);
+ fprintf(stderr, "\n");
+}
+/**
+ * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
+ */
static void
-iris_batch_reset(struct iris_batch *batch);
-
-UNUSED static void
dump_validation_list(struct iris_batch *batch)
{
fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);
for (int i = 0; i < batch->exec_count; i++) {
+ uint64_t flags = batch->validation_list[i].flags;
assert(batch->validation_list[i].handle ==
batch->exec_bos[i]->gem_handle);
- fprintf(stderr, "[%d] = %d %s %p @ %"PRIx64"\n", i,
+ fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
+ i,
batch->validation_list[i].handle,
batch->exec_bos[i]->name,
- batch->exec_bos[i],
- batch->exec_bos[i]->gtt_offset);
+ batch->validation_list[i].offset,
+ batch->exec_bos[i]->size,
+ batch->exec_bos[i]->refcount,
+ (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
}
}
-static bool
-uint_key_compare(const void *a, const void *b)
+/**
+ * Return BO information to the batch decoder (for debugging).
+ */
+static struct gen_batch_decode_bo
+decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
- return a == b;
-}
+ struct iris_batch *batch = v_batch;
-static uint32_t
-uint_key_hash(const void *key)
-{
- return (uintptr_t) key;
+ assert(ppgtt);
+
+ for (int i = 0; i < batch->exec_count; i++) {
+ struct iris_bo *bo = batch->exec_bos[i];
+ /* The decoder zeroes out the top 16 bits, so we need to as well */
+ uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);
+
+ if (address >= bo_address && address < bo_address + bo->size) {
+ return (struct gen_batch_decode_bo) {
+ .addr = address,
+ .size = bo->size,
+ .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
+ (address - bo_address),
+ };
+ }
+ }
+
+ return (struct gen_batch_decode_bo) { };
}
-static void
-init_reloc_list(struct iris_reloc_list *rlist, int count)
+static unsigned
+decode_get_state_size(void *v_batch,
+ uint64_t address,
+ UNUSED uint64_t base_address)
{
- rlist->reloc_count = 0;
- rlist->reloc_array_size = count;
- rlist->relocs = malloc(rlist->reloc_array_size *
- sizeof(struct drm_i915_gem_relocation_entry));
+ struct iris_batch *batch = v_batch;
+ unsigned size = (uintptr_t)
+ _mesa_hash_table_u64_search(batch->state_sizes, address);
+
+ return size;
}
+/**
+ * Decode the current batch.
+ */
static void
-create_batch_buffer(struct iris_bufmgr *bufmgr,
- struct iris_batch_buffer *buf,
- const char *name, unsigned size)
+decode_batch(struct iris_batch *batch)
{
- buf->bo = iris_bo_alloc(bufmgr, name, size, IRIS_MEMZONE_OTHER);
- buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
- buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
- buf->map_next = buf->map;
+ void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
+ gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
+ batch->exec_bos[0]->gtt_offset, false);
}
void
-iris_init_batch(struct iris_batch *batch,
- struct iris_screen *screen,
- struct pipe_debug_callback *dbg,
- uint8_t ring)
+iris_init_batch(struct iris_context *ice,
+ enum iris_batch_name name,
+ int priority)
{
+ struct iris_batch *batch = &ice->batches[name];
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+
batch->screen = screen;
- batch->dbg = dbg;
+ batch->dbg = &ice->dbg;
+ batch->reset = &ice->reset;
+ batch->state_sizes = ice->state.sizes;
+ batch->name = name;
+
+ batch->seqno.uploader =
+ u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, 0);
+ iris_seqno_init(batch);
- /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
- assert((ring & ~I915_EXEC_RING_MASK) == 0);
- assert(util_bitcount(ring) == 1);
- batch->ring = ring;
+ batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
+ assert(batch->hw_ctx_id);
- init_reloc_list(&batch->cmdbuf.relocs, 256);
- init_reloc_list(&batch->statebuf.relocs, 256);
+ iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);
+
+ util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
+ util_dynarray_init(&batch->syncobjs, ralloc_context(NULL));
batch->exec_count = 0;
batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
+ batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ memset(batch->other_batches, 0, sizeof(batch->other_batches));
+
+ for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (i != name)
+ batch->other_batches[j++] = &ice->batches[i];
+ }
+
if (unlikely(INTEL_DEBUG)) {
- batch->state_sizes =
- _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
+ const unsigned decode_flags =
+ GEN_BATCH_DECODE_FULL |
+ ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
+ GEN_BATCH_DECODE_OFFSETS |
+ GEN_BATCH_DECODE_FLOATS;
+
+ gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
+ stderr, decode_flags, NULL,
+ decode_get_bo, decode_get_state_size, batch);
+ batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;
+ batch->decoder.instruction_base = IRIS_MEMZONE_SHADER_START;
+ batch->decoder.max_vbo_decoded_lines = 32;
}
iris_batch_reset(batch);
}
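+
+/* For instance (a sketch; the real call sites live in context creation),
+ * a new context creates one batch per engine:
+ *
+ *    iris_init_batch(ice, IRIS_BATCH_RENDER, priority);
+ *    iris_init_batch(ice, IRIS_BATCH_COMPUTE, priority);
+ */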
-#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
-
-static unsigned
-add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
+static struct drm_i915_gem_exec_object2 *
+find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
unsigned index = READ_ONCE(bo->index);
if (index < batch->exec_count && batch->exec_bos[index] == bo)
- return index;
+ return &batch->validation_list[index];
/* May have been shared between multiple active batches */
for (index = 0; index < batch->exec_count; index++) {
if (batch->exec_bos[index] == bo)
- return index;
+ return &batch->validation_list[index];
}
- iris_bo_reference(bo);
+ return NULL;
+}
- if (batch->exec_count == batch->exec_array_size) {
+static void
+ensure_exec_obj_space(struct iris_batch *batch, uint32_t count)
+{
+ while (batch->exec_count + count > batch->exec_array_size) {
batch->exec_array_size *= 2;
batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
batch->exec_array_size * sizeof(batch->validation_list[0]));
}
+}
+
+/**
+ * Add a buffer to the current batch's validation list.
+ *
+ * You must call this on any BO you wish to use in this batch, to ensure
+ * that it's resident when the GPU commands execute.
+ */
+void
+iris_use_pinned_bo(struct iris_batch *batch,
+ struct iris_bo *bo,
+ bool writable)
+{
+ assert(bo->kflags & EXEC_OBJECT_PINNED);
+
+ /* Never mark the workaround BO with EXEC_OBJECT_WRITE. We don't care
+ * about the order of any writes to that buffer, and marking it writable
+ * would introduce data dependencies between multiple batches which share
+ * the buffer.
+ */
+ if (bo == batch->screen->workaround_bo)
+ writable = false;
+
+ struct drm_i915_gem_exec_object2 *existing_entry =
+ find_validation_entry(batch, bo);
+
+ if (existing_entry) {
+ /* The BO is already in the validation list; mark it writable */
+ if (writable)
+ existing_entry->flags |= EXEC_OBJECT_WRITE;
+
+ return;
+ }
+
+ if (bo != batch->bo) {
+ /* This is the first time our batch has seen this BO. Before we use it,
+ * we may need to flush and synchronize with other batches.
+ */
+ for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
+ struct drm_i915_gem_exec_object2 *other_entry =
+ find_validation_entry(batch->other_batches[b], bo);
+
+ /* If the buffer is referenced by another batch, and either batch
+ * intends to write it, then flush the other batch and synchronize.
+ *
+ * Consider these cases:
+ *
+ * 1. They read, we read => No synchronization required.
+ * 2. They read, we write => Synchronize (they need the old value)
+ * 3. They write, we read => Synchronize (we need their new value)
+ * 4. They write, we write => Synchronize (order writes)
+ *
+ * The read/read case is very common, as multiple batches usually
+ * share a streaming state buffer or shader assembly buffer, and
+ * we want to avoid synchronizing in this case.
+ */
+ if (other_entry &&
+ ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
+ iris_batch_flush(batch->other_batches[b]);
+ iris_batch_add_syncobj(batch,
+ batch->other_batches[b]->last_seqno->syncobj,
+ I915_EXEC_FENCE_WAIT);
+ }
+ }
+ }
+
+ /* Now, take a reference and add it to the validation list. */
+ iris_bo_reference(bo);
+
+ ensure_exec_obj_space(batch, 1);
batch->validation_list[batch->exec_count] =
(struct drm_i915_gem_exec_object2) {
.handle = bo->gem_handle,
.offset = bo->gtt_offset,
- .flags = bo->kflags,
+ .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
};
bo->index = batch->exec_count;
batch->exec_bos[batch->exec_count] = bo;
batch->aperture_space += bo->size;
- return batch->exec_count++;
+ batch->exec_count++;
}
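+
+/* Typical usage (a sketch; the BO names are illustrative): a draw might
+ * pin its texture read-only and its render target for writing:
+ *
+ *    iris_use_pinned_bo(batch, tex_bo, false);
+ *    iris_use_pinned_bo(batch, render_target_bo, true);
+ */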
static void
-iris_batch_reset(struct iris_batch *batch)
+create_batch(struct iris_batch *batch)
{
struct iris_screen *screen = batch->screen;
struct iris_bufmgr *bufmgr = screen->bufmgr;
- if (batch->last_cmd_bo != NULL) {
- iris_bo_unreference(batch->last_cmd_bo);
- batch->last_cmd_bo = NULL;
- }
- batch->last_cmd_bo = batch->cmdbuf.bo;
+ batch->bo = iris_bo_alloc(bufmgr, "command buffer",
+ BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
+ batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
+ batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
+ batch->map_next = batch->map;
- create_batch_buffer(bufmgr, &batch->cmdbuf, "command buffer", BATCH_SZ);
- create_batch_buffer(bufmgr, &batch->statebuf, "state buffer", STATE_SZ);
+ iris_use_pinned_bo(batch, batch->bo, false);
+}
- /* Avoid making 0 a valid state offset - otherwise the decoder will try
- * and decode data when we use offset 0 as a null pointer.
- */
- batch->statebuf.map_next += 1;
+static void
+iris_batch_maybe_noop(struct iris_batch *batch)
+{
+ /* We only insert the NOOP at the beginning of the batch. */
+ assert(iris_batch_bytes_used(batch) == 0);
- add_exec_bo(batch, batch->cmdbuf.bo);
- assert(batch->cmdbuf.bo->index == 0);
+ if (batch->noop_enabled) {
+      /* Emit MI_BATCH_BUFFER_END to prevent any further commands from
+       * being executed.
+ */
+ uint32_t *map = batch->map_next;
- if (batch->state_sizes)
- _mesa_hash_table_clear(batch->state_sizes, NULL);
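+      /* MI_BATCH_BUFFER_END: MI command with opcode 0x0A in bits 28:23. */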
+ map[0] = (0xA << 23);
- if (batch->ring == I915_EXEC_RENDER)
- batch->emit_state_base_address(batch);
+ batch->map_next += 4;
+ }
}
static void
-iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
+iris_batch_reset(struct iris_batch *batch)
{
- iris_batch_reset(batch);
- // XXX: iris_render_cache_set_clear(batch);
-}
+ struct iris_screen *screen = batch->screen;
-static void
-free_batch_buffer(struct iris_batch_buffer *buf)
-{
- iris_bo_unreference(buf->bo);
- buf->bo = NULL;
- buf->map = NULL;
- buf->map_next = NULL;
-
- free(buf->relocs.relocs);
- buf->relocs.relocs = NULL;
- buf->relocs.reloc_array_size = 0;
+ iris_bo_unreference(batch->bo);
+ batch->primary_batch_size = 0;
+ batch->total_chained_batch_size = 0;
+ batch->contains_draw = false;
+ batch->decoder.surface_base = batch->last_surface_base_address;
+
+ create_batch(batch);
+ assert(batch->bo->index == 0);
+
+ struct iris_syncobj *syncobj = iris_create_syncobj(screen);
+ iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
+ iris_syncobj_reference(screen, &syncobj, NULL);
+
+ iris_cache_sets_clear(batch);
+
+   /* Always add the workaround BO: it contains a driver identifier at the
+    * beginning, which is quite helpful for debugging error states.
+ */
+ iris_use_pinned_bo(batch, screen->workaround_bo, false);
+
+ iris_batch_maybe_noop(batch);
}
void
iris_batch_free(struct iris_batch *batch)
{
+ struct iris_screen *screen = batch->screen;
+ struct iris_bufmgr *bufmgr = screen->bufmgr;
+
for (int i = 0; i < batch->exec_count; i++) {
iris_bo_unreference(batch->exec_bos[i]);
}
free(batch->exec_bos);
free(batch->validation_list);
- free_batch_buffer(&batch->cmdbuf);
- free_batch_buffer(&batch->statebuf);
- iris_bo_unreference(batch->last_cmd_bo);
+ ralloc_free(batch->exec_fences.mem_ctx);
- if (batch->state_sizes)
- _mesa_hash_table_destroy(batch->state_sizes, NULL);
-}
+ pipe_resource_reference(&batch->seqno.ref.res, NULL);
-/**
- * Finish copying the old batch/state buffer's contents to the new one
- * after we tried to "grow" the buffer in an earlier operation.
- */
-static void
-finish_growing_bos(struct iris_batch_buffer *buf)
-{
- struct iris_bo *old_bo = buf->partial_bo;
- if (!old_bo)
- return;
+ util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
+ iris_syncobj_reference(screen, s, NULL);
+ ralloc_free(batch->syncobjs.mem_ctx);
- void *old_map = old_bo->map_cpu ? old_bo->map_cpu : old_bo->map_wc;
- memcpy(buf->map, old_map, buf->partial_bytes);
+ iris_seqno_reference(batch->screen, &batch->last_seqno, NULL);
+ u_upload_destroy(batch->seqno.uploader);
- buf->partial_bo = NULL;
- buf->partial_bytes = 0;
+ iris_bo_unreference(batch->bo);
+ batch->bo = NULL;
+ batch->map = NULL;
+ batch->map_next = NULL;
- iris_bo_unreference(old_bo);
-}
+ iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
-static unsigned
-buffer_bytes_used(struct iris_batch_buffer *buf)
-{
- return buf->map_next - buf->map;
+ _mesa_hash_table_destroy(batch->cache.render, NULL);
+ _mesa_set_destroy(batch->cache.depth, NULL);
+
+ if (unlikely(INTEL_DEBUG))
+ gen_batch_decode_ctx_finish(&batch->decoder);
}
/**
- * Grow either the batch or state buffer to a new larger size.
- *
- * We can't actually grow buffers, so we allocate a new one, copy over
- * the existing contents, and update our lists to refer to the new one.
- *
- * Note that this is only temporary - each new batch recreates the buffers
- * at their original target size (BATCH_SZ or STATE_SZ).
+ * If we've chained to a secondary batch, or are getting near the end,
+ * then flush. This should only be called between draws.
*/
-static void
-grow_buffer(struct iris_batch *batch,
- struct iris_batch_buffer *buf,
- unsigned new_size)
+void
+iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
- struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
- struct iris_bo *bo = buf->bo;
-
- perf_debug(batch->dbg, "Growing %s - ran out of space\n", bo->name);
-
- if (buf->partial_bo) {
- /* We've already grown once, and now we need to do it again.
- * Finish our last grow operation so we can start a new one.
- * This should basically never happen.
- */
- perf_debug(batch->dbg, "Had to grow multiple times");
- finish_growing_bos(buf);
+ if (batch->bo != batch->exec_bos[0] ||
+ iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
+ iris_batch_flush(batch);
}
+}
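+
+/* Draw code typically calls this just before emitting a draw, with a
+ * worst-case estimate of the command bytes the draw may need (a sketch;
+ * the estimate value here is illustrative):
+ *
+ *    iris_batch_maybe_flush(batch, 1500);
+ */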
- const unsigned existing_bytes = buffer_bytes_used(buf);
-
- struct iris_bo *new_bo =
- iris_bo_alloc(bufmgr, bo->name, new_size, IRIS_MEMZONE_OTHER);
+static void
+record_batch_sizes(struct iris_batch *batch)
+{
+ unsigned batch_size = iris_batch_bytes_used(batch);
- buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
- buf->map_next = buf->map + existing_bytes;
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, batch_size));
- /* Try to put the new BO at the same GTT offset as the old BO (which
- * we're throwing away, so it doesn't need to be there).
- *
- * This guarantees that our relocations continue to work: values we've
- * already written into the buffer, values we're going to write into the
- * buffer, and the validation/relocation lists all will match.
- *
- * Also preserve kflags for EXEC_OBJECT_CAPTURE.
- */
- new_bo->gtt_offset = bo->gtt_offset;
- new_bo->index = bo->index;
- new_bo->kflags = bo->kflags;
+ if (batch->bo == batch->exec_bos[0])
+ batch->primary_batch_size = batch_size;
- /* Batch/state buffers are per-context, and if we've run out of space,
- * we must have actually used them before, so...they will be in the list.
- */
- assert(bo->index < batch->exec_count);
- assert(batch->exec_bos[bo->index] == bo);
+ batch->total_chained_batch_size += batch_size;
+}
- /* Update the validation list to use the new BO. */
- batch->exec_bos[bo->index] = new_bo;
- batch->validation_list[bo->index].handle = new_bo->gem_handle;
+void
+iris_chain_to_new_batch(struct iris_batch *batch)
+{
+ uint32_t *cmd = batch->map_next;
+ uint64_t *addr = batch->map_next + 4;
+ batch->map_next += 12;
- /* Exchange the two BOs...without breaking pointers to the old BO.
- *
- * Consider this scenario:
- *
- * 1. Somebody calls iris_state_batch() to get a region of memory, and
- * and then creates a iris_address pointing to iris->batch.state.bo.
- * 2. They then call iris_state_batch() a second time, which happens to
- * grow and replace the state buffer. They then try to emit a
- * relocation to their first section of memory.
- *
- * If we replace the iris->batch.state.bo pointer at step 2, we would
- * break the address created in step 1. They'd have a pointer to the
- * old destroyed BO. Emitting a relocation would add this dead BO to
- * the validation list...causing /both/ statebuffers to be in the list,
- * and all kinds of disasters.
- *
- * This is not a contrived case - BLORP vertex data upload hits this.
- *
- * There are worse scenarios too. Fences for GL sync objects reference
- * iris->batch.batch.bo. If we replaced the batch pointer when growing,
- * we'd need to chase down every fence and update it to point to the
- * new BO. Otherwise, it would refer to a "batch" that never actually
- * gets submitted, and would fail to trigger.
- *
- * To work around both of these issues, we transmutate the buffers in
- * place, making the existing struct iris_bo represent the new buffer,
- * and "new_bo" represent the old BO. This is highly unusual, but it
- * seems like a necessary evil.
- *
- * We also defer the memcpy of the existing batch's contents. Callers
- * may make multiple iris_state_batch calls, and retain pointers to the
- * old BO's map. We'll perform the memcpy in finish_growing_bo() when
- * we finally submit the batch, at which point we've finished uploading
- * state, and nobody should have any old references anymore.
- *
- * To do that, we keep a reference to the old BO in grow->partial_bo,
- * and store the number of bytes to copy in grow->partial_bytes. We
- * can monkey with the refcounts directly without atomics because these
- * are per-context BOs and they can only be touched by this thread.
- */
- assert(new_bo->refcount == 1);
- new_bo->refcount = bo->refcount;
- bo->refcount = 1;
+ record_batch_sizes(batch);
- struct iris_bo tmp;
- memcpy(&tmp, bo, sizeof(struct iris_bo));
- memcpy(bo, new_bo, sizeof(struct iris_bo));
- memcpy(new_bo, &tmp, sizeof(struct iris_bo));
+ /* No longer held by batch->bo, still held by validation list */
+ iris_bo_unreference(batch->bo);
+ create_batch(batch);
- buf->partial_bo = new_bo; /* the one reference of the OLD bo */
- buf->partial_bytes = existing_bytes;
+ /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
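+   /* Opcode 0x31 (bits 28:23) is MI_BATCH_BUFFER_START, bit 8 selects
+    * the PPGTT address space, and the DWord Length field is the total
+    * command length in DWords minus two.
+    */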
+ *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
+ *addr = batch->bo->gtt_offset;
}
static void
-require_buffer_space(struct iris_batch *batch,
- struct iris_batch_buffer *buf,
- unsigned size,
- unsigned flush_threshold,
- unsigned max_buffer_size)
+add_aux_map_bos_to_batch(struct iris_batch *batch)
{
- const unsigned required_bytes = buffer_bytes_used(buf) + size;
+ void *aux_map_ctx = iris_bufmgr_get_aux_map_context(batch->screen->bufmgr);
+ if (!aux_map_ctx)
+ return;
- if (!batch->no_wrap && required_bytes >= flush_threshold) {
- iris_batch_flush(batch);
- } else if (required_bytes >= buf->bo->size) {
- grow_buffer(batch, buf,
- MIN2(buf->bo->size + buf->bo->size / 2, max_buffer_size));
- assert(required_bytes < buf->bo->size);
+ uint32_t count = gen_aux_map_get_num_buffers(aux_map_ctx);
+ ensure_exec_obj_space(batch, count);
+ gen_aux_map_fill_bos(aux_map_ctx,
+ (void**)&batch->exec_bos[batch->exec_count], count);
+ for (uint32_t i = 0; i < count; i++) {
+ struct iris_bo *bo = batch->exec_bos[batch->exec_count];
+ iris_bo_reference(bo);
+ batch->validation_list[batch->exec_count] =
+ (struct drm_i915_gem_exec_object2) {
+ .handle = bo->gem_handle,
+ .offset = bo->gtt_offset,
+ .flags = bo->kflags,
+ };
+ batch->aperture_space += bo->size;
+ batch->exec_count++;
}
}
-
-void
-iris_require_command_space(struct iris_batch *batch, unsigned size)
+static void
+finish_seqno(struct iris_batch *batch)
{
- require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
+ struct iris_seqno *sq = iris_seqno_new(batch, IRIS_SEQNO_END);
+ if (!sq)
+ return;
+
+ iris_seqno_reference(batch->screen, &batch->last_seqno, sq);
+ iris_seqno_reference(batch->screen, &sq, NULL);
}
/**
- * Reserve some space in the statebuffer, or flush.
- *
- * This is used to estimate when we're near the end of the batch,
- * so we can flush early.
+ * Terminate a batch with MI_BATCH_BUFFER_END.
*/
-void
-iris_require_state_space(struct iris_batch *batch, unsigned size)
+static void
+iris_finish_batch(struct iris_batch *batch)
{
- require_buffer_space(batch, &batch->statebuf, size, STATE_SZ,
- MAX_STATE_SIZE);
-}
+ add_aux_map_bos_to_batch(batch);
-void
-iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
-{
- iris_require_command_space(batch, size);
- memcpy(batch->cmdbuf.map_next, data, size);
- batch->cmdbuf.map_next += size;
+ finish_seqno(batch);
+
+ /* Emit MI_BATCH_BUFFER_END to finish our batch. */
+ uint32_t *map = batch->map_next;
+
+ map[0] = (0xA << 23);
+
+ batch->map_next += 4;
+
+ record_batch_sizes(batch);
}
/**
- * Called from iris_batch_flush before emitting MI_BATCHBUFFER_END and
- * sending it off.
- *
- * This function can emit state (say, to preserve registers that aren't saved
- * between batches).
+ * Replace our current GEM context with a new one (in case it got banned).
*/
-static void
-iris_finish_batch(struct iris_batch *batch)
+static bool
+replace_hw_ctx(struct iris_batch *batch)
{
- batch->no_wrap = true;
+ struct iris_screen *screen = batch->screen;
+ struct iris_bufmgr *bufmgr = screen->bufmgr;
- // XXX: ISP DIS
+ uint32_t new_ctx = iris_clone_hw_context(bufmgr, batch->hw_ctx_id);
+ if (!new_ctx)
+ return false;
- /* Emit MI_BATCH_BUFFER_END to finish our batch. Note that execbuf2
- * requires our batch size to be QWord aligned, so we pad it out if
- * necessary by emitting an extra MI_NOOP after the end.
- */
- const uint32_t MI_BATCH_BUFFER_END_AND_NOOP[2] = { (0xA << 23), 0 };
- const bool qword_aligned = (buffer_bytes_used(&batch->cmdbuf) % 8) == 0;
- iris_batch_emit(batch, MI_BATCH_BUFFER_END_AND_NOOP, qword_aligned ? 8 : 4);
+ iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
+ batch->hw_ctx_id = new_ctx;
+
+ /* Notify the context that state must be re-initialized. */
+ iris_lost_context_state(batch);
+
+ return true;
+}
+
+enum pipe_reset_status
+iris_batch_check_for_reset(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ enum pipe_reset_status status = PIPE_NO_RESET;
+ struct drm_i915_reset_stats stats = { .ctx_id = batch->hw_ctx_id };
+
+ if (drmIoctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
+ DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));
+
+ if (stats.batch_active != 0) {
+ /* A reset was observed while a batch from this hardware context was
+ * executing. Assume that this context was at fault.
+ */
+ status = PIPE_GUILTY_CONTEXT_RESET;
+ } else if (stats.batch_pending != 0) {
+ /* A reset was observed while a batch from this context was in progress,
+ * but the batch was not executing. In this case, assume that the
+ * context was not at fault.
+ */
+ status = PIPE_INNOCENT_CONTEXT_RESET;
+ }
- batch->no_wrap = false;
+ if (status != PIPE_NO_RESET) {
+ /* Our context is likely banned, or at least in an unknown state.
+       * Throw it away and start with a fresh context. With luck, this
+       * catches the problem before our next execbuf fails with -EIO.
+ */
+ replace_hw_ctx(batch);
+ }
+
+ return status;
}
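+
+/* A gallium frontend hook can forward to this; a sketch of a hypothetical
+ * wrapper (the real one would consult every batch in the context):
+ *
+ *    static enum pipe_reset_status
+ *    iris_get_device_reset_status(struct pipe_context *ctx)
+ *    {
+ *       struct iris_context *ice = (struct iris_context *) ctx;
+ *       return iris_batch_check_for_reset(&ice->batches[IRIS_BATCH_RENDER]);
+ *    }
+ */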
+/**
+ * Submit the batch to the GPU via execbuffer2.
+ */
static int
-submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
+submit_batch(struct iris_batch *batch)
{
- iris_bo_unmap(batch->cmdbuf.bo);
- iris_bo_unmap(batch->statebuf.bo);
+ iris_bo_unmap(batch->bo);
   /* The requirements for using I915_EXEC_NO_RELOC are:
*
* To avoid stalling, execobject.offset should match the current
* address of that object within the active context.
*/
- /* Set statebuffer relocations */
- const unsigned state_index = batch->statebuf.bo->index;
- if (state_index < batch->exec_count &&
- batch->exec_bos[state_index] == batch->statebuf.bo) {
- struct drm_i915_gem_exec_object2 *entry =
- &batch->validation_list[state_index];
- assert(entry->handle == batch->statebuf.bo->gem_handle);
- entry->relocation_count = batch->statebuf.relocs.reloc_count;
- entry->relocs_ptr = (uintptr_t) batch->statebuf.relocs.relocs;
- }
-
- /* Set batchbuffer relocations */
- struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
- assert(entry->handle == batch->cmdbuf.bo->gem_handle);
- entry->relocation_count = batch->cmdbuf.relocs.reloc_count;
- entry->relocs_ptr = (uintptr_t) batch->cmdbuf.relocs.relocs;
-
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = (uintptr_t) batch->validation_list,
.buffer_count = batch->exec_count,
.batch_start_offset = 0,
- .batch_len = buffer_bytes_used(&batch->cmdbuf),
- .flags = batch->ring |
+ /* This must be QWord aligned. */
+ .batch_len = ALIGN(batch->primary_batch_size, 8),
+ .flags = I915_EXEC_RENDER |
I915_EXEC_NO_RELOC |
I915_EXEC_BATCH_FIRST |
I915_EXEC_HANDLE_LUT,
.rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
};
- unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;
-
- if (in_fence_fd != -1) {
- execbuf.rsvd2 = in_fence_fd;
- execbuf.flags |= I915_EXEC_FENCE_IN;
- }
-
- if (out_fence_fd != NULL) {
- cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
- *out_fence_fd = -1;
- execbuf.flags |= I915_EXEC_FENCE_OUT;
+ if (num_fences(batch)) {
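+      /* With I915_EXEC_FENCE_ARRAY, the kernel repurposes the legacy
+       * cliprects fields to carry the fence array instead.
+       */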
+ execbuf.flags |= I915_EXEC_FENCE_ARRAY;
+ execbuf.num_cliprects = num_fences(batch);
+ execbuf.cliprects_ptr =
+ (uintptr_t)util_dynarray_begin(&batch->exec_fences);
}
-#if 1
- int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
- if (ret != 0) {
- ret = -errno;
- DBG("execbuf FAILED: errno = %d\n", -ret);
- } else {
- DBG("execbuf succeeded\n");
- }
-#else
int ret = 0;
- fprintf(stderr, "execbuf disabled for now\n");
-#endif
+ if (!batch->screen->no_hw &&
+ gen_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
+ ret = -errno;
for (int i = 0; i < batch->exec_count; i++) {
struct iris_bo *bo = batch->exec_bos[i];
bo->idle = false;
bo->index = -1;
- /* Update iris_bo::gtt_offset */
- if (batch->validation_list[i].offset != bo->gtt_offset) {
- DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
- bo->gem_handle, bo->gtt_offset,
- batch->validation_list[i].offset);
- bo->gtt_offset = batch->validation_list[i].offset;
- }
+ iris_bo_unreference(bo);
}
- if (ret == 0 && out_fence_fd != NULL)
- *out_fence_fd = execbuf.rsvd2 >> 32;
-
return ret;
}
+static const char *
+batch_name_to_string(enum iris_batch_name name)
+{
+ const char *names[IRIS_BATCH_COUNT] = {
+ [IRIS_BATCH_RENDER] = "render",
+ [IRIS_BATCH_COMPUTE] = "compute",
+ };
+ return names[name];
+}
+
/**
- * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
- * of the fd.
- *
- * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
- * of the returned fd.
+ * Flush the batch buffer, submitting it to the GPU and resetting it so
+ * we're ready to emit the next batch.
*/
-int
-_iris_batch_flush_fence(struct iris_batch *batch,
- int in_fence_fd, int *out_fence_fd,
- const char *file, int line)
+void
+_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
- if (buffer_bytes_used(&batch->cmdbuf) == 0)
- return 0;
+ struct iris_screen *screen = batch->screen;
- /* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!batch->no_wrap);
+ if (iris_batch_bytes_used(batch) == 0)
+ return;
iris_finish_batch(batch);
- if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
- int bytes_for_commands = buffer_bytes_used(&batch->cmdbuf);
- int bytes_for_state = buffer_bytes_used(&batch->statebuf);
- fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
- " %5db (%0.1f%%) (state), %4d BOs (%0.1fMb aperture),"
- " %4d batch relocs, %4d state relocs\n", file, line,
- bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
- bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
+ if (unlikely(INTEL_DEBUG &
+ (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL))) {
+ const char *basefile = strstr(file, "iris/");
+ if (basefile)
+ file = basefile + 5;
+
+ fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "
+ "(cmds), %4d BOs (%0.1fMb aperture)\n",
+ file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
+ batch->total_chained_batch_size,
+ 100.0f * batch->total_chained_batch_size / BATCH_SZ,
batch->exec_count,
- (float) batch->aperture_space / (1024 * 1024),
- batch->cmdbuf.relocs.reloc_count,
- batch->statebuf.relocs.reloc_count);
- }
-
- int ret = submit_batch(batch, in_fence_fd, out_fence_fd);
-
- //throttle(iris);
-
- if (ret < 0)
- return ret;
+ (float) batch->aperture_space / (1024 * 1024));
- if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
- decode_batch(batch);
-
- //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
- //iris_check_for_reset(ice);
+ if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
+ dump_fence_list(batch);
+ dump_validation_list(batch);
+ }
- if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
- dbg_printf("waiting for idle\n");
- iris_bo_wait_rendering(batch->cmdbuf.bo);
+ if (INTEL_DEBUG & DEBUG_BATCH) {
+ decode_batch(batch);
+ }
}
- /* Clean up after the batch we submitted and prepare for a new one. */
- for (int i = 0; i < batch->exec_count; i++) {
- iris_bo_unreference(batch->exec_bos[i]);
- batch->exec_bos[i] = NULL;
- }
- batch->cmdbuf.relocs.reloc_count = 0;
- batch->statebuf.relocs.reloc_count = 0;
+ int ret = submit_batch(batch);
+
batch->exec_count = 0;
batch->aperture_space = 0;
- iris_bo_unreference(batch->statebuf.bo);
+ util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
+ iris_syncobj_reference(screen, s, NULL);
+ util_dynarray_clear(&batch->syncobjs);
- /* Start a new batch buffer. */
- iris_batch_reset_and_clear_render_cache(batch);
-
- return 0;
-}
-
-bool
-iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
-{
- unsigned index = READ_ONCE(bo->index);
- if (index < batch->exec_count && batch->exec_bos[index] == bo)
- return true;
+ util_dynarray_clear(&batch->exec_fences);
- for (int i = 0; i < batch->exec_count; i++) {
- if (batch->exec_bos[i] == bo)
- return true;
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
+ dbg_printf("waiting for idle\n");
+      iris_bo_wait_rendering(batch->bo); /* if execbuf failed, this is a no-op */
}
- return false;
-}
-/* This is the only way buffers get added to the validate list.
- */
-static uint64_t
-emit_reloc(struct iris_batch *batch,
- struct iris_reloc_list *rlist, uint32_t offset,
- struct iris_bo *target, uint32_t target_offset,
- unsigned int reloc_flags)
-{
- assert(target != NULL);
+ /* Start a new batch buffer. */
+ iris_batch_reset(batch);
- unsigned int index = add_exec_bo(batch, target);
- struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+ /* EIO means our context is banned. In this case, try and replace it
+ * with a new logical context, and inform iris_context that all state
+ * has been lost and needs to be re-initialized. If this succeeds,
+ * dubiously claim success...
+ */
+ if (ret == -EIO && replace_hw_ctx(batch)) {
+ if (batch->reset->reset) {
+ /* Tell gallium frontends the device is lost and it was our fault. */
+ batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
+ }
- if (target->kflags & EXEC_OBJECT_PINNED) {
- assert(entry->offset == target->gtt_offset);
- return entry->offset + target_offset;
+ ret = 0;
}
- if (rlist->reloc_count == rlist->reloc_array_size) {
- rlist->reloc_array_size *= 2;
- rlist->relocs = realloc(rlist->relocs,
- rlist->reloc_array_size *
- sizeof(struct drm_i915_gem_relocation_entry));
+ if (ret < 0) {
+#ifdef DEBUG
+ const bool color = INTEL_DEBUG & DEBUG_COLOR;
+ fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
+ color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
+#endif
+ abort();
}
-
- rlist->relocs[rlist->reloc_count++] =
- (struct drm_i915_gem_relocation_entry) {
- .offset = offset,
- .delta = target_offset,
- .target_handle = index,
- .presumed_offset = entry->offset,
- };
-
- /* Using the old buffer offset, write in what the right data would be, in
- * case the buffer doesn't move and we can short-circuit the relocation
- * processing in the kernel
- */
- return entry->offset + target_offset;
-}
-
-void
-iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo)
-{
- assert(bo->kflags & EXEC_OBJECT_PINNED);
- add_exec_bo(batch, bo);
-}
-
-uint64_t
-iris_batch_reloc(struct iris_batch *batch, uint32_t batch_offset,
- struct iris_bo *target, uint32_t target_offset,
- unsigned int reloc_flags)
-{
- assert(batch_offset <= batch->cmdbuf.bo->size - sizeof(uint32_t));
-
- return emit_reloc(batch, &batch->cmdbuf.relocs, batch_offset,
- target, target_offset, reloc_flags);
}
-uint64_t
-iris_state_reloc(struct iris_batch *batch, uint32_t state_offset,
- struct iris_bo *target, uint32_t target_offset,
- unsigned int reloc_flags)
-{
- assert(state_offset <= batch->statebuf.bo->size - sizeof(uint32_t));
-
- return emit_reloc(batch, &batch->statebuf.relocs, state_offset,
- target, target_offset, reloc_flags);
-}
-
-
-static uint32_t
-iris_state_entry_size(struct iris_batch *batch, uint32_t offset)
+/**
+ * Does the current batch refer to the given BO?
+ *
+ * (In other words, is the BO in the current batch's validation list?)
+ */
+bool
+iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
- struct hash_entry *entry =
- _mesa_hash_table_search(batch->state_sizes, (void *)(uintptr_t) offset);
- return entry ? (uintptr_t) entry->data : 0;
+ return find_validation_entry(batch, bo) != NULL;
}
/**
- * Allocates a block of space in the batchbuffer for indirect state.
+ * Updates the state of the noop feature. When enabled, MI_BATCH_BUFFER_END
+ * is emitted at the start of each batch, so none of the commands that follow
+ * ever execute. Returns the dirty flags to apply when leaving noop mode.
*/
-void *
-iris_alloc_state(struct iris_batch *batch,
- int size, int alignment,
- uint32_t *out_offset)
+uint64_t
+iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable,
+                        uint64_t dirty_flags)
{
- assert(size < batch->statebuf.bo->size);
-
- const unsigned existing_bytes = buffer_bytes_used(&batch->statebuf);
- unsigned aligned_size =
- ALIGN(existing_bytes, alignment) - existing_bytes + size;
-
- require_buffer_space(batch, &batch->statebuf, aligned_size,
- STATE_SZ, MAX_STATE_SIZE);
-
- unsigned offset = ALIGN(buffer_bytes_used(&batch->statebuf), alignment);
-
- if (unlikely(batch->state_sizes)) {
- _mesa_hash_table_insert(batch->state_sizes,
- (void *) (uintptr_t) offset,
- (void *) (uintptr_t) size);
- }
+ if (batch->noop_enabled == noop_enable)
+ return 0;
- batch->statebuf.map_next += aligned_size;
+ batch->noop_enabled = noop_enable;
- *out_offset = offset;
- return batch->statebuf.map + offset;
-}
+ iris_batch_flush(batch);
-uint32_t
-iris_emit_state(struct iris_batch *batch,
- const void *data,
- int size, int alignment)
-{
- uint32_t out_offset;
- void *dest = iris_alloc_state(batch, size, alignment, &out_offset);
- memcpy(dest, data, size);
- return out_offset;
-}
+ /* If the batch was empty, flush had no effect, so insert our noop. */
+ if (iris_batch_bytes_used(batch) == 0)
+ iris_batch_maybe_noop(batch);
-static void
-decode_batch(struct iris_batch *batch)
-{
- // XXX: decode the batch
+ /* We only need to update the entire state if we transition from noop ->
+ * not-noop.
+ */
+ return !batch->noop_enabled ? dirty_flags : 0;
}
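+
+/* Intended use (a sketch; the dirty-flag constant is illustrative): the
+ * context merges the returned bits into its dirty state when toggling:
+ *
+ *    ice->state.dirty |=
+ *       iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER],
+ *                               enable, IRIS_ALL_DIRTY_FOR_RENDER);
+ */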