#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_upload_mgr.h"
-#include "i915_drm.h"
+#include "drm-uapi/i915_drm.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
+#include "common/gen_defines.h"
#include "common/gen_sample_positions.h"
/**
memset(&ice->dbg, 0, sizeof(ice->dbg));
}
+/**
+ * Called from the batch module when it detects a GPU hang.
+ *
+ * In this case, we've lost our GEM context, and can't rely on any existing
+ * state on the GPU. We must mark everything dirty and wipe away any saved
+ * assumptions about the last known state of the GPU.
+ */
+void
+iris_lost_context_state(struct iris_batch *batch)
+{
+ /* The batch module doesn't have an iris_context, because we want to
+ * avoid introducing lots of layering violations. Unfortunately, here
+ * we do need to inform the context of batch catastrophe. We know the
+ * batch is one of our context's, so hackily claw our way back.
+ */
+ struct iris_context *ice = NULL;
+ struct iris_screen *screen;
+
+ if (batch->name == IRIS_BATCH_RENDER) {
+ ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
+ assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
+ screen = (void *) ice->ctx.screen;
+
+ /* Re-emit the initial invariant render state into the fresh context. */
+ ice->vtbl.init_render_context(screen, batch, &ice->vtbl, &ice->dbg);
+ } else if (batch->name == IRIS_BATCH_COMPUTE) {
+ ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]);
+ assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);
+ screen = (void *) ice->ctx.screen;
+
+ /* Re-emit the initial invariant compute state into the fresh context. */
+ ice->vtbl.init_compute_context(screen, batch, &ice->vtbl, &ice->dbg);
+ } else {
+ unreachable("unhandled batch reset");
+ }
+
+ /* Flag every dirty bit so all state is re-emitted on the next draw, and
+ * forget the last dispatched grid so compute state isn't elided.
+ */
+ ice->state.dirty = ~0ull;
+ memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
+ /* Force STATE_BASE_ADDRESS to be re-emitted: ~0ull can never match a
+ * real surface base address, so the next comparison always mismatches.
+ */
+ batch->last_surface_base_address = ~0ull;
+}
+
+/**
+ * Implements pipe_context::get_device_reset_status.
+ *
+ * Polls each batch's kernel context for GPU reset status and reports the
+ * worst result seen, invoking the application's reset callback (if any)
+ * when a reset is detected.
+ */
+static enum pipe_reset_status
+iris_get_device_reset_status(struct pipe_context *ctx)
+{
+ struct iris_context *ice = (struct iris_context *)ctx;
+
+ enum pipe_reset_status worst_reset = PIPE_NO_RESET;
+
+ /* Check the reset status of each batch's hardware context, and take the
+ * worst status (if one was guilty, proclaim guilt).
+ */
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ /* This will also recreate the hardware contexts as necessary, so any
+ * future queries will show no resets. We only want to report once.
+ */
+ enum pipe_reset_status batch_reset =
+ iris_batch_check_for_reset(&ice->batches[i]);
+
+ if (batch_reset == PIPE_NO_RESET)
+ continue;
+
+ if (worst_reset == PIPE_NO_RESET) {
+ worst_reset = batch_reset;
+ } else {
+ /* GUILTY < INNOCENT < UNKNOWN */
+ worst_reset = MIN2(worst_reset, batch_reset);
+ }
+ }
+
+ /* Notify the state tracker's registered device-reset callback, if any,
+ * so it can begin robustness recovery.
+ */
+ if (worst_reset != PIPE_NO_RESET && ice->reset.reset)
+ ice->reset.reset(ice->reset.data, worst_reset);
+
+ return worst_reset;
+}
+
+/**
+ * Implements pipe_context::set_device_reset_callback.
+ *
+ * Stores the application's device-reset callback (used for
+ * ARB_robustness-style notification); a NULL cb clears any previously
+ * installed callback.
+ */
+static void
+iris_set_device_reset_callback(struct pipe_context *ctx,
+ const struct pipe_device_reset_callback *cb)
+{
+ struct iris_context *ice = (struct iris_context *)ctx;
+
+ if (cb)
+ ice->reset = *cb;
+ else
+ memset(&ice->reset, 0, sizeof(ice->reset));
+}
+
+
static void
iris_get_sample_position(struct pipe_context *ctx,
unsigned sample_count,
iris_destroy_border_color_pool(ice);
u_upload_destroy(ice->state.surface_uploader);
u_upload_destroy(ice->state.dynamic_uploader);
+ u_upload_destroy(ice->query_buffer_uploader);
slab_destroy_child(&ice->transfer_pool);
case 9: \
gen9_##func(__VA_ARGS__); \
break; \
+ case 8: \
+ gen8_##func(__VA_ARGS__); \
+ break; \
default: \
unreachable("Unknown hardware generation"); \
}
ctx->destroy = iris_destroy_context;
ctx->set_debug_callback = iris_set_debug_callback;
+ ctx->set_device_reset_callback = iris_set_device_reset_callback;
+ ctx->get_device_reset_status = iris_get_device_reset_status;
ctx->get_sample_position = iris_get_sample_position;
ice->shaders.urb_size = devinfo->urb.size;
u_upload_create(ctx, 16384, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE);
+ ice->query_buffer_uploader =
+ u_upload_create(ctx, 4096, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING,
+ 0);
+
genX_call(devinfo, init_state, ice);
genX_call(devinfo, init_blorp, ice);
+ int priority = 0;
+ if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
+ priority = GEN_CONTEXT_HIGH_PRIORITY;
+ if (flags & PIPE_CONTEXT_LOW_PRIORITY)
+ priority = GEN_CONTEXT_LOW_PRIORITY;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ ice->state.sizes = _mesa_hash_table_u64_create(ice);
+
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
iris_init_batch(&ice->batches[i], screen, &ice->vtbl, &ice->dbg,
+ &ice->reset, ice->state.sizes,
ice->batches, (enum iris_batch_name) i,
- I915_EXEC_RENDER);
+ I915_EXEC_RENDER, priority);
}
ice->vtbl.init_render_context(screen, &ice->batches[IRIS_BATCH_RENDER],