return true;
}
+/* Check whether the kernel observed a GPU reset involving this batch's
+ * hardware context, via the DRM_IOCTL_I915_GET_RESET_STATS ioctl.
+ *
+ * Returns PIPE_GUILTY_CONTEXT_RESET if a batch from this context was
+ * executing when the reset happened, PIPE_INNOCENT_CONTEXT_RESET if one
+ * was merely queued, and PIPE_NO_RESET otherwise.  On any reset, the
+ * (likely banned) kernel context is replaced with a fresh one as a side
+ * effect, so later queries report no reset and future execbufs can work.
+ */
+enum pipe_reset_status
+iris_batch_check_for_reset(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ enum pipe_reset_status status = PIPE_NO_RESET;
+ struct drm_i915_reset_stats stats = { .ctx_id = batch->hw_ctx_id };
+
+ /* On ioctl failure we just log and fall through, reporting no reset. */
+ if (drmIoctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
+ DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));
+
+ if (stats.batch_active != 0) {
+ /* A reset was observed while a batch from this hardware context was
+ * executing. Assume that this context was at fault.
+ */
+ status = PIPE_GUILTY_CONTEXT_RESET;
+ } else if (stats.batch_pending != 0) {
+ /* A reset was observed while a batch from this context was in progress,
+ * but the batch was not executing. In this case, assume that the
+ * context was not at fault.
+ */
+ status = PIPE_INNOCENT_CONTEXT_RESET;
+ }
+
+ if (status != PIPE_NO_RESET) {
+ /* Our context is likely banned, or at least in an unknown state.
+ * Throw it away and start with a fresh context. Ideally this may
+ * catch the problem before our next execbuf fails with -EIO.
+ */
+ replace_hw_ctx(batch);
+ }
+
+ return status;
+}
+
/**
* Submit the batch to the GPU via execbuffer2.
*/
void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
bool writable);
+enum pipe_reset_status iris_batch_check_for_reset(struct iris_batch *batch);
+
static inline unsigned
iris_batch_bytes_used(struct iris_batch *batch)
{
memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
}
+/* Implements pipe_context::get_device_reset_status.
+ *
+ * Polls every batch's hardware context for resets and reports the most
+ * severe status seen across all of them; guilty outranks innocent, which
+ * outranks unknown (lower enum values are more severe, hence MIN2).  If
+ * any reset is found and the application installed a device-reset
+ * callback, that callback is invoked with the aggregated status.
+ */
+static enum pipe_reset_status
+iris_get_device_reset_status(struct pipe_context *ctx)
+{
+ struct iris_context *ice = (struct iris_context *)ctx;
+
+ enum pipe_reset_status worst_reset = PIPE_NO_RESET;
+
+ /* Check the reset status of each batch's hardware context, and take the
+ * worst status (if one was guilty, proclaim guilt).
+ */
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ /* This will also recreate the hardware contexts as necessary, so any
+ * future queries will show no resets. We only want to report once.
+ */
+ enum pipe_reset_status batch_reset =
+ iris_batch_check_for_reset(&ice->batches[i]);
+
+ if (batch_reset == PIPE_NO_RESET)
+ continue;
+
+ if (worst_reset == PIPE_NO_RESET) {
+ worst_reset = batch_reset;
+ } else {
+ /* GUILTY < INNOCENT < UNKNOWN */
+ worst_reset = MIN2(worst_reset, batch_reset);
+ }
+ }
+
+ if (worst_reset != PIPE_NO_RESET && ice->reset.reset)
+ ice->reset.reset(ice->reset.data, worst_reset);
+
+ return worst_reset;
+}
+
static void
iris_set_device_reset_callback(struct pipe_context *ctx,
const struct pipe_device_reset_callback *cb)
ctx->destroy = iris_destroy_context;
ctx->set_debug_callback = iris_set_debug_callback;
ctx->set_device_reset_callback = iris_set_device_reset_callback;
+ ctx->get_device_reset_status = iris_get_device_reset_status;
ctx->get_sample_position = iris_get_sample_position;
ice->shaders.urb_size = devinfo->urb.size;
case PIPE_CAP_INT64_DIVMOD:
case PIPE_CAP_SAMPLER_VIEW_TARGET:
case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
+ case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
case PIPE_CAP_CULL_DISTANCE: