GL_EXT_color_buffer_float DONE (all drivers)
GL_KHR_blend_equation_advanced DONE (i965, nvc0)
GL_KHR_debug DONE (all drivers)
- GL_KHR_robustness DONE (i965, nvc0)
+ GL_KHR_robustness DONE (freedreno, i965, nvc0)
GL_KHR_texture_compression_astc_ldr DONE (freedreno, i965/gen9+)
GL_OES_copy_image DONE (all drivers)
GL_OES_draw_buffers_indexed DONE (all drivers that support GL_ARB_draw_buffers_blend)
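For reference, a minimal sketch (not part of this patch) of how a GLES
application consumes GL_KHR_robustness once the driver advertises it.
The function-pointer type comes from GLES2/gl2ext.h, and the sketch
assumes a context created with a LOSE_CONTEXT_ON_RESET notification
strategy (e.g. via EGL_EXT_create_context_robustness):

#include <stdbool.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

/* Poll once per frame; any status other than GL_NO_ERROR means the
 * context was reset and should be torn down and recreated. */
static bool
context_needs_recreate(PFNGLGETGRAPHICSRESETSTATUSKHRPROC get_status)
{
	switch (get_status()) {
	case GL_NO_ERROR:
		return false;                    /* no reset observed */
	case GL_GUILTY_CONTEXT_RESET_KHR:    /* we faulted the GPU */
	case GL_INNOCENT_CONTEXT_RESET_KHR:  /* another context did */
	case GL_UNKNOWN_CONTEXT_RESET_KHR:
	default:
		return true;
	}
}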
memset(&ctx->debug, 0, sizeof(ctx->debug));
}
+static uint32_t
+fd_get_reset_count(struct fd_context *ctx, bool per_context)
+{
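+ /* FD_CTX_FAULTS counts faults attributed to this context, while
+ * FD_GLOBAL_FAULTS counts faults device-wide (an assumption based on
+ * the param names and how the two counts are used below).
+ */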
+ uint64_t val;
+ enum fd_param_id param =
+ per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
+ int ret = fd_pipe_get_param(ctx->pipe, param, &val);
+ debug_assert(!ret);
+ return val;
+}
+
+static enum pipe_reset_status
+fd_get_device_reset_status(struct pipe_context *pctx)
+{
+ struct fd_context *ctx = fd_context(pctx);
+ uint32_t context_faults = fd_get_reset_count(ctx, true);
+ uint32_t global_faults = fd_get_reset_count(ctx, false);
+ enum pipe_reset_status status;
+
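+ /* Check the per-context count first: a fault we caused presumably
+ * bumps the global count too, and in that case we want to report
+ * GUILTY rather than INNOCENT.
+ */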
+ if (context_faults != ctx->context_reset_count) {
+ status = PIPE_GUILTY_CONTEXT_RESET;
+ } else if (global_faults != ctx->global_reset_count) {
+ status = PIPE_INNOCENT_CONTEXT_RESET;
+ } else {
+ status = PIPE_NO_RESET;
+ }
+
+ ctx->context_reset_count = context_faults;
+ ctx->global_reset_count = global_faults;
+
+ return status;
+}
+
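The saved counts make the hook one-shot per reset: the first query
after a fault reports it, and a repeat query comes back clean. A
hypothetical illustration (assert-style sketch, assuming gallium's
pipe/p_context.h and pipe/p_defines.h):

#include <assert.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"

static void
reset_status_is_one_shot(struct pipe_context *pctx)
{
	/* ... suppose a fault caused by this context happens here ... */
	assert(pctx->get_device_reset_status(pctx) == PIPE_GUILTY_CONTEXT_RESET);
	/* the first query saved the new counts, so asking again reports
	 * nothing further: */
	assert(pctx->get_device_reset_status(pctx) == PIPE_NO_RESET);
}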
/* TODO we could combine a few of these small buffers (solid_vbuf,
* blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
* save a tiny bit of memory
*/
ctx->screen = screen;
ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
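+ /* Snapshot the current fault counts, so faults that predate this
+ * context are not reported against it.
+ */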
+ if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
+ ctx->context_reset_count = fd_get_reset_count(ctx, true);
+ ctx->global_reset_count = fd_get_reset_count(ctx, false);
+ }
+
ctx->primtypes = primtypes;
ctx->primtype_mask = 0;
for (i = 0; i < PIPE_PRIM_MAX; i++)
pctx->flush = fd_context_flush;
pctx->emit_string_marker = fd_emit_string_marker;
pctx->set_debug_callback = fd_set_debug_callback;
+ pctx->get_device_reset_status = fd_get_device_reset_status;
pctx->create_fence_fd = fd_create_fence_fd;
pctx->fence_server_sync = fd_fence_server_sync;
pctx->texture_barrier = fd_texture_barrier;
*/
struct pipe_fence_handle *last_fence;
+ /* Track the last known fault counts, both globally and per-context,
+ * so we can tell whether new resets have occurred since the last
+ * query.  If the global count increased, some other context crashed
+ * the GPU; if the per-context count increased, we crashed it
+ * ourselves.
+ */
+ uint32_t context_reset_count, global_reset_count;
+
/* Are we in process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/
case PIPE_CAP_PACKED_UNIFORMS:
return !is_a2xx(screen);
+ case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
+ case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
+ return screen->has_robustness;
+
case PIPE_CAP_VERTEXID_NOBASE:
return is_a3xx(screen) || is_a4xx(screen);
screen->priority_mask = (1 << val) - 1;
}
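+ /* Robustness needs a kernel new enough to report fault counts, and
+ * per-process pagetables so faults can be attributed to the context
+ * that caused them (an assumption based on the FD_PP_PGTABLE gate).
+ */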
+ if ((fd_device_version(dev) >= FD_VERSION_ROBUSTNESS) &&
+ (fd_pipe_get_param(screen->pipe, FD_PP_PGTABLE, &val) == 0)) {
+ screen->has_robustness = val;
+ }
+
struct sysinfo si;
sysinfo(&si);
screen->ram_size = si.totalram;