#include "p_format.h"
#include "p_video_enums.h"
#include "p_defines.h"
+#include <stdio.h>
#ifdef __cplusplus
extern "C" {
struct pipe_box;
struct pipe_clip_state;
struct pipe_constant_buffer;
+struct pipe_debug_callback;
struct pipe_depth_stencil_alpha_state;
+struct pipe_device_reset_callback;
struct pipe_draw_info;
+struct pipe_grid_info;
struct pipe_fence_handle;
struct pipe_framebuffer_state;
-struct pipe_index_buffer;
+struct pipe_image_view;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
struct pipe_sampler_state;
struct pipe_sampler_view;
struct pipe_scissor_state;
+struct pipe_shader_buffer;
struct pipe_shader_state;
struct pipe_stencil_ref;
struct pipe_stream_output_target;
struct pipe_compute_state;
union pipe_color_union;
union pipe_query_result;
+struct u_log_context;
+struct u_upload_mgr;
/**
* Gallium rendering context. Basically:
void *priv; /**< context private data (for DRI for example) */
void *draw; /**< private, for draw module (temporary?) */
+ /**
+ * Stream uploaders created by the driver. All drivers, gallium frontends, and
+ * modules should use them.
+ *
+ * Use u_upload_alloc or u_upload_data as many times as you want.
+ * Once you are done, use u_upload_unmap.
+ */
+ struct u_upload_mgr *stream_uploader; /* everything but shader constants */
+ struct u_upload_mgr *const_uploader; /* shader constants only */
+
void (*destroy)( struct pipe_context * );
/**
*/
void (*render_condition)( struct pipe_context *pipe,
struct pipe_query *query,
- boolean condition,
- uint mode );
+ bool condition,
+ enum pipe_render_cond_flag mode );
/**
* Query objects
*/
/*@{*/
struct pipe_query *(*create_query)( struct pipe_context *pipe,
- unsigned query_type );
+ unsigned query_type,
+ unsigned index );
+
+ /**
+ * Create a query object that queries all given query types simultaneously.
+ *
+ * This can only be used for those query types for which
+ * get_driver_query_info indicates that it must be used. Only one batch
+ * query object may be active at a time.
+ *
+ * There may be additional constraints on which query types can be used
+ * together, in particular those that are implied by
+ * get_driver_query_group_info.
+ *
+ * \param num_queries the number of query types
+ * \param query_types array of \p num_queries query types
+ * \return a query object, or NULL on error.
+ */
+ struct pipe_query *(*create_batch_query)( struct pipe_context *pipe,
+ unsigned num_queries,
+ unsigned *query_types );
void (*destroy_query)(struct pipe_context *pipe,
struct pipe_query *q);
- void (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
- void (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
+ bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
+ bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
/**
* Get results of a query.
* \param wait if true, this query will block until the result is ready
* \return TRUE if results are ready, FALSE otherwise
*/
- boolean (*get_query_result)(struct pipe_context *pipe,
- struct pipe_query *q,
- boolean wait,
- union pipe_query_result *result);
+ bool (*get_query_result)(struct pipe_context *pipe,
+ struct pipe_query *q,
+ bool wait,
+ union pipe_query_result *result);
+
+ /**
+ * Get results of a query, storing into resource. Note that this may not
+ * be used with batch queries.
+ *
+ * \param wait if true, this query will block until the result is ready
+    * \param result_type the type of the value being stored.
+ * \param index for queries that return multiple pieces of data, which
+ * item of that data to store (e.g. for
+ * PIPE_QUERY_PIPELINE_STATISTICS).
+ * When the index is -1, instead of the value of the query
+ * the driver should instead write a 1 or 0 to the appropriate
+ * location with 1 meaning that the query result is available.
+ */
+ void (*get_query_result_resource)(struct pipe_context *pipe,
+ struct pipe_query *q,
+ bool wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset);
+
+ /**
+ * Set whether all current non-driver queries except TIME_ELAPSED are
+ * active or paused.
+ */
+ void (*set_active_query_state)(struct pipe_context *pipe, bool enable);
+
+ /**
+ * INTEL Performance Query
+ */
+ /*@{*/
+
+ unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);
+
+ void (*get_intel_perf_query_info)(struct pipe_context *pipe,
+ unsigned query_index,
+ const char **name,
+ uint32_t *data_size,
+ uint32_t *n_counters,
+ uint32_t *n_active);
+
+ void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
+ unsigned query_index,
+ unsigned counter_index,
+ const char **name,
+ const char **desc,
+ uint32_t *offset,
+ uint32_t *data_size,
+ uint32_t *type_enum,
+ uint32_t *data_type_enum,
+ uint64_t *raw_max);
+
+ struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
+ unsigned query_index);
+
+ void (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+ void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+ void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+ void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+ bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);
+
+ void (*get_intel_perf_query_data)(struct pipe_context *pipe,
+ struct pipe_query *q,
+ size_t data_size,
+ uint32_t *data,
+ uint32_t *bytes_written);
+
/*@}*/
/**
void * (*create_sampler_state)(struct pipe_context *,
const struct pipe_sampler_state *);
void (*bind_sampler_states)(struct pipe_context *,
- unsigned shader, unsigned start_slot,
- unsigned num_samplers, void **samplers);
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned num_samplers,
+ void **samplers);
void (*delete_sampler_state)(struct pipe_context *, void *);
void * (*create_rasterizer_state)(struct pipe_context *,
void (*bind_gs_state)(struct pipe_context *, void *);
void (*delete_gs_state)(struct pipe_context *, void *);
+ void * (*create_tcs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*bind_tcs_state)(struct pipe_context *, void *);
+ void (*delete_tcs_state)(struct pipe_context *, void *);
+
+ void * (*create_tes_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*bind_tes_state)(struct pipe_context *, void *);
+ void (*delete_tes_state)(struct pipe_context *, void *);
+
void * (*create_vertex_elements_state)(struct pipe_context *,
unsigned num_elements,
const struct pipe_vertex_element *);
void (*set_sample_mask)( struct pipe_context *,
unsigned sample_mask );
+ void (*set_min_samples)( struct pipe_context *,
+ unsigned min_samples );
+
void (*set_clip_state)( struct pipe_context *,
const struct pipe_clip_state * );
void (*set_constant_buffer)( struct pipe_context *,
- uint shader, uint index,
- struct pipe_constant_buffer *buf );
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *buf );
void (*set_framebuffer_state)( struct pipe_context *,
const struct pipe_framebuffer_state * );
+ /**
+ * Set the sample locations used during rasterization. When NULL or sized
+ * zero, the default locations are used.
+ *
+ * Note that get_sample_position() still returns the default locations.
+ *
+ * The samples are accessed with
+ * locations[(pixel_y*grid_w+pixel_x)*ms+i],
+ * where:
+ * ms = the sample count
+ * grid_w = the pixel grid width for the sample count
+    *   grid_h  = the pixel grid height for the sample count
+    *   pixel_x = the window x coordinate modulo grid_w
+    *   pixel_y = the window y coordinate modulo grid_h
+ * i = the sample index
+ * This gives a result with the x coordinate as the low 4 bits and the y
+ * coordinate as the high 4 bits. For each coordinate 0 is the left or top
+ * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
+ *
+    * Out of bounds accesses return undefined values.
+ *
+ * The pixel grid is used to vary sample locations across pixels and its
+ * size can be queried with get_sample_pixel_grid().
+ */
+ void (*set_sample_locations)( struct pipe_context *,
+ size_t size, const uint8_t *locations );
+
void (*set_polygon_stipple)( struct pipe_context *,
- const struct pipe_poly_stipple * );
+ const struct pipe_poly_stipple * );
void (*set_scissor_states)( struct pipe_context *,
unsigned start_slot,
unsigned num_scissors,
const struct pipe_scissor_state * );
+ void (*set_window_rectangles)( struct pipe_context *,
+ bool include,
+ unsigned num_rectangles,
+ const struct pipe_scissor_state * );
+
void (*set_viewport_states)( struct pipe_context *,
unsigned start_slot,
unsigned num_viewports,
const struct pipe_viewport_state *);
- void (*set_sampler_views)(struct pipe_context *, unsigned shader,
+ void (*set_sampler_views)(struct pipe_context *,
+ enum pipe_shader_type shader,
unsigned start_slot, unsigned num_views,
- struct pipe_sampler_view **);
+ struct pipe_sampler_view **views);
+
+ void (*set_tess_state)(struct pipe_context *,
+ const float default_outer_level[4],
+ const float default_inner_level[2]);
/**
- * Bind an array of shader resources that will be used by the
- * graphics pipeline. Any resources that were previously bound to
- * the specified range will be unbound after this call.
+ * Sets the debug callback. If the pointer is null, then no callback is
+ * set, otherwise a copy of the data should be made.
+ */
+ void (*set_debug_callback)(struct pipe_context *,
+ const struct pipe_debug_callback *);
+
+ /**
+ * Bind an array of shader buffers that will be used by a shader.
+ * Any buffers that were previously bound to the specified range
+ * will be unbound.
*
- * \param start first resource to bind.
- * \param count number of consecutive resources to bind.
- * \param resources array of pointers to the resources to bind, it
+ * \param shader selects shader stage
+ * \param start_slot first buffer slot to bind.
+ * \param count number of consecutive buffers to bind.
+ * \param buffers array of pointers to the buffers to bind, it
* should contain at least \a count elements
- * unless it's NULL, in which case no new
- * resources will be bound.
+ * unless it's NULL, in which case no buffers will
+ * be bound.
+ * \param writable_bitmask If bit i is not set, buffers[i] will only be
+ * used with loads. If unsure, set to ~0.
+ */
+ void (*set_shader_buffers)(struct pipe_context *,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_shader_buffer *buffers,
+ unsigned writable_bitmask);
+
+ /**
+ * Bind an array of hw atomic buffers for use by all shaders.
+    * Any buffers that were previously bound to the specified range
+ * will be unbound.
+ *
+ * \param start_slot first buffer slot to bind.
+ * \param count number of consecutive buffers to bind.
+ * \param buffers array of pointers to the buffers to bind, it
+ * should contain at least \a count elements
+ * unless it's NULL, in which case no buffers will
+ * be bound.
*/
- void (*set_shader_resources)(struct pipe_context *,
- unsigned start, unsigned count,
- struct pipe_surface **resources);
+ void (*set_hw_atomic_buffers)(struct pipe_context *,
+ unsigned start_slot, unsigned count,
+ const struct pipe_shader_buffer *buffers);
+
+ /**
+ * Bind an array of images that will be used by a shader.
+ * Any images that were previously bound to the specified range
+ * will be unbound.
+ *
+ * \param shader selects shader stage
+ * \param start_slot first image slot to bind.
+ * \param count number of consecutive images to bind.
+    * \param images      array of the images to bind, it
+ * should contain at least \a count elements
+ * unless it's NULL, in which case no images will
+ * be bound.
+ */
+ void (*set_shader_images)(struct pipe_context *,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_image_view *images);
void (*set_vertex_buffers)( struct pipe_context *,
unsigned start_slot,
unsigned num_buffers,
const struct pipe_vertex_buffer * );
- void (*set_index_buffer)( struct pipe_context *pipe,
- const struct pipe_index_buffer * );
-
/*@}*/
/**
void (*set_stream_output_targets)(struct pipe_context *,
unsigned num_targets,
struct pipe_stream_output_target **targets,
- unsigned append_bitmask);
+ const unsigned *offsets);
+
+ /*@}*/
+
+
+ /**
+ * INTEL_blackhole_render
+ */
+ /*@{*/
+
+ void (*set_frontend_noop)(struct pipe_context *,
+ bool enable);
/*@}*/
* The entire buffers are cleared (no scissor, no colormask, etc).
*
* \param buffers bitfield of PIPE_CLEAR_* values.
+ * \param scissor_state the scissored region to clear
* \param color pointer to a union of fiu array for each of r, g, b, a.
* \param depth depth clear value in [0,1].
* \param stencil stencil clear value
*/
void (*clear)(struct pipe_context *pipe,
unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *color,
double depth,
unsigned stencil);
struct pipe_surface *dst,
const union pipe_color_union *color,
unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
+ unsigned width, unsigned height,
+ bool render_condition_enabled);
/**
* Clear a depth-stencil surface.
double depth,
unsigned stencil,
unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
+ unsigned width, unsigned height,
+ bool render_condition_enabled);
+
+ /**
+ * Clear the texture with the specified texel. Not guaranteed to be a
+ * renderable format. Data provided in the resource's format.
+ */
+ void (*clear_texture)(struct pipe_context *pipe,
+ struct pipe_resource *res,
+ unsigned level,
+ const struct pipe_box *box,
+ const void *data);
- /** Flush draw commands
+ /**
+ * Clear a buffer. Runs a memset over the specified region with the element
+ * value passed in through clear_value of size clear_value_size.
+ */
+ void (*clear_buffer)(struct pipe_context *pipe,
+ struct pipe_resource *res,
+ unsigned offset,
+ unsigned size,
+ const void *clear_value,
+ int clear_value_size);
+
+ /**
+ * If a depth buffer is rendered with different sample location state than
+ * what is current at the time of reading, the values may differ because
+    * depth buffer compression can depend on the sample locations.
+ *
+ * This function is a hint to decompress the current depth buffer to avoid
+ * such problems.
+ */
+ void (*evaluate_depth_buffer)(struct pipe_context *pipe);
+
+ /**
+ * Flush draw commands.
+ *
+ * This guarantees that the new fence (if any) will finish in finite time,
+ * unless PIPE_FLUSH_DEFERRED is used.
+ *
+ * Subsequent operations on other contexts of the same screen are guaranteed
+ * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
+ *
+ * NOTE: use screen->fence_reference() (or equivalent) to transfer
+ * new fence ref to **fence, to ensure that previous fence is unref'd
*
+ * \param fence if not NULL, an old fence to unref and transfer a
+ * new fence reference to
* \param flags bitfield of enum pipe_flush_flags values.
*/
void (*flush)(struct pipe_context *pipe,
struct pipe_fence_handle **fence,
unsigned flags);
+ /**
+ * Create a fence from a fd.
+ *
+ * This is used for importing a foreign/external fence fd.
+ *
+ * \param fence if not NULL, an old fence to unref and transfer a
+ * new fence reference to
+ * \param fd fd representing the fence object
+    * \param type  indicates which fence type backs fd
+ */
+ void (*create_fence_fd)(struct pipe_context *pipe,
+ struct pipe_fence_handle **fence,
+ int fd,
+ enum pipe_fd_type type);
+
+ /**
+ * Insert commands to have GPU wait for fence to be signaled.
+ */
+ void (*fence_server_sync)(struct pipe_context *pipe,
+ struct pipe_fence_handle *fence);
+
+ /**
+ * Insert commands to have the GPU signal a fence.
+ */
+ void (*fence_server_signal)(struct pipe_context *pipe,
+ struct pipe_fence_handle *fence);
+
/**
* Create a view on a texture to be used by a shader stage.
*/
struct pipe_resource *texture,
const struct pipe_sampler_view *templat);
+ /**
+ * Destroy a view on a texture.
+ *
+ * \param ctx the current context
+ * \param view the view to be destroyed
+ *
+ * \note The current context may not be the context in which the view was
+ * created (view->context). However, the caller must guarantee that
+ * the context which created the view is still alive.
+ */
void (*sampler_view_destroy)(struct pipe_context *ctx,
struct pipe_sampler_view *view);
void (*surface_destroy)(struct pipe_context *ctx,
struct pipe_surface *);
+
/**
* Map a resource.
*
struct pipe_transfer *transfer);
/* One-shot transfer operation with data supplied in a user
- * pointer. XXX: strides??
+ * pointer.
*/
- void (*transfer_inline_write)( struct pipe_context *,
- struct pipe_resource *,
- unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
- const struct pipe_box *,
- const void *data,
- unsigned stride,
- unsigned layer_stride);
+ void (*buffer_subdata)(struct pipe_context *,
+ struct pipe_resource *,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ unsigned offset,
+ unsigned size,
+ const void *data);
+
+ void (*texture_subdata)(struct pipe_context *,
+ struct pipe_resource *,
+ unsigned level,
+ unsigned usage, /* a combination of PIPE_TRANSFER_x */
+ const struct pipe_box *,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride);
/**
* Flush any pending framebuffer writes and invalidate texture caches.
*/
- void (*texture_barrier)(struct pipe_context *);
-
+ void (*texture_barrier)(struct pipe_context *, unsigned flags);
+
+ /**
+ * Flush caches according to flags.
+ */
+ void (*memory_barrier)(struct pipe_context *, unsigned flags);
+
+ /**
+ * Change the commitment status of a part of the given resource, which must
+ * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
+ *
+ * \param level The texture level whose commitment should be changed.
+ * \param box The region of the resource whose commitment should be changed.
+ * \param commit Whether memory should be committed or un-committed.
+ *
+ * \return false if out of memory, true on success.
+ */
+ bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
+ unsigned level, struct pipe_box *box, bool commit);
+
/**
* Creates a video codec for a specific video format/profile
*/
/**
* Launch the compute kernel starting from instruction \a pc of the
* currently bound compute program.
- *
- * \a grid_layout and \a block_layout are arrays of size \a
- * PIPE_COMPUTE_CAP_GRID_DIMENSION that determine the layout of the
- * grid (in block units) and working block (in thread units) to be
- * used, respectively.
- *
- * \a pc For drivers that use PIPE_SHADER_IR_LLVM as their prefered IR,
- * this value will be the index of the kernel in the opencl.kernels
- * metadata list.
- *
- * \a input will be used to initialize the INPUT resource, and it
- * should point to a buffer of at least
- * pipe_compute_state::req_input_mem bytes.
*/
void (*launch_grid)(struct pipe_context *context,
- const uint *block_layout, const uint *grid_layout,
- uint32_t pc, const void *input);
+ const struct pipe_grid_info *info);
/*@}*/
/**
- * Get sample position for an individual sample point.
+ * Get the default sample position for an individual sample point.
*
* \param sample_count - total number of samples
* \param sample_index - sample to get the position values for
unsigned sample_index,
float *out_value);
+ /**
+ * Query a timestamp in nanoseconds. This is completely equivalent to
+ * pipe_screen::get_timestamp() but takes a context handle for drivers
+ * that require a context.
+ */
+ uint64_t (*get_timestamp)(struct pipe_context *);
+
/**
* Flush the resource cache, so that the resource can be used
* by an external client. Possible usage:
*/
void (*flush_resource)(struct pipe_context *ctx,
struct pipe_resource *resource);
+
+ /**
+ * Invalidate the contents of the resource. This is used to
+ *
+ * (1) implement EGL's semantic of undefined depth/stencil
+ * contents after a swapbuffers. This allows a tiled renderer (for
+ * example) to not store the depth buffer.
+ *
+ * (2) implement GL's InvalidateBufferData. For backwards compatibility,
+ * you must only rely on the usability for this purpose when
+ * PIPE_CAP_INVALIDATE_BUFFER is enabled.
+ */
+ void (*invalidate_resource)(struct pipe_context *ctx,
+ struct pipe_resource *resource);
+
+ /**
+ * Return information about unexpected device resets.
+ */
+ enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);
+
+ /**
+ * Sets the reset status callback. If the pointer is null, then no callback
+ * is set, otherwise a copy of the data should be made.
+ */
+ void (*set_device_reset_callback)(struct pipe_context *ctx,
+ const struct pipe_device_reset_callback *cb);
+
+ /**
+ * Dump driver-specific debug information into a stream. This is
+ * used by debugging tools.
+ *
+ * \param ctx pipe context
+ * \param stream where the output should be written to
+ * \param flags a mask of PIPE_DUMP_* flags
+ */
+ void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
+ unsigned flags);
+
+ /**
+ * Set the log context to which the driver should write internal debug logs
+ * (internal states, command streams).
+ *
+ * The caller must ensure that the log context is destroyed and reset to
+ * NULL before the pipe context is destroyed, and that log context functions
+ * are only called from the driver thread.
+ *
+ * \param ctx pipe context
+ * \param log logging context
+ */
+ void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);
+
+ /**
+ * Emit string marker in cmdstream
+ */
+ void (*emit_string_marker)(struct pipe_context *ctx,
+ const char *string,
+ int len);
+
+ /**
+ * Generate mipmap.
+ * \return TRUE if mipmap generation succeeds, FALSE otherwise
+ */
+ bool (*generate_mipmap)(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer);
+
+ /**
+ * Create a 64-bit texture handle.
+ *
+ * \param ctx pipe context
+ * \param view pipe sampler view object
+ * \param state pipe sampler state template
+ * \return a 64-bit texture handle if success, 0 otherwise
+ */
+ uint64_t (*create_texture_handle)(struct pipe_context *ctx,
+ struct pipe_sampler_view *view,
+ const struct pipe_sampler_state *state);
+
+ /**
+ * Delete a texture handle.
+ *
+ * \param ctx pipe context
+ * \param handle 64-bit texture handle
+ */
+ void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);
+
+ /**
+ * Make a texture handle resident.
+ *
+ * \param ctx pipe context
+ * \param handle 64-bit texture handle
+ * \param resident TRUE for resident, FALSE otherwise
+ */
+ void (*make_texture_handle_resident)(struct pipe_context *ctx,
+ uint64_t handle, bool resident);
+
+ /**
+ * Create a 64-bit image handle.
+ *
+ * \param ctx pipe context
+ * \param image pipe image view template
+ * \return a 64-bit image handle if success, 0 otherwise
+ */
+ uint64_t (*create_image_handle)(struct pipe_context *ctx,
+ const struct pipe_image_view *image);
+
+ /**
+ * Delete an image handle.
+ *
+ * \param ctx pipe context
+ * \param handle 64-bit image handle
+ */
+ void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);
+
+ /**
+ * Make an image handle resident.
+ *
+ * \param ctx pipe context
+ * \param handle 64-bit image handle
+ * \param access GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
+ * \param resident TRUE for resident, FALSE otherwise
+ */
+ void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
+ unsigned access, bool resident);
+
+ /**
+ * Call the given function from the driver thread.
+ *
+ * This is set by threaded contexts for use by debugging wrappers.
+ *
+ * \param asap if true, run the callback immediately if there are no pending
+ * commands to be processed by the driver thread
+ */
+ void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
+ bool asap);
+
+ /**
+ * Set a context parameter See enum pipe_context_param for more details.
+ */
+ void (*set_context_param)(struct pipe_context *ctx,
+ enum pipe_context_param param,
+ unsigned value);
};