X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Finclude%2Fpipe%2Fp_context.h;h=1869d22ad47607f61578eb3dfbbc50aab40273ee;hb=8c9b9aac7d09e65195dca6681d59c10e4ef713d9;hp=4609d4dbf2331fc6de39c13af5e13e2021c22812;hpb=1966d9ff412359c4189601231ca9182071bef285;p=mesa.git

diff --git a/src/gallium/include/pipe/p_context.h b/src/gallium/include/pipe/p_context.h
index 4609d4dbf23..1869d22ad47 100644
--- a/src/gallium/include/pipe/p_context.h
+++ b/src/gallium/include/pipe/p_context.h
@@ -91,7 +91,7 @@ struct pipe_context {
    void *draw;  /**< private, for draw module (temporary?) */
 
    /**
-    * Stream uploaders created by the driver. All drivers, state trackers, and
+    * Stream uploaders created by the driver. All drivers, gallium frontends, and
     * modules should use them.
     *
     * Use u_upload_alloc or u_upload_data as many times as you want.
@@ -118,7 +118,7 @@ struct pipe_context {
     */
    void (*render_condition)( struct pipe_context *pipe,
                              struct pipe_query *query,
-                             boolean condition,
+                             bool condition,
                              enum pipe_render_cond_flag mode );
 
    /**
@@ -151,7 +151,7 @@ struct pipe_context {
    void (*destroy_query)(struct pipe_context *pipe,
                          struct pipe_query *q);
 
-   boolean (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
+   bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
    bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
 
    /**
@@ -159,10 +159,10 @@ struct pipe_context {
     * \param wait  if true, this query will block until the result is ready
     * \return TRUE if results are ready, FALSE otherwise
     */
-   boolean (*get_query_result)(struct pipe_context *pipe,
-                               struct pipe_query *q,
-                               boolean wait,
-                               union pipe_query_result *result);
+   bool (*get_query_result)(struct pipe_context *pipe,
+                            struct pipe_query *q,
+                            bool wait,
+                            union pipe_query_result *result);
 
    /**
     * Get results of a query, storing into resource. Note that this may not
@@ -179,7 +179,7 @@ struct pipe_context {
     */
    void (*get_query_result_resource)(struct pipe_context *pipe,
                                      struct pipe_query *q,
-                                     boolean wait,
+                                     bool wait,
                                      enum pipe_query_value_type result_type,
                                      int index,
                                      struct pipe_resource *resource,
@@ -189,7 +189,51 @@ struct pipe_context {
     * Set whether all current non-driver queries except TIME_ELAPSED are
     * active or paused.
     */
-   void (*set_active_query_state)(struct pipe_context *pipe, boolean enable);
+   void (*set_active_query_state)(struct pipe_context *pipe, bool enable);
+
+   /**
+    * INTEL Performance Query
+    */
+   /*@{*/
+
+   unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);
+
+   void (*get_intel_perf_query_info)(struct pipe_context *pipe,
+                                     unsigned query_index,
+                                     const char **name,
+                                     uint32_t *data_size,
+                                     uint32_t *n_counters,
+                                     uint32_t *n_active);
+
+   void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
+                                             unsigned query_index,
+                                             unsigned counter_index,
+                                             const char **name,
+                                             const char **desc,
+                                             uint32_t *offset,
+                                             uint32_t *data_size,
+                                             uint32_t *type_enum,
+                                             uint32_t *data_type_enum,
+                                             uint64_t *raw_max);
+
+   struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
+                                                  unsigned query_index);
+
+   void (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+   void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+   void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+   void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+   bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);
+
+   void (*get_intel_perf_query_data)(struct pipe_context *pipe,
+                                     struct pipe_query *q,
+                                     size_t data_size,
+                                     uint32_t *data,
+                                     uint32_t *bytes_written);
 
    /*@}*/
 
@@ -279,8 +323,35 @@ struct pipe_context {
    void (*set_framebuffer_state)( struct pipe_context *,
                                   const struct pipe_framebuffer_state * );
 
+   /**
+    * Set the sample locations used during rasterization. When NULL or sized
+    * zero, the default locations are used.
+    *
+    * Note that get_sample_position() still returns the default locations.
+    *
+    * The samples are accessed with
+    * locations[(pixel_y*grid_w+pixel_x)*ms+i],
+    * where:
+    *    ms      = the sample count
+    *    grid_w  = the pixel grid width for the sample count
+    *    grid_h  = the pixel grid height for the sample count
+    *    pixel_x = the window x coordinate modulo grid_w
+    *    pixel_y = the window y coordinate modulo grid_h
+    *    i       = the sample index
+    * This gives a result with the x coordinate as the low 4 bits and the y
+    * coordinate as the high 4 bits. For each coordinate 0 is the left or top
+    * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
+    *
+    * Out of bounds accesses return undefined values.
+    *
+    * The pixel grid is used to vary sample locations across pixels and its
+    * size can be queried with get_sample_pixel_grid().
+    */
+   void (*set_sample_locations)( struct pipe_context *,
+                                 size_t size, const uint8_t *locations );
+
    void (*set_polygon_stipple)( struct pipe_context *,
-                                const struct pipe_poly_stipple * );
+                                const struct pipe_poly_stipple * );
 
    void (*set_scissor_states)( struct pipe_context *,
                                unsigned start_slot,
@@ -288,7 +359,7 @@ struct pipe_context {
                               const struct pipe_scissor_state * );
 
    void (*set_window_rectangles)( struct pipe_context *,
-                                  boolean include,
+                                  bool include,
                                   unsigned num_rectangles,
                                   const struct pipe_scissor_state * );
 
@@ -300,7 +371,7 @@ struct pipe_context {
    void (*set_sampler_views)(struct pipe_context *,
                              enum pipe_shader_type shader,
                              unsigned start_slot, unsigned num_views,
-                             struct pipe_sampler_view **);
+                             struct pipe_sampler_view **views);
 
    void (*set_tess_state)(struct pipe_context *,
                           const float default_outer_level[4],
@@ -325,11 +396,30 @@ struct pipe_context {
     *                    should contain at least \a count elements
     *                    unless it's NULL, in which case no buffers will
     *                    be bound.
+    * \param writable_bitmask  If bit i is not set, buffers[i] will only be
+    *                          used with loads. If unsure, set to ~0.
     */
    void (*set_shader_buffers)(struct pipe_context *,
                               enum pipe_shader_type shader,
                               unsigned start_slot, unsigned count,
-                              const struct pipe_shader_buffer *buffers);
+                              const struct pipe_shader_buffer *buffers,
+                              unsigned writable_bitmask);
+
+   /**
+    * Bind an array of hw atomic buffers for use by all shaders.
+    * Any buffers that were previously bound to the specified range
+    * will be unbound.
+    *
+    * \param start_slot  first buffer slot to bind.
+    * \param count       number of consecutive buffers to bind.
+    * \param buffers     array of pointers to the buffers to bind, it
+    *                    should contain at least \a count elements
+    *                    unless it's NULL, in which case no buffers will
+    *                    be bound.
+    */
+   void (*set_hw_atomic_buffers)(struct pipe_context *,
+                                 unsigned start_slot, unsigned count,
+                                 const struct pipe_shader_buffer *buffers);
 
    /**
    * Bind an array of images that will be used by a shader.
@@ -378,6 +468,17 @@ struct pipe_context {
 
    /*@}*/
 
+   /**
+    * INTEL_blackhole_render
+    */
+   /*@{*/
+
+   void (*set_frontend_noop)(struct pipe_context *,
+                             bool enable);
+
+   /*@}*/
+
+
    /**
    * Resource functions for blit-like functionality
    *
@@ -411,12 +512,14 @@ struct pipe_context {
    * The entire buffers are cleared (no scissor, no colormask, etc).
    *
    * \param buffers  bitfield of PIPE_CLEAR_* values.
+   * \param scissor_state  the scissored region to clear
    * \param color  pointer to a union of fiu array for each of r, g, b, a.
    * \param depth  depth clear value in [0,1].
    * \param stencil  stencil clear value
    */
   void (*clear)(struct pipe_context *pipe,
                 unsigned buffers,
+                const struct pipe_scissor_state *scissor_state,
                 const union pipe_color_union *color,
                 double depth,
                 unsigned stencil);
@@ -469,7 +572,23 @@ struct pipe_context {
                               int clear_value_size);
 
    /**
-    * Flush draw commands
+    * If a depth buffer is rendered with different sample location state than
+    * what is current at the time of reading, the values may differ because
+    * depth buffer compression can depend on the sample locations.
+    *
+    * This function is a hint to decompress the current depth buffer to avoid
+    * such problems.
+    */
+   void (*evaluate_depth_buffer)(struct pipe_context *pipe);
+
+   /**
+    * Flush draw commands.
+    *
+    * This guarantees that the new fence (if any) will finish in finite time,
+    * unless PIPE_FLUSH_DEFERRED is used.
+    *
+    * Subsequent operations on other contexts of the same screen are guaranteed
+    * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
     *
     * NOTE: use screen->fence_reference() (or equivalent) to transfer
     * new fence ref to **fence, to ensure that previous fence is unref'd
@@ -483,17 +602,19 @@ struct pipe_context {
                  unsigned flags);
 
    /**
-    * Create a fence from a native sync fd.
+    * Create a fence from a fd.
     *
     * This is used for importing a foreign/external fence fd.
     *
     * \param fence  if not NULL, an old fence to unref and transfer a
     *    new fence reference to
-    * \param fd  native fence fd
+    * \param fd  fd representing the fence object
+    * \param type  indicates which fence type backs fd
     */
    void (*create_fence_fd)(struct pipe_context *pipe,
                            struct pipe_fence_handle **fence,
-                           int fd);
+                           int fd,
+                           enum pipe_fd_type type);
 
    /**
    * Insert commands to have GPU wait for fence to be signaled.
@@ -501,6 +622,12 @@ struct pipe_context {
    void (*fence_server_sync)(struct pipe_context *pipe,
                              struct pipe_fence_handle *fence);
 
+   /**
+    * Insert commands to have the GPU signal a fence.
+ */ + void (*fence_server_signal)(struct pipe_context *pipe, + struct pipe_fence_handle *fence); + /** * Create a view on a texture to be used by a shader stage. */ @@ -508,6 +635,16 @@ struct pipe_context { struct pipe_resource *texture, const struct pipe_sampler_view *templat); + /** + * Destroy a view on a texture. + * + * \param ctx the current context + * \param view the view to be destroyed + * + * \note The current context may not be the context in which the view was + * created (view->context). However, the caller must guarantee that + * the context which created the view is still alive. + */ void (*sampler_view_destroy)(struct pipe_context *ctx, struct pipe_sampler_view *view); @@ -680,7 +817,7 @@ struct pipe_context { /*@}*/ /** - * Get sample position for an individual sample point. + * Get the default sample position for an individual sample point. * * \param sample_count - total number of samples * \param sample_index - sample to get the position values for @@ -716,7 +853,7 @@ struct pipe_context { * Invalidate the contents of the resource. This is used to * * (1) implement EGL's semantic of undefined depth/stencil - * contenst after a swapbuffers. This allows a tiled renderer (for + * contents after a swapbuffers. This allows a tiled renderer (for * example) to not store the depth buffer. * * (2) implement GL's InvalidateBufferData. For backwards compatibility, @@ -773,13 +910,13 @@ struct pipe_context { * Generate mipmap. * \return TRUE if mipmap generation succeeds, FALSE otherwise */ - boolean (*generate_mipmap)(struct pipe_context *ctx, - struct pipe_resource *resource, - enum pipe_format format, - unsigned base_level, - unsigned last_level, - unsigned first_layer, - unsigned last_layer); + bool (*generate_mipmap)(struct pipe_context *ctx, + struct pipe_resource *resource, + enum pipe_format format, + unsigned base_level, + unsigned last_level, + unsigned first_layer, + unsigned last_layer); /** * Create a 64-bit texture handle. @@ -839,6 +976,24 @@ struct pipe_context { */ void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle, unsigned access, bool resident); + + /** + * Call the given function from the driver thread. + * + * This is set by threaded contexts for use by debugging wrappers. + * + * \param asap if true, run the callback immediately if there are no pending + * commands to be processed by the driver thread + */ + void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data, + bool asap); + + /** + * Set a context parameter See enum pipe_context_param for more details. + */ + void (*set_context_param)(struct pipe_context *ctx, + enum pipe_context_param param, + unsigned value); };