iris: propagate error from gen_perf_begin_query to glBeginPerfQueryINTEL
[mesa.git] / src / gallium / include / pipe / p_context.h
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef PIPE_CONTEXT_H
29 #define PIPE_CONTEXT_H
30
31 #include "p_compiler.h"
32 #include "p_format.h"
33 #include "p_video_enums.h"
34 #include "p_defines.h"
35 #include <stdio.h>
36
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40
41
/* Forward declarations: this header only uses pointers to these types,
 * so the full definitions are intentionally not included here (keeps
 * header dependencies minimal). */
struct pipe_blend_color;
struct pipe_blend_state;
struct pipe_blit_info;
struct pipe_box;
struct pipe_clip_state;
struct pipe_constant_buffer;
struct pipe_debug_callback;
struct pipe_depth_stencil_alpha_state;
struct pipe_device_reset_callback;
struct pipe_draw_info;
struct pipe_grid_info;
struct pipe_fence_handle;
struct pipe_framebuffer_state;
struct pipe_image_view;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
struct pipe_resolve_info;
struct pipe_resource;
struct pipe_sampler_state;
struct pipe_sampler_view;
struct pipe_scissor_state;
struct pipe_shader_buffer;
struct pipe_shader_state;
struct pipe_stencil_ref;
struct pipe_stream_output_target;
struct pipe_surface;
struct pipe_transfer;
struct pipe_vertex_buffer;
struct pipe_vertex_element;
struct pipe_video_buffer;
struct pipe_video_codec;
struct pipe_viewport_state;
struct pipe_compute_state;
union pipe_color_union;
union pipe_query_result;
struct u_log_context;
struct u_upload_mgr;
80
/**
 * Gallium rendering context. Basically:
 *  - state setting functions
 *  - VBO drawing functions
 *  - surface functions
 *
 * All entry points are function pointers filled in by the driver when the
 * context is created.
 */
struct pipe_context {
   struct pipe_screen *screen;   /**< screen this context was created from */

   void *priv;  /**< context private data (for DRI for example) */
   void *draw;  /**< private, for draw module (temporary?) */

   /**
    * Stream uploaders created by the driver. All drivers, gallium frontends, and
    * modules should use them.
    *
    * Use u_upload_alloc or u_upload_data as many times as you want.
    * Once you are done, use u_upload_unmap.
    */
   struct u_upload_mgr *stream_uploader; /* everything but shader constants */
   struct u_upload_mgr *const_uploader;  /* shader constants only */

   /** Destroy the context; it must not be used afterwards. */
   void (*destroy)( struct pipe_context * );

   /**
    * VBO drawing
    */
   /*@{*/
   void (*draw_vbo)( struct pipe_context *pipe,
                     const struct pipe_draw_info *info );
   /*@}*/

   /**
    * Predicate subsequent rendering on occlusion query result
    * \param query  the query predicate, or NULL if no predicate
    * \param condition  whether to skip on FALSE or TRUE query results
    * \param mode  one of PIPE_RENDER_COND_x
    */
   void (*render_condition)( struct pipe_context *pipe,
                             struct pipe_query *query,
                             bool condition,
                             enum pipe_render_cond_flag mode );

   /**
    * Query objects
    */
   /*@{*/
   struct pipe_query *(*create_query)( struct pipe_context *pipe,
                                       unsigned query_type,
                                       unsigned index );

   /**
    * Create a query object that queries all given query types simultaneously.
    *
    * This can only be used for those query types for which
    * get_driver_query_info indicates that it must be used. Only one batch
    * query object may be active at a time.
    *
    * There may be additional constraints on which query types can be used
    * together, in particular those that are implied by
    * get_driver_query_group_info.
    *
    * \param num_queries the number of query types
    * \param query_types array of \p num_queries query types
    * \return a query object, or NULL on error.
    */
   struct pipe_query *(*create_batch_query)( struct pipe_context *pipe,
                                             unsigned num_queries,
                                             unsigned *query_types );

   void (*destroy_query)(struct pipe_context *pipe,
                         struct pipe_query *q);

   bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
   bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);

   /**
    * Get results of a query.
    * \param wait  if true, this query will block until the result is ready
    * \return TRUE if results are ready, FALSE otherwise
    */
   bool (*get_query_result)(struct pipe_context *pipe,
                            struct pipe_query *q,
                            bool wait,
                            union pipe_query_result *result);

   /**
    * Get results of a query, storing into resource. Note that this may not
    * be used with batch queries.
    *
    * \param wait  if true, this query will block until the result is ready
    * \param result_type  the type of the value being stored.
    * \param index  for queries that return multiple pieces of data, which
    *               item of that data to store (e.g. for
    *               PIPE_QUERY_PIPELINE_STATISTICS).
    *               When the index is -1, instead of the value of the query
    *               the driver should instead write a 1 or 0 to the appropriate
    *               location with 1 meaning that the query result is available.
    */
   void (*get_query_result_resource)(struct pipe_context *pipe,
                                     struct pipe_query *q,
                                     bool wait,
                                     enum pipe_query_value_type result_type,
                                     int index,
                                     struct pipe_resource *resource,
                                     unsigned offset);

   /**
    * Set whether all current non-driver queries except TIME_ELAPSED are
    * active or paused.
    */
   void (*set_active_query_state)(struct pipe_context *pipe, bool enable);

   /**
    * INTEL Performance Query
    */
   /*@{*/

   unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);

   void (*get_intel_perf_query_info)(struct pipe_context *pipe,
                                     unsigned query_index,
                                     const char **name,
                                     uint32_t *data_size,
                                     uint32_t *n_counters,
                                     uint32_t *n_active);

   void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
                                             unsigned query_index,
                                             unsigned counter_index,
                                             const char **name,
                                             const char **desc,
                                             uint32_t *offset,
                                             uint32_t *data_size,
                                             uint32_t *type_enum,
                                             uint32_t *data_type_enum,
                                             uint64_t *raw_max);

   struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
                                                  unsigned query_index);

   /* Returns false on failure so the error can be propagated up to
    * glBeginPerfQueryINTEL. */
   bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);

   void (*get_intel_perf_query_data)(struct pipe_context *pipe,
                                     struct pipe_query *q,
                                     size_t data_size,
                                     uint32_t *data,
                                     uint32_t *bytes_written);

   /*@}*/

   /**
    * State functions (create/bind/destroy state objects)
    */
   /*@{*/
   void * (*create_blend_state)(struct pipe_context *,
                                const struct pipe_blend_state *);
   void   (*bind_blend_state)(struct pipe_context *, void *);
   void   (*delete_blend_state)(struct pipe_context *, void *);

   void * (*create_sampler_state)(struct pipe_context *,
                                  const struct pipe_sampler_state *);
   void   (*bind_sampler_states)(struct pipe_context *,
                                 enum pipe_shader_type shader,
                                 unsigned start_slot, unsigned num_samplers,
                                 void **samplers);
   void   (*delete_sampler_state)(struct pipe_context *, void *);

   void * (*create_rasterizer_state)(struct pipe_context *,
                                     const struct pipe_rasterizer_state *);
   void   (*bind_rasterizer_state)(struct pipe_context *, void *);
   void   (*delete_rasterizer_state)(struct pipe_context *, void *);

   void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
                                        const struct pipe_depth_stencil_alpha_state *);
   void   (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
   void   (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);

   void * (*create_fs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void   (*bind_fs_state)(struct pipe_context *, void *);
   void   (*delete_fs_state)(struct pipe_context *, void *);

   void * (*create_vs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void   (*bind_vs_state)(struct pipe_context *, void *);
   void   (*delete_vs_state)(struct pipe_context *, void *);

   void * (*create_gs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void   (*bind_gs_state)(struct pipe_context *, void *);
   void   (*delete_gs_state)(struct pipe_context *, void *);

   void * (*create_tcs_state)(struct pipe_context *,
                              const struct pipe_shader_state *);
   void   (*bind_tcs_state)(struct pipe_context *, void *);
   void   (*delete_tcs_state)(struct pipe_context *, void *);

   void * (*create_tes_state)(struct pipe_context *,
                              const struct pipe_shader_state *);
   void   (*bind_tes_state)(struct pipe_context *, void *);
   void   (*delete_tes_state)(struct pipe_context *, void *);

   void * (*create_vertex_elements_state)(struct pipe_context *,
                                          unsigned num_elements,
                                          const struct pipe_vertex_element *);
   void   (*bind_vertex_elements_state)(struct pipe_context *, void *);
   void   (*delete_vertex_elements_state)(struct pipe_context *, void *);

   /*@}*/

   /**
    * Parameter-like state (or properties)
    */
   /*@{*/
   void (*set_blend_color)( struct pipe_context *,
                            const struct pipe_blend_color * );

   void (*set_stencil_ref)( struct pipe_context *,
                            const struct pipe_stencil_ref * );

   void (*set_sample_mask)( struct pipe_context *,
                            unsigned sample_mask );

   void (*set_min_samples)( struct pipe_context *,
                            unsigned min_samples );

   void (*set_clip_state)( struct pipe_context *,
                           const struct pipe_clip_state * );

   void (*set_constant_buffer)( struct pipe_context *,
                                enum pipe_shader_type shader, uint index,
                                const struct pipe_constant_buffer *buf );

   void (*set_framebuffer_state)( struct pipe_context *,
                                  const struct pipe_framebuffer_state * );

   /**
    * Set the sample locations used during rasterization. When NULL or sized
    * zero, the default locations are used.
    *
    * Note that get_sample_position() still returns the default locations.
    *
    * The samples are accessed with
    * locations[(pixel_y*grid_w+pixel_x)*ms+i],
    * where:
    *    ms      = the sample count
    *    grid_w  = the pixel grid width for the sample count
    *    grid_h  = the pixel grid height for the sample count
    *    pixel_x = the window x coordinate modulo grid_w
    *    pixel_y = the window y coordinate modulo grid_h
    *    i       = the sample index
    * This gives a result with the x coordinate as the low 4 bits and the y
    * coordinate as the high 4 bits. For each coordinate 0 is the left or top
    * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
    *
    * Out of bounds accesses return undefined values.
    *
    * The pixel grid is used to vary sample locations across pixels and its
    * size can be queried with get_sample_pixel_grid().
    */
   void (*set_sample_locations)( struct pipe_context *,
                                 size_t size, const uint8_t *locations );

   void (*set_polygon_stipple)( struct pipe_context *,
                                const struct pipe_poly_stipple * );

   void (*set_scissor_states)( struct pipe_context *,
                               unsigned start_slot,
                               unsigned num_scissors,
                               const struct pipe_scissor_state * );

   void (*set_window_rectangles)( struct pipe_context *,
                                  bool include,
                                  unsigned num_rectangles,
                                  const struct pipe_scissor_state * );

   void (*set_viewport_states)( struct pipe_context *,
                                unsigned start_slot,
                                unsigned num_viewports,
                                const struct pipe_viewport_state *);

   void (*set_sampler_views)(struct pipe_context *,
                             enum pipe_shader_type shader,
                             unsigned start_slot, unsigned num_views,
                             struct pipe_sampler_view **views);

   void (*set_tess_state)(struct pipe_context *,
                          const float default_outer_level[4],
                          const float default_inner_level[2]);

   /**
    * Sets the debug callback. If the pointer is null, then no callback is
    * set, otherwise a copy of the data should be made.
    */
   void (*set_debug_callback)(struct pipe_context *,
                              const struct pipe_debug_callback *);

   /**
    * Bind an array of shader buffers that will be used by a shader.
    * Any buffers that were previously bound to the specified range
    * will be unbound.
    *
    * \param shader     selects shader stage
    * \param start_slot first buffer slot to bind.
    * \param count      number of consecutive buffers to bind.
    * \param buffers    array of pointers to the buffers to bind, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no buffers will
    *                   be bound.
    * \param writable_bitmask  If bit i is not set, buffers[i] will only be
    *                          used with loads. If unsure, set to ~0.
    */
   void (*set_shader_buffers)(struct pipe_context *,
                              enum pipe_shader_type shader,
                              unsigned start_slot, unsigned count,
                              const struct pipe_shader_buffer *buffers,
                              unsigned writable_bitmask);

   /**
    * Bind an array of hw atomic buffers for use by all shaders.
    * Any buffers that were previously bound to the specified range
    * will be unbound.
    *
    * \param start_slot first buffer slot to bind.
    * \param count      number of consecutive buffers to bind.
    * \param buffers    array of pointers to the buffers to bind, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no buffers will
    *                   be bound.
    */
   void (*set_hw_atomic_buffers)(struct pipe_context *,
                                 unsigned start_slot, unsigned count,
                                 const struct pipe_shader_buffer *buffers);

   /**
    * Bind an array of images that will be used by a shader.
    * Any images that were previously bound to the specified range
    * will be unbound.
    *
    * \param shader     selects shader stage
    * \param start_slot first image slot to bind.
    * \param count      number of consecutive images to bind.
    * \param images     array of the images to bind, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no images will
    *                   be bound.
    */
   void (*set_shader_images)(struct pipe_context *,
                             enum pipe_shader_type shader,
                             unsigned start_slot, unsigned count,
                             const struct pipe_image_view *images);

   void (*set_vertex_buffers)( struct pipe_context *,
                               unsigned start_slot,
                               unsigned num_buffers,
                               const struct pipe_vertex_buffer * );

   /*@}*/

   /**
    * Stream output functions.
    */
   /*@{*/

   struct pipe_stream_output_target *(*create_stream_output_target)(
                        struct pipe_context *,
                        struct pipe_resource *,
                        unsigned buffer_offset,
                        unsigned buffer_size);

   void (*stream_output_target_destroy)(struct pipe_context *,
                                        struct pipe_stream_output_target *);

   void (*set_stream_output_targets)(struct pipe_context *,
                                     unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     const unsigned *offsets);

   /*@}*/


   /**
    * INTEL_blackhole_render
    */
   /*@{*/

   void (*set_frontend_noop)(struct pipe_context *,
                             bool enable);

   /*@}*/


   /**
    * Resource functions for blit-like functionality
    *
    * If a driver supports multisampling, blit must implement color resolve.
    */
   /*@{*/

   /**
    * Copy a block of pixels from one resource to another.
    * The resource must be of the same format.
    * Resources with nr_samples > 1 are not allowed.
    */
   void (*resource_copy_region)(struct pipe_context *pipe,
                                struct pipe_resource *dst,
                                unsigned dst_level,
                                unsigned dstx, unsigned dsty, unsigned dstz,
                                struct pipe_resource *src,
                                unsigned src_level,
                                const struct pipe_box *src_box);

   /* Optimal hardware path for blitting pixels.
    * Scaling, format conversion, up- and downsampling (resolve) are allowed.
    */
   void (*blit)(struct pipe_context *pipe,
                const struct pipe_blit_info *info);

   /*@}*/

   /**
    * Clear the specified set of currently bound buffers to specified values.
    * The entire buffers are cleared (no scissor, no colormask, etc).
    *
    * \param buffers  bitfield of PIPE_CLEAR_* values.
    * \param scissor_state  the scissored region to clear
    * \param color  pointer to a union of fiu array for each of r, g, b, a.
    * \param depth  depth clear value in [0,1].
    * \param stencil  stencil clear value
    */
   void (*clear)(struct pipe_context *pipe,
                 unsigned buffers,
                 const struct pipe_scissor_state *scissor_state,
                 const union pipe_color_union *color,
                 double depth,
                 unsigned stencil);

   /**
    * Clear a color rendertarget surface.
    * \param color  pointer to a union of fiu array for each of r, g, b, a.
    */
   void (*clear_render_target)(struct pipe_context *pipe,
                               struct pipe_surface *dst,
                               const union pipe_color_union *color,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled);

   /**
    * Clear a depth-stencil surface.
    * \param clear_flags  bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
    * \param depth  depth clear value in [0,1].
    * \param stencil  stencil clear value
    */
   void (*clear_depth_stencil)(struct pipe_context *pipe,
                               struct pipe_surface *dst,
                               unsigned clear_flags,
                               double depth,
                               unsigned stencil,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled);

   /**
    * Clear the texture with the specified texel. Not guaranteed to be a
    * renderable format. Data provided in the resource's format.
    */
   void (*clear_texture)(struct pipe_context *pipe,
                         struct pipe_resource *res,
                         unsigned level,
                         const struct pipe_box *box,
                         const void *data);

   /**
    * Clear a buffer. Runs a memset over the specified region with the element
    * value passed in through clear_value of size clear_value_size.
    */
   void (*clear_buffer)(struct pipe_context *pipe,
                        struct pipe_resource *res,
                        unsigned offset,
                        unsigned size,
                        const void *clear_value,
                        int clear_value_size);

   /**
    * If a depth buffer is rendered with different sample location state than
    * what is current at the time of reading, the values may differ because
    * depth buffer compression can depend on the sample locations.
    *
    * This function is a hint to decompress the current depth buffer to avoid
    * such problems.
    */
   void (*evaluate_depth_buffer)(struct pipe_context *pipe);

   /**
    * Flush draw commands.
    *
    * This guarantees that the new fence (if any) will finish in finite time,
    * unless PIPE_FLUSH_DEFERRED is used.
    *
    * Subsequent operations on other contexts of the same screen are guaranteed
    * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
    *
    * NOTE: use screen->fence_reference() (or equivalent) to transfer
    * new fence ref to **fence, to ensure that previous fence is unref'd
    *
    * \param fence  if not NULL, an old fence to unref and transfer a
    *    new fence reference to
    * \param flags  bitfield of enum pipe_flush_flags values.
    */
   void (*flush)(struct pipe_context *pipe,
                 struct pipe_fence_handle **fence,
                 unsigned flags);

   /**
    * Create a fence from a fd.
    *
    * This is used for importing a foreign/external fence fd.
    *
    * \param fence  if not NULL, an old fence to unref and transfer a
    *    new fence reference to
    * \param fd     fd representing the fence object
    * \param type   indicates which fence types backs fd
    */
   void (*create_fence_fd)(struct pipe_context *pipe,
                           struct pipe_fence_handle **fence,
                           int fd,
                           enum pipe_fd_type type);

   /**
    * Insert commands to have GPU wait for fence to be signaled.
    */
   void (*fence_server_sync)(struct pipe_context *pipe,
                             struct pipe_fence_handle *fence);

   /**
    * Insert commands to have the GPU signal a fence.
    */
   void (*fence_server_signal)(struct pipe_context *pipe,
                               struct pipe_fence_handle *fence);

   /**
    * Create a view on a texture to be used by a shader stage.
    */
   struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
                                                     struct pipe_resource *texture,
                                                     const struct pipe_sampler_view *templat);

   /**
    * Destroy a view on a texture.
    *
    * \param ctx the current context
    * \param view the view to be destroyed
    *
    * \note The current context may not be the context in which the view was
    *       created (view->context). However, the caller must guarantee that
    *       the context which created the view is still alive.
    */
   void (*sampler_view_destroy)(struct pipe_context *ctx,
                                struct pipe_sampler_view *view);


   /**
    * Get a surface which is a "view" into a resource, used by
    * render target / depth stencil stages.
    */
   struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
                                          struct pipe_resource *resource,
                                          const struct pipe_surface *templat);

   void (*surface_destroy)(struct pipe_context *ctx,
                           struct pipe_surface *);


   /**
    * Map a resource.
    *
    * Transfers are (by default) context-private and allow uploads to be
    * interleaved with rendering.
    *
    * out_transfer will contain the transfer object that must be passed
    * to all the other transfer functions. It also contains useful
    * information (like texture strides).
    */
   void *(*transfer_map)(struct pipe_context *,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,  /* a combination of PIPE_TRANSFER_x */
                         const struct pipe_box *,
                         struct pipe_transfer **out_transfer);

   /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
    * regions specified with this call are guaranteed to be written to
    * the resource.
    */
   void (*transfer_flush_region)( struct pipe_context *,
                                  struct pipe_transfer *transfer,
                                  const struct pipe_box *);

   void (*transfer_unmap)(struct pipe_context *,
                          struct pipe_transfer *transfer);

   /* One-shot transfer operation with data supplied in a user
    * pointer.
    */
   void (*buffer_subdata)(struct pipe_context *,
                          struct pipe_resource *,
                          unsigned usage, /* a combination of PIPE_TRANSFER_x */
                          unsigned offset,
                          unsigned size,
                          const void *data);

   void (*texture_subdata)(struct pipe_context *,
                           struct pipe_resource *,
                           unsigned level,
                           unsigned usage, /* a combination of PIPE_TRANSFER_x */
                           const struct pipe_box *,
                           const void *data,
                           unsigned stride,
                           unsigned layer_stride);

   /**
    * Flush any pending framebuffer writes and invalidate texture caches.
    */
   void (*texture_barrier)(struct pipe_context *, unsigned flags);

   /**
    * Flush caches according to flags.
    */
   void (*memory_barrier)(struct pipe_context *, unsigned flags);

   /**
    * Change the commitment status of a part of the given resource, which must
    * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
    *
    * \param level The texture level whose commitment should be changed.
    * \param box The region of the resource whose commitment should be changed.
    * \param commit Whether memory should be committed or un-committed.
    *
    * \return false if out of memory, true on success.
    */
   bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
                           unsigned level, struct pipe_box *box, bool commit);

   /**
    * Creates a video codec for a specific video format/profile
    */
   struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context,
                                                   const struct pipe_video_codec *templat );

   /**
    * Creates a video buffer as decoding target
    */
   struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
                                                     const struct pipe_video_buffer *templat );

   /**
    * Compute kernel execution
    */
   /*@{*/
   /**
    * Define the compute program and parameters to be used by
    * pipe_context::launch_grid.
    */
   void *(*create_compute_state)(struct pipe_context *context,
                                 const struct pipe_compute_state *);
   void (*bind_compute_state)(struct pipe_context *, void *);
   void (*delete_compute_state)(struct pipe_context *, void *);

   /**
    * Bind an array of shader resources that will be used by the
    * compute program.  Any resources that were previously bound to
    * the specified range will be unbound after this call.
    *
    * \param start      first resource to bind.
    * \param count      number of consecutive resources to bind.
    * \param resources  array of pointers to the resources to bind, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no new
    *                   resources will be bound.
    */
   void (*set_compute_resources)(struct pipe_context *,
                                 unsigned start, unsigned count,
                                 struct pipe_surface **resources);

   /**
    * Bind an array of buffers to be mapped into the address space of
    * the GLOBAL resource.  Any buffers that were previously bound
    * between [first, first + count - 1] are unbound after this call.
    *
    * \param first      first buffer to map.
    * \param count      number of consecutive buffers to map.
    * \param resources  array of pointers to the buffers to map, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no new
    *                   resources will be bound.
    * \param handles    array of pointers to the memory locations that
    *                   will be updated with the address each buffer
    *                   will be mapped to.  The base memory address of
    *                   each of the buffers will be added to the value
    *                   pointed to by its corresponding handle to form
    *                   the final address argument.  It should contain
    *                   at least \a count elements, unless \a
    *                   resources is NULL in which case \a handles
    *                   should be NULL as well.
    *
    * Note that the driver isn't required to make any guarantees about
    * the contents of the \a handles array being valid anytime except
    * during the subsequent calls to pipe_context::launch_grid.  This
    * means that the only sensible location handles[i] may point to is
    * somewhere within the INPUT buffer itself.  This is so to
    * accommodate implementations that lack virtual memory but
    * nevertheless migrate buffers on the fly, leading to resource
    * base addresses that change on each kernel invocation or are
    * unknown to the pipe driver.
    */
   void (*set_global_binding)(struct pipe_context *context,
                              unsigned first, unsigned count,
                              struct pipe_resource **resources,
                              uint32_t **handles);

   /**
    * Launch the compute kernel starting from instruction \a pc of the
    * currently bound compute program.
    */
   void (*launch_grid)(struct pipe_context *context,
                       const struct pipe_grid_info *info);
   /*@}*/

   /**
    * Get the default sample position for an individual sample point.
    *
    * \param sample_count - total number of samples
    * \param sample_index - sample to get the position values for
    * \param out_value - return value of 2 floats for x and y position for
    *                    requested sample.
    */
   void (*get_sample_position)(struct pipe_context *context,
                               unsigned sample_count,
                               unsigned sample_index,
                               float *out_value);

   /**
    * Query a timestamp in nanoseconds. This is completely equivalent to
    * pipe_screen::get_timestamp() but takes a context handle for drivers
    * that require a context.
    */
   uint64_t (*get_timestamp)(struct pipe_context *);

   /**
    * Flush the resource cache, so that the resource can be used
    * by an external client. Possible usage:
    * - flushing a resource before presenting it on the screen
    * - flushing a resource if some other process or device wants to use it
    * This shouldn't be used to flush caches if the resource is only managed
    * by a single pipe_screen and is not shared with another process.
    * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
    * use the resource for texturing)
    */
   void (*flush_resource)(struct pipe_context *ctx,
                          struct pipe_resource *resource);

   /**
    * Invalidate the contents of the resource. This is used to
    *
    * (1) implement EGL's semantic of undefined depth/stencil
    * contents after a swapbuffers.  This allows a tiled renderer (for
    * example) to not store the depth buffer.
    *
    * (2) implement GL's InvalidateBufferData. For backwards compatibility,
    * you must only rely on the usability for this purpose when
    * PIPE_CAP_INVALIDATE_BUFFER is enabled.
    */
   void (*invalidate_resource)(struct pipe_context *ctx,
                               struct pipe_resource *resource);

   /**
    * Return information about unexpected device resets.
    */
   enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);

   /**
    * Sets the reset status callback. If the pointer is null, then no callback
    * is set, otherwise a copy of the data should be made.
    */
   void (*set_device_reset_callback)(struct pipe_context *ctx,
                                     const struct pipe_device_reset_callback *cb);

   /**
    * Dump driver-specific debug information into a stream. This is
    * used by debugging tools.
    *
    * \param ctx        pipe context
    * \param stream     where the output should be written to
    * \param flags      a mask of PIPE_DUMP_* flags
    */
   void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
                            unsigned flags);

   /**
    * Set the log context to which the driver should write internal debug logs
    * (internal states, command streams).
    *
    * The caller must ensure that the log context is destroyed and reset to
    * NULL before the pipe context is destroyed, and that log context functions
    * are only called from the driver thread.
    *
    * \param ctx  pipe context
    * \param log  logging context
    */
   void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);

   /**
    * Emit string marker in cmdstream
    */
   void (*emit_string_marker)(struct pipe_context *ctx,
                              const char *string,
                              int len);

   /**
    * Generate mipmap.
    * \return TRUE if mipmap generation succeeds, FALSE otherwise
    */
   bool (*generate_mipmap)(struct pipe_context *ctx,
                           struct pipe_resource *resource,
                           enum pipe_format format,
                           unsigned base_level,
                           unsigned last_level,
                           unsigned first_layer,
                           unsigned last_layer);

   /**
    * Create a 64-bit texture handle.
    *
    * \param ctx    pipe context
    * \param view   pipe sampler view object
    * \param state  pipe sampler state template
    * \return a 64-bit texture handle if success, 0 otherwise
    */
   uint64_t (*create_texture_handle)(struct pipe_context *ctx,
                                     struct pipe_sampler_view *view,
                                     const struct pipe_sampler_state *state);

   /**
    * Delete a texture handle.
    *
    * \param ctx     pipe context
    * \param handle  64-bit texture handle
    */
   void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make a texture handle resident.
    *
    * \param ctx       pipe context
    * \param handle    64-bit texture handle
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_texture_handle_resident)(struct pipe_context *ctx,
                                        uint64_t handle, bool resident);

   /**
    * Create a 64-bit image handle.
    *
    * \param ctx    pipe context
    * \param image  pipe image view template
    * \return a 64-bit image handle if success, 0 otherwise
    */
   uint64_t (*create_image_handle)(struct pipe_context *ctx,
                                   const struct pipe_image_view *image);

   /**
    * Delete an image handle.
    *
    * \param ctx     pipe context
    * \param handle  64-bit image handle
    */
   void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make an image handle resident.
    *
    * \param ctx       pipe context
    * \param handle    64-bit image handle
    * \param access    GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
                                      unsigned access, bool resident);

   /**
    * Call the given function from the driver thread.
    *
    * This is set by threaded contexts for use by debugging wrappers.
    *
    * \param asap if true, run the callback immediately if there are no pending
    *             commands to be processed by the driver thread
    */
   void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
                    bool asap);

   /**
    * Set a context parameter. See enum pipe_context_param for more details.
    */
   void (*set_context_param)(struct pipe_context *ctx,
                             enum pipe_context_param param,
                             unsigned value);
};
998
999
1000 #ifdef __cplusplus
1001 }
1002 #endif
1003
1004 #endif /* PIPE_CONTEXT_H */