st: add support for INTEL_blackhole_render
[mesa.git] / src / gallium / include / pipe / p_context.h
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef PIPE_CONTEXT_H
29 #define PIPE_CONTEXT_H
30
31 #include "p_compiler.h"
32 #include "p_format.h"
33 #include "p_video_enums.h"
34 #include "p_defines.h"
35 #include <stdio.h>
36
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40
41
42 struct pipe_blend_color;
43 struct pipe_blend_state;
44 struct pipe_blit_info;
45 struct pipe_box;
46 struct pipe_clip_state;
47 struct pipe_constant_buffer;
48 struct pipe_debug_callback;
49 struct pipe_depth_stencil_alpha_state;
50 struct pipe_device_reset_callback;
51 struct pipe_draw_info;
52 struct pipe_grid_info;
53 struct pipe_fence_handle;
54 struct pipe_framebuffer_state;
55 struct pipe_image_view;
56 struct pipe_query;
57 struct pipe_poly_stipple;
58 struct pipe_rasterizer_state;
59 struct pipe_resolve_info;
60 struct pipe_resource;
61 struct pipe_sampler_state;
62 struct pipe_sampler_view;
63 struct pipe_scissor_state;
64 struct pipe_shader_buffer;
65 struct pipe_shader_state;
66 struct pipe_stencil_ref;
67 struct pipe_stream_output_target;
68 struct pipe_surface;
69 struct pipe_transfer;
70 struct pipe_vertex_buffer;
71 struct pipe_vertex_element;
72 struct pipe_video_buffer;
73 struct pipe_video_codec;
74 struct pipe_viewport_state;
75 struct pipe_compute_state;
76 union pipe_color_union;
77 union pipe_query_result;
78 struct u_log_context;
79 struct u_upload_mgr;
80
81 /**
82 * Gallium rendering context. Basically:
83 * - state setting functions
84 * - VBO drawing functions
85 * - surface functions
86 */
87 struct pipe_context {
88 struct pipe_screen *screen;
89
90 void *priv; /**< context private data (for DRI for example) */
91 void *draw; /**< private, for draw module (temporary?) */
92
93 /**
94 * Stream uploaders created by the driver. All drivers, state trackers, and
95 * modules should use them.
96 *
97 * Use u_upload_alloc or u_upload_data as many times as you want.
98 * Once you are done, use u_upload_unmap.
99 */
100 struct u_upload_mgr *stream_uploader; /* everything but shader constants */
101 struct u_upload_mgr *const_uploader; /* shader constants only */
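/*
 * Illustrative sketch (an assumption-laden example, not part of the
 * interface): uploading vertex data through stream_uploader with the
 * u_upload_* helpers from util/u_upload_mgr.h.  "ctx", "verts" and
 * "verts_size" are assumed to be provided by the caller.
 *
 *    unsigned offset;
 *    struct pipe_resource *buf = NULL;
 *
 *    u_upload_data(ctx->stream_uploader, 0, verts_size, 16,
 *                  verts, &offset, &buf);
 *    u_upload_unmap(ctx->stream_uploader);
 *    // ... bind "buf" at "offset" as a vertex buffer, draw ...
 *    pipe_resource_reference(&buf, NULL);
 */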
102
103 void (*destroy)( struct pipe_context * );
104
105 /**
106 * VBO drawing
107 */
108 /*@{*/
109 void (*draw_vbo)( struct pipe_context *pipe,
110 const struct pipe_draw_info *info );
111 /*@}*/
112
113 /**
114 * Predicate subsequent rendering on occlusion query result
115 * \param query the query predicate, or NULL if no predicate
116 * \param condition whether to skip on FALSE or TRUE query results
117 * \param mode one of PIPE_RENDER_COND_x
118 */
119 void (*render_condition)( struct pipe_context *pipe,
120 struct pipe_query *query,
121 bool condition,
122 enum pipe_render_cond_flag mode );
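/*
 * Illustrative sketch (assumes "pipe" is a valid context and "q" is an
 * occlusion query created with create_query() below): only execute draws
 * when the query saw at least one sample pass.
 *
 *    pipe->render_condition(pipe, q, false, PIPE_RENDER_COND_WAIT);
 *    // ... these draws are skipped if the query result is zero ...
 *    pipe->render_condition(pipe, NULL, false, PIPE_RENDER_COND_WAIT);
 */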
123
124 /**
125 * Query objects
126 */
127 /*@{*/
128 struct pipe_query *(*create_query)( struct pipe_context *pipe,
129 unsigned query_type,
130 unsigned index );
131
132 /**
133 * Create a query object that queries all given query types simultaneously.
134 *
135 * This can only be used for those query types for which
136 * get_driver_query_info indicates that it must be used. Only one batch
137 * query object may be active at a time.
138 *
139 * There may be additional constraints on which query types can be used
140 * together, in particular those that are implied by
141 * get_driver_query_group_info.
142 *
143 * \param num_queries the number of query types
144 * \param query_types array of \p num_queries query types
145 * \return a query object, or NULL on error.
146 */
147 struct pipe_query *(*create_batch_query)( struct pipe_context *pipe,
148 unsigned num_queries,
149 unsigned *query_types );
150
151 void (*destroy_query)(struct pipe_context *pipe,
152 struct pipe_query *q);
153
154 bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
155 bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
156
157 /**
158 * Get results of a query.
159 * \param wait if true, this call will block until the result is ready
160 * \return TRUE if results are ready, FALSE otherwise
161 */
162 bool (*get_query_result)(struct pipe_context *pipe,
163 struct pipe_query *q,
164 bool wait,
165 union pipe_query_result *result);
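/*
 * Illustrative sketch (assumes "pipe" is a valid context and the driver
 * exposes occlusion queries): the typical query lifecycle.
 *
 *    struct pipe_query *q =
 *       pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *    union pipe_query_result result;
 *
 *    pipe->begin_query(pipe, q);
 *    // ... issue draws ...
 *    pipe->end_query(pipe, q);
 *    if (pipe->get_query_result(pipe, q, true, &result))
 *       printf("samples passed: %" PRIu64 "\n", result.u64);
 *    pipe->destroy_query(pipe, q);
 */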
166
167 /**
168 * Get results of a query, storing them into a resource. Note that this may
169 * not be used with batch queries.
170 *
171 * \param wait if true, this call will block until the result is ready
172 * \param result_type the type of the value being stored
173 * \param index for queries that return multiple pieces of data, which
174 * item of that data to store (e.g. for
175 * PIPE_QUERY_PIPELINE_STATISTICS).
176 * When the index is -1, instead of writing the value of the
177 * query, the driver should write a 1 or 0 to the appropriate
178 * location, with 1 meaning that the query result is available.
179 */
180 void (*get_query_result_resource)(struct pipe_context *pipe,
181 struct pipe_query *q,
182 bool wait,
183 enum pipe_query_value_type result_type,
184 int index,
185 struct pipe_resource *resource,
186 unsigned offset);
187
188 /**
189 * Set whether all current non-driver queries except TIME_ELAPSED are
190 * active or paused.
191 */
192 void (*set_active_query_state)(struct pipe_context *pipe, bool enable);
193
194 /**
195 * INTEL Performance Query
196 */
197 /*@{*/
198
199 unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);
200
201 void (*get_intel_perf_query_info)(struct pipe_context *pipe,
202 unsigned query_index,
203 const char **name,
204 uint32_t *data_size,
205 uint32_t *n_counters,
206 uint32_t *n_active);
207
208 void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
209 unsigned query_index,
210 unsigned counter_index,
211 const char **name,
212 const char **desc,
213 uint32_t *offset,
214 uint32_t *data_size,
215 uint32_t *type_enum,
216 uint32_t *data_type_enum,
217 uint64_t *raw_max);
218
219 struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
220 unsigned query_index);
221
222 void (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
223
224 void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
225
226 void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
227
228 void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
229
230 bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);
231
232 void (*get_intel_perf_query_data)(struct pipe_context *pipe,
233 struct pipe_query *q,
234 size_t data_size,
235 uint32_t *data,
236 uint32_t *bytes_written);
237
238 /*@}*/
239
240 /**
241 * State functions (create/bind/destroy state objects)
242 */
243 /*@{*/
244 void * (*create_blend_state)(struct pipe_context *,
245 const struct pipe_blend_state *);
246 void (*bind_blend_state)(struct pipe_context *, void *);
247 void (*delete_blend_state)(struct pipe_context *, void *);
248
249 void * (*create_sampler_state)(struct pipe_context *,
250 const struct pipe_sampler_state *);
251 void (*bind_sampler_states)(struct pipe_context *,
252 enum pipe_shader_type shader,
253 unsigned start_slot, unsigned num_samplers,
254 void **samplers);
255 void (*delete_sampler_state)(struct pipe_context *, void *);
256
257 void * (*create_rasterizer_state)(struct pipe_context *,
258 const struct pipe_rasterizer_state *);
259 void (*bind_rasterizer_state)(struct pipe_context *, void *);
260 void (*delete_rasterizer_state)(struct pipe_context *, void *);
261
262 void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
263 const struct pipe_depth_stencil_alpha_state *);
264 void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
265 void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);
266
267 void * (*create_fs_state)(struct pipe_context *,
268 const struct pipe_shader_state *);
269 void (*bind_fs_state)(struct pipe_context *, void *);
270 void (*delete_fs_state)(struct pipe_context *, void *);
271
272 void * (*create_vs_state)(struct pipe_context *,
273 const struct pipe_shader_state *);
274 void (*bind_vs_state)(struct pipe_context *, void *);
275 void (*delete_vs_state)(struct pipe_context *, void *);
276
277 void * (*create_gs_state)(struct pipe_context *,
278 const struct pipe_shader_state *);
279 void (*bind_gs_state)(struct pipe_context *, void *);
280 void (*delete_gs_state)(struct pipe_context *, void *);
281
282 void * (*create_tcs_state)(struct pipe_context *,
283 const struct pipe_shader_state *);
284 void (*bind_tcs_state)(struct pipe_context *, void *);
285 void (*delete_tcs_state)(struct pipe_context *, void *);
286
287 void * (*create_tes_state)(struct pipe_context *,
288 const struct pipe_shader_state *);
289 void (*bind_tes_state)(struct pipe_context *, void *);
290 void (*delete_tes_state)(struct pipe_context *, void *);
291
292 void * (*create_vertex_elements_state)(struct pipe_context *,
293 unsigned num_elements,
294 const struct pipe_vertex_element *);
295 void (*bind_vertex_elements_state)(struct pipe_context *, void *);
296 void (*delete_vertex_elements_state)(struct pipe_context *, void *);
297
298 /*@}*/
299
300 /**
301 * Parameter-like state (or properties)
302 */
303 /*@{*/
304 void (*set_blend_color)( struct pipe_context *,
305 const struct pipe_blend_color * );
306
307 void (*set_stencil_ref)( struct pipe_context *,
308 const struct pipe_stencil_ref * );
309
310 void (*set_sample_mask)( struct pipe_context *,
311 unsigned sample_mask );
312
313 void (*set_min_samples)( struct pipe_context *,
314 unsigned min_samples );
315
316 void (*set_clip_state)( struct pipe_context *,
317 const struct pipe_clip_state * );
318
319 void (*set_constant_buffer)( struct pipe_context *,
320 enum pipe_shader_type shader, uint index,
321 const struct pipe_constant_buffer *buf );
322
323 void (*set_framebuffer_state)( struct pipe_context *,
324 const struct pipe_framebuffer_state * );
325
326 /**
327 * Set the sample locations used during rasterization. When NULL or sized
328 * zero, the default locations are used.
329 *
330 * Note that get_sample_position() still returns the default locations.
331 *
332 * The samples are accessed with
333 * locations[(pixel_y*grid_w+pixel_x)*ms+i],
334 * where:
335 * ms = the sample count
336 * grid_w = the pixel grid width for the sample count
337 * grid_h = the pixel grid height for the sample count
338 * pixel_x = the window x coordinate modulo grid_w
339 * pixel_y = the window y coordinate modulo grid_h
340 * i = the sample index
341 * This gives a result with the x coordinate as the low 4 bits and the y
342 * coordinate as the high 4 bits. For each coordinate 0 is the left or top
343 * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
344 *
345 * Out of bounds accesses return undefined values.
346 *
347 * The pixel grid is used to vary sample locations across pixels and its
348 * size can be queried with get_sample_pixel_grid().
349 */
350 void (*set_sample_locations)( struct pipe_context *,
351 size_t size, const uint8_t *locations );
352
353 void (*set_polygon_stipple)( struct pipe_context *,
354 const struct pipe_poly_stipple * );
355
356 void (*set_scissor_states)( struct pipe_context *,
357 unsigned start_slot,
358 unsigned num_scissors,
359 const struct pipe_scissor_state * );
360
361 void (*set_window_rectangles)( struct pipe_context *,
362 bool include,
363 unsigned num_rectangles,
364 const struct pipe_scissor_state * );
365
366 void (*set_viewport_states)( struct pipe_context *,
367 unsigned start_slot,
368 unsigned num_viewports,
369 const struct pipe_viewport_state *);
370
371 void (*set_sampler_views)(struct pipe_context *,
372 enum pipe_shader_type shader,
373 unsigned start_slot, unsigned num_views,
374 struct pipe_sampler_view **views);
375
376 void (*set_tess_state)(struct pipe_context *,
377 const float default_outer_level[4],
378 const float default_inner_level[2]);
379
380 /**
381 * Sets the debug callback. If the pointer is null, then no callback is
382 * set, otherwise a copy of the data should be made.
383 */
384 void (*set_debug_callback)(struct pipe_context *,
385 const struct pipe_debug_callback *);
386
387 /**
388 * Bind an array of shader buffers that will be used by a shader.
389 * Any buffers that were previously bound to the specified range
390 * will be unbound.
391 *
392 * \param shader selects shader stage
393 * \param start_slot first buffer slot to bind.
394 * \param count number of consecutive buffers to bind.
395 * \param buffers array of pointers to the buffers to bind, it
396 * should contain at least \a count elements
397 * unless it's NULL, in which case no buffers will
398 * be bound.
399 * \param writable_bitmask If bit i is not set, buffers[i] will only be
400 * used with loads. If unsure, set to ~0.
401 */
402 void (*set_shader_buffers)(struct pipe_context *,
403 enum pipe_shader_type shader,
404 unsigned start_slot, unsigned count,
405 const struct pipe_shader_buffer *buffers,
406 unsigned writable_bitmask);
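/*
 * Illustrative sketch (assumes "pipe" and an array "bufs[2]" of
 * pipe_shader_buffer entries prepared by the caller): bind two SSBOs to the
 * fragment stage, allowing writes only through slot 0.
 *
 *    pipe->set_shader_buffers(pipe, PIPE_SHADER_FRAGMENT, 0, 2, bufs, 0x1);
 */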
407
408 /**
409 * Bind an array of hw atomic buffers for use by all shaders.
410 * Any buffers that were previously bound to the specified range
411 * will be unbound.
412 *
413 * \param start_slot first buffer slot to bind.
414 * \param count number of consecutive buffers to bind.
415 * \param buffers array of pointers to the buffers to bind, it
416 * should contain at least \a count elements
417 * unless it's NULL, in which case no buffers will
418 * be bound.
419 */
420 void (*set_hw_atomic_buffers)(struct pipe_context *,
421 unsigned start_slot, unsigned count,
422 const struct pipe_shader_buffer *buffers);
423
424 /**
425 * Bind an array of images that will be used by a shader.
426 * Any images that were previously bound to the specified range
427 * will be unbound.
428 *
429 * \param shader selects shader stage
430 * \param start_slot first image slot to bind.
431 * \param count number of consecutive images to bind.
432 * \param images array of the images to bind, it
433 * should contain at least \a count elements
434 * unless it's NULL, in which case no images will
435 * be bound.
436 */
437 void (*set_shader_images)(struct pipe_context *,
438 enum pipe_shader_type shader,
439 unsigned start_slot, unsigned count,
440 const struct pipe_image_view *images);
441
442 void (*set_vertex_buffers)( struct pipe_context *,
443 unsigned start_slot,
444 unsigned num_buffers,
445 const struct pipe_vertex_buffer * );
446
447 /*@}*/
448
449 /**
450 * Stream output functions.
451 */
452 /*@{*/
453
454 struct pipe_stream_output_target *(*create_stream_output_target)(
455 struct pipe_context *,
456 struct pipe_resource *,
457 unsigned buffer_offset,
458 unsigned buffer_size);
459
460 void (*stream_output_target_destroy)(struct pipe_context *,
461 struct pipe_stream_output_target *);
462
463 void (*set_stream_output_targets)(struct pipe_context *,
464 unsigned num_targets,
465 struct pipe_stream_output_target **targets,
466 const unsigned *offsets);
467
468 /*@}*/
469
470
471 /**
472 * INTEL_blackhole_render
473 */
474 /*@{*/
475
476 void (*set_frontend_noop)(struct pipe_context *,
477 bool enable);
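/*
 * Illustrative sketch (assumes "pipe" is a valid context): a state tracker
 * implementing GL_INTEL_blackhole_render would toggle no-op mode around the
 * work it wants discarded.
 *
 *    pipe->set_frontend_noop(pipe, true);   // subsequent rendering is dropped
 *    // ... draws / dispatches issued here have no effect ...
 *    pipe->set_frontend_noop(pipe, false);  // back to normal execution
 */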
478
479 /*@}*/
480
481
482 /**
483 * Resource functions for blit-like functionality
484 *
485 * If a driver supports multisampling, blit must implement color resolve.
486 */
487 /*@{*/
488
489 /**
490 * Copy a block of pixels from one resource to another.
491 * The resources must be of the same format.
492 * Resources with nr_samples > 1 are not allowed.
493 */
494 void (*resource_copy_region)(struct pipe_context *pipe,
495 struct pipe_resource *dst,
496 unsigned dst_level,
497 unsigned dstx, unsigned dsty, unsigned dstz,
498 struct pipe_resource *src,
499 unsigned src_level,
500 const struct pipe_box *src_box);
501
502 /* Optimal hardware path for blitting pixels.
503 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
504 */
505 void (*blit)(struct pipe_context *pipe,
506 const struct pipe_blit_info *info);
507
508 /*@}*/
509
510 /**
511 * Clear the specified set of currently bound buffers to specified values.
512 * The entire buffers are cleared (no scissor, no colormask, etc).
513 *
514 * \param buffers bitfield of PIPE_CLEAR_* values.
515 * \param color pointer to a union of float/int/uint arrays with the r, g, b, a values.
516 * \param depth depth clear value in [0,1].
517 * \param stencil stencil clear value
518 */
519 void (*clear)(struct pipe_context *pipe,
520 unsigned buffers,
521 const union pipe_color_union *color,
522 double depth,
523 unsigned stencil);
524
525 /**
526 * Clear a color rendertarget surface.
527 * \param color pointer to an union of fiu array for each of r, g, b, a.
528 * \param color pointer to a union of float/int/uint arrays with the r, g, b, a values.
529 void (*clear_render_target)(struct pipe_context *pipe,
530 struct pipe_surface *dst,
531 const union pipe_color_union *color,
532 unsigned dstx, unsigned dsty,
533 unsigned width, unsigned height,
534 bool render_condition_enabled);
535
536 /**
537 * Clear a depth-stencil surface.
538 * \param clear_flags bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
539 * \param depth depth clear value in [0,1].
540 * \param stencil stencil clear value
541 */
542 void (*clear_depth_stencil)(struct pipe_context *pipe,
543 struct pipe_surface *dst,
544 unsigned clear_flags,
545 double depth,
546 unsigned stencil,
547 unsigned dstx, unsigned dsty,
548 unsigned width, unsigned height,
549 bool render_condition_enabled);
550
551 /**
552 * Clear the texture with the specified texel. The texture's format is not
553 * guaranteed to be renderable; the data is provided in the resource's format.
554 */
555 void (*clear_texture)(struct pipe_context *pipe,
556 struct pipe_resource *res,
557 unsigned level,
558 const struct pipe_box *box,
559 const void *data);
560
561 /**
562 * Clear a buffer. Runs a memset over the specified region with the element
563 * value passed in through clear_value of size clear_value_size.
564 */
565 void (*clear_buffer)(struct pipe_context *pipe,
566 struct pipe_resource *res,
567 unsigned offset,
568 unsigned size,
569 const void *clear_value,
570 int clear_value_size);
571
572 /**
573 * If a depth buffer is rendered with different sample location state than
574 * what is current at the time of reading, the values may differ because
575 * depth buffer compression can depend on the sample locations.
576 *
577 * This function is a hint to decompress the current depth buffer to avoid
578 * such problems.
579 */
580 void (*evaluate_depth_buffer)(struct pipe_context *pipe);
581
582 /**
583 * Flush draw commands.
584 *
585 * This guarantees that the new fence (if any) will finish in finite time,
586 * unless PIPE_FLUSH_DEFERRED is used.
587 *
588 * Subsequent operations on other contexts of the same screen are guaranteed
589 * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
590 *
591 * NOTE: use screen->fence_reference() (or equivalent) to transfer
592 * new fence ref to **fence, to ensure that previous fence is unref'd
593 *
594 * \param fence if not NULL, an old fence to unref and transfer a
595 * new fence reference to
596 * \param flags bitfield of enum pipe_flush_flags values.
597 */
598 void (*flush)(struct pipe_context *pipe,
599 struct pipe_fence_handle **fence,
600 unsigned flags);
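/*
 * Illustrative sketch (assumes "pipe" is a valid context): flush and then
 * wait on the returned fence through the screen.
 *
 *    struct pipe_screen *screen = pipe->screen;
 *    struct pipe_fence_handle *fence = NULL;
 *
 *    pipe->flush(pipe, &fence, 0);
 *    if (fence) {
 *       screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
 *       screen->fence_reference(screen, &fence, NULL);
 *    }
 */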
601
602 /**
603 * Create a fence from a fd.
604 *
605 * This is used for importing a foreign/external fence fd.
606 *
607 * \param fence if not NULL, an old fence to unref and transfer a
608 * new fence reference to
609 * \param fd fd representing the fence object
610 * \param type indicates which fence type backs the fd
611 */
612 void (*create_fence_fd)(struct pipe_context *pipe,
613 struct pipe_fence_handle **fence,
614 int fd,
615 enum pipe_fd_type type);
616
617 /**
618 * Insert commands to have GPU wait for fence to be signaled.
619 */
620 void (*fence_server_sync)(struct pipe_context *pipe,
621 struct pipe_fence_handle *fence);
622
623 /**
624 * Insert commands to have the GPU signal a fence.
625 */
626 void (*fence_server_signal)(struct pipe_context *pipe,
627 struct pipe_fence_handle *fence);
628
629 /**
630 * Create a view on a texture to be used by a shader stage.
631 */
632 struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
633 struct pipe_resource *texture,
634 const struct pipe_sampler_view *templat);
635
636 /**
637 * Destroy a view on a texture.
638 *
639 * \param ctx the current context
640 * \param view the view to be destroyed
641 *
642 * \note The current context may not be the context in which the view was
643 * created (view->context). However, the caller must guarantee that
644 * the context which created the view is still alive.
645 */
646 void (*sampler_view_destroy)(struct pipe_context *ctx,
647 struct pipe_sampler_view *view);
648
649
650 /**
651 * Get a surface which is a "view" into a resource, used by
652 * render target / depth stencil stages.
653 */
654 struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
655 struct pipe_resource *resource,
656 const struct pipe_surface *templat);
657
658 void (*surface_destroy)(struct pipe_context *ctx,
659 struct pipe_surface *);
660
661
662 /**
663 * Map a resource.
664 *
665 * Transfers are (by default) context-private and allow uploads to be
666 * interleaved with rendering.
667 *
668 * out_transfer will contain the transfer object that must be passed
669 * to all the other transfer functions. It also contains useful
670 * information (like texture strides).
671 */
672 void *(*transfer_map)(struct pipe_context *,
673 struct pipe_resource *resource,
674 unsigned level,
675 unsigned usage, /* a combination of PIPE_TRANSFER_x */
676 const struct pipe_box *,
677 struct pipe_transfer **out_transfer);
678
679 /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
680 * regions specified with this call are guaranteed to be written to
681 * the resource.
682 */
683 void (*transfer_flush_region)( struct pipe_context *,
684 struct pipe_transfer *transfer,
685 const struct pipe_box *);
686
687 void (*transfer_unmap)(struct pipe_context *,
688 struct pipe_transfer *transfer);
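/*
 * Illustrative sketch (assumes "pipe", a buffer resource "res", and
 * "offset"/"size"/"data" supplied by the caller): write a byte range of a
 * buffer through a transfer.  For one-shot uploads, buffer_subdata below is
 * the more direct path.
 *
 *    struct pipe_transfer *t;
 *    struct pipe_box box;
 *    void *map;
 *
 *    u_box_1d(offset, size, &box);
 *    map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_WRITE, &box, &t);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pipe->transfer_unmap(pipe, t);
 *    }
 */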
689
690 /* One-shot transfer operation with data supplied in a user
691 * pointer.
692 */
693 void (*buffer_subdata)(struct pipe_context *,
694 struct pipe_resource *,
695 unsigned usage, /* a combination of PIPE_TRANSFER_x */
696 unsigned offset,
697 unsigned size,
698 const void *data);
699
700 void (*texture_subdata)(struct pipe_context *,
701 struct pipe_resource *,
702 unsigned level,
703 unsigned usage, /* a combination of PIPE_TRANSFER_x */
704 const struct pipe_box *,
705 const void *data,
706 unsigned stride,
707 unsigned layer_stride);
708
709 /**
710 * Flush any pending framebuffer writes and invalidate texture caches.
711 */
712 void (*texture_barrier)(struct pipe_context *, unsigned flags);
713
714 /**
715 * Flush caches according to flags.
716 */
717 void (*memory_barrier)(struct pipe_context *, unsigned flags);
718
719 /**
720 * Change the commitment status of a part of the given resource, which must
721 * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
722 *
723 * \param level The texture level whose commitment should be changed.
724 * \param box The region of the resource whose commitment should be changed.
725 * \param commit Whether memory should be committed or un-committed.
726 *
727 * \return false if out of memory, true on success.
728 */
729 bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
730 unsigned level, struct pipe_box *box, bool commit);
731
732 /**
733 * Creates a video codec for a specific video format/profile
734 */
735 struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context,
736 const struct pipe_video_codec *templat );
737
738 /**
739 * Creates a video buffer as decoding target
740 */
741 struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
742 const struct pipe_video_buffer *templat );
743
744 /**
745 * Compute kernel execution
746 */
747 /*@{*/
748 /**
749 * Define the compute program and parameters to be used by
750 * pipe_context::launch_grid.
751 */
752 void *(*create_compute_state)(struct pipe_context *context,
753 const struct pipe_compute_state *);
754 void (*bind_compute_state)(struct pipe_context *, void *);
755 void (*delete_compute_state)(struct pipe_context *, void *);
756
757 /**
758 * Bind an array of shader resources that will be used by the
759 * compute program. Any resources that were previously bound to
760 * the specified range will be unbound after this call.
761 *
762 * \param start first resource to bind.
763 * \param count number of consecutive resources to bind.
764 * \param resources array of pointers to the resources to bind, it
765 * should contain at least \a count elements
766 * unless it's NULL, in which case no new
767 * resources will be bound.
768 */
769 void (*set_compute_resources)(struct pipe_context *,
770 unsigned start, unsigned count,
771 struct pipe_surface **resources);
772
773 /**
774 * Bind an array of buffers to be mapped into the address space of
775 * the GLOBAL resource. Any buffers that were previously bound
776 * between [first, first + count - 1] are unbound after this call.
777 *
778 * \param first first buffer to map.
779 * \param count number of consecutive buffers to map.
780 * \param resources array of pointers to the buffers to map, it
781 * should contain at least \a count elements
782 * unless it's NULL, in which case no new
783 * resources will be bound.
784 * \param handles array of pointers to the memory locations that
785 * will be updated with the address each buffer
786 * will be mapped to. The base memory address of
787 * each of the buffers will be added to the value
788 * pointed to by its corresponding handle to form
789 * the final address argument. It should contain
790 * at least \a count elements, unless \a
791 * resources is NULL in which case \a handles
792 * should be NULL as well.
793 *
794 * Note that the driver isn't required to make any guarantees about
795 * the contents of the \a handles array being valid anytime except
796 * during the subsequent calls to pipe_context::launch_grid. This
797 * means that the only sensible location handles[i] may point to is
798 * somewhere within the INPUT buffer itself. This is so to
799 * accommodate implementations that lack virtual memory but
800 * nevertheless migrate buffers on the fly, leading to resource
801 * base addresses that change on each kernel invocation or are
802 * unknown to the pipe driver.
803 */
804 void (*set_global_binding)(struct pipe_context *context,
805 unsigned first, unsigned count,
806 struct pipe_resource **resources,
807 uint32_t **handles);
808
809 /**
810 * Launch the compute kernel starting from instruction \a pc of the
811 * currently bound compute program.
812 */
813 void (*launch_grid)(struct pipe_context *context,
814 const struct pipe_grid_info *info);
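/*
 * Illustrative sketch (assumes "pipe" and a filled-in pipe_compute_state
 * "cs_templ" from the caller): dispatch a 64x64 grid of 8x8x1 work groups.
 *
 *    void *cs = pipe->create_compute_state(pipe, &cs_templ);
 *    struct pipe_grid_info info = {0};
 *
 *    info.block[0] = 8;   info.block[1] = 8;   info.block[2] = 1;
 *    info.grid[0]  = 64;  info.grid[1]  = 64;  info.grid[2]  = 1;
 *
 *    pipe->bind_compute_state(pipe, cs);
 *    pipe->launch_grid(pipe, &info);
 */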
815 /*@}*/
816
817 /**
818 * Get the default sample position for an individual sample point.
819 *
820 * \param sample_count - total number of samples
821 * \param sample_index - sample to get the position values for
822 * \param out_value - return value of 2 floats for x and y position for
823 * requested sample.
824 */
825 void (*get_sample_position)(struct pipe_context *context,
826 unsigned sample_count,
827 unsigned sample_index,
828 float *out_value);
829
830 /**
831 * Query a timestamp in nanoseconds. This is completely equivalent to
832 * pipe_screen::get_timestamp() but takes a context handle for drivers
833 * that require a context.
834 */
835 uint64_t (*get_timestamp)(struct pipe_context *);
836
837 /**
838 * Flush the resource cache, so that the resource can be used
839 * by an external client. Possible usage:
840 * - flushing a resource before presenting it on the screen
841 * - flushing a resource if some other process or device wants to use it
842 * This shouldn't be used to flush caches if the resource is only managed
843 * by a single pipe_screen and is not shared with another process.
844 * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
845 * use the resource for texturing)
846 */
847 void (*flush_resource)(struct pipe_context *ctx,
848 struct pipe_resource *resource);
849
850 /**
851 * Invalidate the contents of the resource. This is used to
852 *
853 * (1) implement EGL's semantic of undefined depth/stencil
854 * contents after a swapbuffers. This allows a tiled renderer (for
855 * example) to not store the depth buffer.
856 *
857 * (2) implement GL's InvalidateBufferData. For backwards compatibility,
858 * you must only rely on the usability for this purpose when
859 * PIPE_CAP_INVALIDATE_BUFFER is enabled.
860 */
861 void (*invalidate_resource)(struct pipe_context *ctx,
862 struct pipe_resource *resource);
863
864 /**
865 * Return information about unexpected device resets.
866 */
867 enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);
868
869 /**
870 * Sets the reset status callback. If the pointer is null, then no callback
871 * is set, otherwise a copy of the data should be made.
872 */
873 void (*set_device_reset_callback)(struct pipe_context *ctx,
874 const struct pipe_device_reset_callback *cb);
875
876 /**
877 * Dump driver-specific debug information into a stream. This is
878 * used by debugging tools.
879 *
880 * \param ctx pipe context
881 * \param stream where the output should be written to
882 * \param flags a mask of PIPE_DUMP_* flags
883 */
884 void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
885 unsigned flags);
886
887 /**
888 * Set the log context to which the driver should write internal debug logs
889 * (internal states, command streams).
890 *
891 * The caller must ensure that the log context is destroyed and reset to
892 * NULL before the pipe context is destroyed, and that log context functions
893 * are only called from the driver thread.
894 *
895 * \param ctx pipe context
896 * \param log logging context
897 */
898 void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);
899
900 /**
901 * Emit string marker in cmdstream
902 */
903 void (*emit_string_marker)(struct pipe_context *ctx,
904 const char *string,
905 int len);
906
907 /**
908 * Generate mipmap.
909 * \return TRUE if mipmap generation succeeds, FALSE otherwise
910 */
911 bool (*generate_mipmap)(struct pipe_context *ctx,
912 struct pipe_resource *resource,
913 enum pipe_format format,
914 unsigned base_level,
915 unsigned last_level,
916 unsigned first_layer,
917 unsigned last_layer);
918
919 /**
920 * Create a 64-bit texture handle.
921 *
922 * \param ctx pipe context
923 * \param view pipe sampler view object
924 * \param state pipe sampler state template
925 * \return a 64-bit texture handle if success, 0 otherwise
926 */
927 uint64_t (*create_texture_handle)(struct pipe_context *ctx,
928 struct pipe_sampler_view *view,
929 const struct pipe_sampler_state *state);
930
931 /**
932 * Delete a texture handle.
933 *
934 * \param ctx pipe context
935 * \param handle 64-bit texture handle
936 */
937 void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);
938
939 /**
940 * Make a texture handle resident.
941 *
942 * \param ctx pipe context
943 * \param handle 64-bit texture handle
944 * \param resident TRUE for resident, FALSE otherwise
945 */
946 void (*make_texture_handle_resident)(struct pipe_context *ctx,
947 uint64_t handle, bool resident);
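/*
 * Illustrative sketch (assumes "pipe", a sampler view "view" and a sampler
 * state "sstate" from the caller): create a bindless texture handle and make
 * it resident before shaders reference it.
 *
 *    uint64_t handle = pipe->create_texture_handle(pipe, view, &sstate);
 *    if (handle)
 *       pipe->make_texture_handle_resident(pipe, handle, true);
 *    // ... later, once the handle is no longer referenced:
 *    pipe->make_texture_handle_resident(pipe, handle, false);
 *    pipe->delete_texture_handle(pipe, handle);
 */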
948
949 /**
950 * Create a 64-bit image handle.
951 *
952 * \param ctx pipe context
953 * \param image pipe image view template
954 * \return a 64-bit image handle if success, 0 otherwise
955 */
956 uint64_t (*create_image_handle)(struct pipe_context *ctx,
957 const struct pipe_image_view *image);
958
959 /**
960 * Delete an image handle.
961 *
962 * \param ctx pipe context
963 * \param handle 64-bit image handle
964 */
965 void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);
966
967 /**
968 * Make an image handle resident.
969 *
970 * \param ctx pipe context
971 * \param handle 64-bit image handle
972 * \param access GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
973 * \param resident TRUE for resident, FALSE otherwise
974 */
975 void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
976 unsigned access, bool resident);
977
978 /**
979 * Call the given function from the driver thread.
980 *
981 * This is set by threaded contexts for use by debugging wrappers.
982 *
983 * \param asap if true, run the callback immediately if there are no pending
984 * commands to be processed by the driver thread
985 */
986 void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
987 bool asap);
988
989 /**
990 * Set a context parameter. See enum pipe_context_param for more details.
991 */
992 void (*set_context_param)(struct pipe_context *ctx,
993 enum pipe_context_param param,
994 unsigned value);
995 };
996
997
998 #ifdef __cplusplus
999 }
1000 #endif
1001
1002 #endif /* PIPE_CONTEXT_H */