1 /**************************************************************************
3 * Copyright 2007 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
54 * Per-shader sampler information.
58 struct cso_sampler
*cso_samplers
[PIPE_MAX_SAMPLERS
];
59 void *samplers
[PIPE_MAX_SAMPLERS
];
65 struct pipe_context
*pipe
;
66 struct cso_cache
*cache
;
69 struct u_vbuf
*vbuf_current
;
72 boolean has_geometry_shader
;
73 boolean has_tessellation
;
74 boolean has_compute_shader
;
75 boolean has_streamout
;
77 unsigned saved_state
; /**< bitmask of CSO_BIT_x flags */
79 struct pipe_sampler_view
*fragment_views
[PIPE_MAX_SHADER_SAMPLER_VIEWS
];
80 unsigned nr_fragment_views
;
82 struct pipe_sampler_view
*fragment_views_saved
[PIPE_MAX_SHADER_SAMPLER_VIEWS
];
83 unsigned nr_fragment_views_saved
;
85 struct sampler_info fragment_samplers_saved
;
86 struct sampler_info samplers
[PIPE_SHADER_TYPES
];
88 /* Temporary number until cso_single_sampler_done is called.
89 * It tracks the highest sampler seen in cso_single_sampler.
93 struct pipe_vertex_buffer vertex_buffer0_current
;
94 struct pipe_vertex_buffer vertex_buffer0_saved
;
96 struct pipe_constant_buffer aux_constbuf_current
[PIPE_SHADER_TYPES
];
97 struct pipe_constant_buffer aux_constbuf_saved
[PIPE_SHADER_TYPES
];
99 struct pipe_image_view fragment_image0_current
;
100 struct pipe_image_view fragment_image0_saved
;
102 unsigned nr_so_targets
;
103 struct pipe_stream_output_target
*so_targets
[PIPE_MAX_SO_BUFFERS
];
105 unsigned nr_so_targets_saved
;
106 struct pipe_stream_output_target
*so_targets_saved
[PIPE_MAX_SO_BUFFERS
];
108 /** Current and saved state.
109 * The saved state is used as a 1-deep stack.
111 void *blend
, *blend_saved
;
112 void *depth_stencil
, *depth_stencil_saved
;
113 void *rasterizer
, *rasterizer_saved
;
114 void *fragment_shader
, *fragment_shader_saved
;
115 void *vertex_shader
, *vertex_shader_saved
;
116 void *geometry_shader
, *geometry_shader_saved
;
117 void *tessctrl_shader
, *tessctrl_shader_saved
;
118 void *tesseval_shader
, *tesseval_shader_saved
;
119 void *compute_shader
;
120 void *velements
, *velements_saved
;
121 struct pipe_query
*render_condition
, *render_condition_saved
;
122 uint render_condition_mode
, render_condition_mode_saved
;
123 boolean render_condition_cond
, render_condition_cond_saved
;
125 struct pipe_framebuffer_state fb
, fb_saved
;
126 struct pipe_viewport_state vp
, vp_saved
;
127 struct pipe_blend_color blend_color
;
128 unsigned sample_mask
, sample_mask_saved
;
129 unsigned min_samples
, min_samples_saved
;
130 struct pipe_stencil_ref stencil_ref
, stencil_ref_saved
;
133 struct pipe_context
*cso_get_pipe_context(struct cso_context
*cso
)
138 static boolean
delete_blend_state(struct cso_context
*ctx
, void *state
)
140 struct cso_blend
*cso
= (struct cso_blend
*)state
;
142 if (ctx
->blend
== cso
->data
)
145 if (cso
->delete_state
)
146 cso
->delete_state(cso
->context
, cso
->data
);
151 static boolean
delete_depth_stencil_state(struct cso_context
*ctx
, void *state
)
153 struct cso_depth_stencil_alpha
*cso
=
154 (struct cso_depth_stencil_alpha
*)state
;
156 if (ctx
->depth_stencil
== cso
->data
)
159 if (cso
->delete_state
)
160 cso
->delete_state(cso
->context
, cso
->data
);
166 static boolean
delete_sampler_state(UNUSED
struct cso_context
*ctx
, void *state
)
168 struct cso_sampler
*cso
= (struct cso_sampler
*)state
;
169 if (cso
->delete_state
)
170 cso
->delete_state(cso
->context
, cso
->data
);
175 static boolean
delete_rasterizer_state(struct cso_context
*ctx
, void *state
)
177 struct cso_rasterizer
*cso
= (struct cso_rasterizer
*)state
;
179 if (ctx
->rasterizer
== cso
->data
)
181 if (cso
->delete_state
)
182 cso
->delete_state(cso
->context
, cso
->data
);
187 static boolean
delete_vertex_elements(struct cso_context
*ctx
,
190 struct cso_velements
*cso
= (struct cso_velements
*)state
;
192 if (ctx
->velements
== cso
->data
)
195 if (cso
->delete_state
)
196 cso
->delete_state(cso
->context
, cso
->data
);
202 static inline boolean
delete_cso(struct cso_context
*ctx
,
203 void *state
, enum cso_cache_type type
)
207 return delete_blend_state(ctx
, state
);
209 return delete_sampler_state(ctx
, state
);
210 case CSO_DEPTH_STENCIL_ALPHA
:
211 return delete_depth_stencil_state(ctx
, state
);
213 return delete_rasterizer_state(ctx
, state
);
215 return delete_vertex_elements(ctx
, state
);
224 sanitize_hash(struct cso_hash
*hash
, enum cso_cache_type type
,
225 int max_size
, void *user_data
)
227 struct cso_context
*ctx
= (struct cso_context
*)user_data
;
228 /* if we're approach the maximum size, remove fourth of the entries
229 * otherwise every subsequent call will go through the same */
230 int hash_size
= cso_hash_size(hash
);
231 int max_entries
= (max_size
> hash_size
) ? max_size
: hash_size
;
232 int to_remove
= (max_size
< max_entries
) * max_entries
/4;
233 struct cso_hash_iter iter
;
234 struct cso_sampler
**samplers_to_restore
= NULL
;
235 unsigned to_restore
= 0;
237 if (hash_size
> max_size
)
238 to_remove
+= hash_size
- max_size
;
243 if (type
== CSO_SAMPLER
) {
246 samplers_to_restore
= MALLOC(PIPE_SHADER_TYPES
* PIPE_MAX_SAMPLERS
*
247 sizeof(*samplers_to_restore
));
249 /* Temporarily remove currently bound sampler states from the hash
250 * table, to prevent them from being deleted
252 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
253 for (j
= 0; j
< PIPE_MAX_SAMPLERS
; j
++) {
254 struct cso_sampler
*sampler
= ctx
->samplers
[i
].cso_samplers
[j
];
256 if (sampler
&& cso_hash_take(hash
, sampler
->hash_key
))
257 samplers_to_restore
[to_restore
++] = sampler
;
262 iter
= cso_hash_first_node(hash
);
264 /*remove elements until we're good */
265 /*fixme: currently we pick the nodes to remove at random*/
266 void *cso
= cso_hash_iter_data(iter
);
271 if (delete_cso(ctx
, cso
, type
)) {
272 iter
= cso_hash_erase(hash
, iter
);
275 iter
= cso_hash_iter_next(iter
);
278 if (type
== CSO_SAMPLER
) {
279 /* Put currently bound sampler states back into the hash table */
280 while (to_restore
--) {
281 struct cso_sampler
*sampler
= samplers_to_restore
[to_restore
];
283 cso_hash_insert(hash
, sampler
->hash_key
, sampler
);
286 FREE(samplers_to_restore
);
290 static void cso_init_vbuf(struct cso_context
*cso
, unsigned flags
)
292 struct u_vbuf_caps caps
;
293 bool uses_user_vertex_buffers
= !(flags
& CSO_NO_USER_VERTEX_BUFFERS
);
295 u_vbuf_get_caps(cso
->pipe
->screen
, &caps
);
297 /* Enable u_vbuf if needed. */
298 if (caps
.fallback_always
||
299 (uses_user_vertex_buffers
&&
300 caps
.fallback_only_for_user_vbuffers
)) {
301 cso
->vbuf
= u_vbuf_create(cso
->pipe
, &caps
);
302 cso
->vbuf_current
= cso
->vbuf
;
303 cso
->always_use_vbuf
= caps
.fallback_always
;
308 cso_create_context(struct pipe_context
*pipe
, unsigned flags
)
310 struct cso_context
*ctx
= CALLOC_STRUCT(cso_context
);
314 ctx
->cache
= cso_cache_create();
315 if (ctx
->cache
== NULL
)
317 cso_cache_set_sanitize_callback(ctx
->cache
,
322 ctx
->sample_mask
= ~0;
324 cso_init_vbuf(ctx
, flags
);
326 /* Enable for testing: */
327 if (0) cso_set_maximum_cache_size( ctx
->cache
, 4 );
329 if (pipe
->screen
->get_shader_param(pipe
->screen
, PIPE_SHADER_GEOMETRY
,
330 PIPE_SHADER_CAP_MAX_INSTRUCTIONS
) > 0) {
331 ctx
->has_geometry_shader
= TRUE
;
333 if (pipe
->screen
->get_shader_param(pipe
->screen
, PIPE_SHADER_TESS_CTRL
,
334 PIPE_SHADER_CAP_MAX_INSTRUCTIONS
) > 0) {
335 ctx
->has_tessellation
= TRUE
;
337 if (pipe
->screen
->get_shader_param(pipe
->screen
, PIPE_SHADER_COMPUTE
,
338 PIPE_SHADER_CAP_MAX_INSTRUCTIONS
) > 0) {
340 pipe
->screen
->get_shader_param(pipe
->screen
, PIPE_SHADER_COMPUTE
,
341 PIPE_SHADER_CAP_SUPPORTED_IRS
);
342 if (supported_irs
& ((1 << PIPE_SHADER_IR_TGSI
) |
343 (1 << PIPE_SHADER_IR_NIR
))) {
344 ctx
->has_compute_shader
= TRUE
;
347 if (pipe
->screen
->get_param(pipe
->screen
,
348 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS
) != 0) {
349 ctx
->has_streamout
= TRUE
;
352 ctx
->max_sampler_seen
= -1;
356 cso_destroy_context( ctx
);
361 * Free the CSO context.
363 void cso_destroy_context( struct cso_context
*ctx
)
368 ctx
->pipe
->bind_blend_state( ctx
->pipe
, NULL
);
369 ctx
->pipe
->bind_rasterizer_state( ctx
->pipe
, NULL
);
372 static struct pipe_sampler_view
*views
[PIPE_MAX_SHADER_SAMPLER_VIEWS
] = { NULL
};
373 static void *zeros
[PIPE_MAX_SAMPLERS
] = { NULL
};
374 struct pipe_screen
*scr
= ctx
->pipe
->screen
;
375 enum pipe_shader_type sh
;
376 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
377 int maxsam
= scr
->get_shader_param(scr
, sh
,
378 PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS
);
379 int maxview
= scr
->get_shader_param(scr
, sh
,
380 PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS
);
381 assert(maxsam
<= PIPE_MAX_SAMPLERS
);
382 assert(maxview
<= PIPE_MAX_SHADER_SAMPLER_VIEWS
);
384 ctx
->pipe
->bind_sampler_states(ctx
->pipe
, sh
, 0, maxsam
, zeros
);
387 ctx
->pipe
->set_sampler_views(ctx
->pipe
, sh
, 0, maxview
, views
);
392 ctx
->pipe
->bind_depth_stencil_alpha_state( ctx
->pipe
, NULL
);
393 ctx
->pipe
->bind_fs_state( ctx
->pipe
, NULL
);
394 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_FRAGMENT
, 0, NULL
);
395 ctx
->pipe
->bind_vs_state( ctx
->pipe
, NULL
);
396 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_VERTEX
, 0, NULL
);
397 if (ctx
->has_geometry_shader
) {
398 ctx
->pipe
->bind_gs_state(ctx
->pipe
, NULL
);
399 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_GEOMETRY
, 0, NULL
);
401 if (ctx
->has_tessellation
) {
402 ctx
->pipe
->bind_tcs_state(ctx
->pipe
, NULL
);
403 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_TESS_CTRL
, 0, NULL
);
404 ctx
->pipe
->bind_tes_state(ctx
->pipe
, NULL
);
405 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_TESS_EVAL
, 0, NULL
);
407 if (ctx
->has_compute_shader
) {
408 ctx
->pipe
->bind_compute_state(ctx
->pipe
, NULL
);
409 ctx
->pipe
->set_constant_buffer(ctx
->pipe
, PIPE_SHADER_COMPUTE
, 0, NULL
);
411 ctx
->pipe
->bind_vertex_elements_state( ctx
->pipe
, NULL
);
413 if (ctx
->has_streamout
)
414 ctx
->pipe
->set_stream_output_targets(ctx
->pipe
, 0, NULL
, NULL
);
417 for (i
= 0; i
< ctx
->nr_fragment_views
; i
++) {
418 pipe_sampler_view_reference(&ctx
->fragment_views
[i
], NULL
);
420 for (i
= 0; i
< ctx
->nr_fragment_views_saved
; i
++) {
421 pipe_sampler_view_reference(&ctx
->fragment_views_saved
[i
], NULL
);
424 util_unreference_framebuffer_state(&ctx
->fb
);
425 util_unreference_framebuffer_state(&ctx
->fb_saved
);
427 pipe_vertex_buffer_unreference(&ctx
->vertex_buffer0_current
);
428 pipe_vertex_buffer_unreference(&ctx
->vertex_buffer0_saved
);
430 for (i
= 0; i
< PIPE_SHADER_TYPES
; i
++) {
431 pipe_resource_reference(&ctx
->aux_constbuf_current
[i
].buffer
, NULL
);
432 pipe_resource_reference(&ctx
->aux_constbuf_saved
[i
].buffer
, NULL
);
435 pipe_resource_reference(&ctx
->fragment_image0_current
.resource
, NULL
);
436 pipe_resource_reference(&ctx
->fragment_image0_saved
.resource
, NULL
);
438 for (i
= 0; i
< PIPE_MAX_SO_BUFFERS
; i
++) {
439 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
440 pipe_so_target_reference(&ctx
->so_targets_saved
[i
], NULL
);
444 cso_cache_delete( ctx
->cache
);
449 u_vbuf_destroy(ctx
->vbuf
);
/* These functions will either find the state of the given template
 * in the cache or they will create a new state from the given
 * template, insert it in the cache and return it.
 *
 * If the driver returns 0 from the create method then they will assign
 * the data member of the cso to be the template itself.
 */
464 enum pipe_error
cso_set_blend(struct cso_context
*ctx
,
465 const struct pipe_blend_state
*templ
)
467 unsigned key_size
, hash_key
;
468 struct cso_hash_iter iter
;
471 key_size
= templ
->independent_blend_enable
?
472 sizeof(struct pipe_blend_state
) :
473 (char *)&(templ
->rt
[1]) - (char *)templ
;
474 hash_key
= cso_construct_key((void*)templ
, key_size
);
475 iter
= cso_find_state_template(ctx
->cache
, hash_key
, CSO_BLEND
,
476 (void*)templ
, key_size
);
478 if (cso_hash_iter_is_null(iter
)) {
479 struct cso_blend
*cso
= MALLOC(sizeof(struct cso_blend
));
481 return PIPE_ERROR_OUT_OF_MEMORY
;
483 memset(&cso
->state
, 0, sizeof cso
->state
);
484 memcpy(&cso
->state
, templ
, key_size
);
485 cso
->data
= ctx
->pipe
->create_blend_state(ctx
->pipe
, &cso
->state
);
486 cso
->delete_state
= (cso_state_callback
)ctx
->pipe
->delete_blend_state
;
487 cso
->context
= ctx
->pipe
;
489 iter
= cso_insert_state(ctx
->cache
, hash_key
, CSO_BLEND
, cso
);
490 if (cso_hash_iter_is_null(iter
)) {
492 return PIPE_ERROR_OUT_OF_MEMORY
;
498 handle
= ((struct cso_blend
*)cso_hash_iter_data(iter
))->data
;
501 if (ctx
->blend
!= handle
) {
503 ctx
->pipe
->bind_blend_state(ctx
->pipe
, handle
);
509 cso_save_blend(struct cso_context
*ctx
)
511 assert(!ctx
->blend_saved
);
512 ctx
->blend_saved
= ctx
->blend
;
516 cso_restore_blend(struct cso_context
*ctx
)
518 if (ctx
->blend
!= ctx
->blend_saved
) {
519 ctx
->blend
= ctx
->blend_saved
;
520 ctx
->pipe
->bind_blend_state(ctx
->pipe
, ctx
->blend_saved
);
522 ctx
->blend_saved
= NULL
;
528 cso_set_depth_stencil_alpha(struct cso_context
*ctx
,
529 const struct pipe_depth_stencil_alpha_state
*templ
)
531 unsigned key_size
= sizeof(struct pipe_depth_stencil_alpha_state
);
532 unsigned hash_key
= cso_construct_key((void*)templ
, key_size
);
533 struct cso_hash_iter iter
= cso_find_state_template(ctx
->cache
,
535 CSO_DEPTH_STENCIL_ALPHA
,
536 (void*)templ
, key_size
);
539 if (cso_hash_iter_is_null(iter
)) {
540 struct cso_depth_stencil_alpha
*cso
=
541 MALLOC(sizeof(struct cso_depth_stencil_alpha
));
543 return PIPE_ERROR_OUT_OF_MEMORY
;
545 memcpy(&cso
->state
, templ
, sizeof(*templ
));
546 cso
->data
= ctx
->pipe
->create_depth_stencil_alpha_state(ctx
->pipe
,
549 (cso_state_callback
)ctx
->pipe
->delete_depth_stencil_alpha_state
;
550 cso
->context
= ctx
->pipe
;
552 iter
= cso_insert_state(ctx
->cache
, hash_key
,
553 CSO_DEPTH_STENCIL_ALPHA
, cso
);
554 if (cso_hash_iter_is_null(iter
)) {
556 return PIPE_ERROR_OUT_OF_MEMORY
;
562 handle
= ((struct cso_depth_stencil_alpha
*)
563 cso_hash_iter_data(iter
))->data
;
566 if (ctx
->depth_stencil
!= handle
) {
567 ctx
->depth_stencil
= handle
;
568 ctx
->pipe
->bind_depth_stencil_alpha_state(ctx
->pipe
, handle
);
574 cso_save_depth_stencil_alpha(struct cso_context
*ctx
)
576 assert(!ctx
->depth_stencil_saved
);
577 ctx
->depth_stencil_saved
= ctx
->depth_stencil
;
581 cso_restore_depth_stencil_alpha(struct cso_context
*ctx
)
583 if (ctx
->depth_stencil
!= ctx
->depth_stencil_saved
) {
584 ctx
->depth_stencil
= ctx
->depth_stencil_saved
;
585 ctx
->pipe
->bind_depth_stencil_alpha_state(ctx
->pipe
,
586 ctx
->depth_stencil_saved
);
588 ctx
->depth_stencil_saved
= NULL
;
593 enum pipe_error
cso_set_rasterizer(struct cso_context
*ctx
,
594 const struct pipe_rasterizer_state
*templ
)
596 unsigned key_size
= sizeof(struct pipe_rasterizer_state
);
597 unsigned hash_key
= cso_construct_key((void*)templ
, key_size
);
598 struct cso_hash_iter iter
= cso_find_state_template(ctx
->cache
,
601 (void*)templ
, key_size
);
604 /* We can't have both point_quad_rasterization (sprites) and point_smooth
605 * (round AA points) enabled at the same time.
607 assert(!(templ
->point_quad_rasterization
&& templ
->point_smooth
));
609 if (cso_hash_iter_is_null(iter
)) {
610 struct cso_rasterizer
*cso
= MALLOC(sizeof(struct cso_rasterizer
));
612 return PIPE_ERROR_OUT_OF_MEMORY
;
614 memcpy(&cso
->state
, templ
, sizeof(*templ
));
615 cso
->data
= ctx
->pipe
->create_rasterizer_state(ctx
->pipe
, &cso
->state
);
617 (cso_state_callback
)ctx
->pipe
->delete_rasterizer_state
;
618 cso
->context
= ctx
->pipe
;
620 iter
= cso_insert_state(ctx
->cache
, hash_key
, CSO_RASTERIZER
, cso
);
621 if (cso_hash_iter_is_null(iter
)) {
623 return PIPE_ERROR_OUT_OF_MEMORY
;
629 handle
= ((struct cso_rasterizer
*)cso_hash_iter_data(iter
))->data
;
632 if (ctx
->rasterizer
!= handle
) {
633 ctx
->rasterizer
= handle
;
634 ctx
->pipe
->bind_rasterizer_state(ctx
->pipe
, handle
);
640 cso_save_rasterizer(struct cso_context
*ctx
)
642 assert(!ctx
->rasterizer_saved
);
643 ctx
->rasterizer_saved
= ctx
->rasterizer
;
647 cso_restore_rasterizer(struct cso_context
*ctx
)
649 if (ctx
->rasterizer
!= ctx
->rasterizer_saved
) {
650 ctx
->rasterizer
= ctx
->rasterizer_saved
;
651 ctx
->pipe
->bind_rasterizer_state(ctx
->pipe
, ctx
->rasterizer_saved
);
653 ctx
->rasterizer_saved
= NULL
;
657 void cso_set_fragment_shader_handle(struct cso_context
*ctx
, void *handle
)
659 if (ctx
->fragment_shader
!= handle
) {
660 ctx
->fragment_shader
= handle
;
661 ctx
->pipe
->bind_fs_state(ctx
->pipe
, handle
);
665 void cso_delete_fragment_shader(struct cso_context
*ctx
, void *handle
)
667 if (handle
== ctx
->fragment_shader
) {
668 /* unbind before deleting */
669 ctx
->pipe
->bind_fs_state(ctx
->pipe
, NULL
);
670 ctx
->fragment_shader
= NULL
;
672 ctx
->pipe
->delete_fs_state(ctx
->pipe
, handle
);
676 cso_save_fragment_shader(struct cso_context
*ctx
)
678 assert(!ctx
->fragment_shader_saved
);
679 ctx
->fragment_shader_saved
= ctx
->fragment_shader
;
683 cso_restore_fragment_shader(struct cso_context
*ctx
)
685 if (ctx
->fragment_shader_saved
!= ctx
->fragment_shader
) {
686 ctx
->pipe
->bind_fs_state(ctx
->pipe
, ctx
->fragment_shader_saved
);
687 ctx
->fragment_shader
= ctx
->fragment_shader_saved
;
689 ctx
->fragment_shader_saved
= NULL
;
693 void cso_set_vertex_shader_handle(struct cso_context
*ctx
, void *handle
)
695 if (ctx
->vertex_shader
!= handle
) {
696 ctx
->vertex_shader
= handle
;
697 ctx
->pipe
->bind_vs_state(ctx
->pipe
, handle
);
701 void cso_delete_vertex_shader(struct cso_context
*ctx
, void *handle
)
703 if (handle
== ctx
->vertex_shader
) {
704 /* unbind before deleting */
705 ctx
->pipe
->bind_vs_state(ctx
->pipe
, NULL
);
706 ctx
->vertex_shader
= NULL
;
708 ctx
->pipe
->delete_vs_state(ctx
->pipe
, handle
);
712 cso_save_vertex_shader(struct cso_context
*ctx
)
714 assert(!ctx
->vertex_shader_saved
);
715 ctx
->vertex_shader_saved
= ctx
->vertex_shader
;
719 cso_restore_vertex_shader(struct cso_context
*ctx
)
721 if (ctx
->vertex_shader_saved
!= ctx
->vertex_shader
) {
722 ctx
->pipe
->bind_vs_state(ctx
->pipe
, ctx
->vertex_shader_saved
);
723 ctx
->vertex_shader
= ctx
->vertex_shader_saved
;
725 ctx
->vertex_shader_saved
= NULL
;
729 void cso_set_framebuffer(struct cso_context
*ctx
,
730 const struct pipe_framebuffer_state
*fb
)
732 if (memcmp(&ctx
->fb
, fb
, sizeof(*fb
)) != 0) {
733 util_copy_framebuffer_state(&ctx
->fb
, fb
);
734 ctx
->pipe
->set_framebuffer_state(ctx
->pipe
, fb
);
739 cso_save_framebuffer(struct cso_context
*ctx
)
741 util_copy_framebuffer_state(&ctx
->fb_saved
, &ctx
->fb
);
745 cso_restore_framebuffer(struct cso_context
*ctx
)
747 if (memcmp(&ctx
->fb
, &ctx
->fb_saved
, sizeof(ctx
->fb
))) {
748 util_copy_framebuffer_state(&ctx
->fb
, &ctx
->fb_saved
);
749 ctx
->pipe
->set_framebuffer_state(ctx
->pipe
, &ctx
->fb
);
750 util_unreference_framebuffer_state(&ctx
->fb_saved
);
755 void cso_set_viewport(struct cso_context
*ctx
,
756 const struct pipe_viewport_state
*vp
)
758 if (memcmp(&ctx
->vp
, vp
, sizeof(*vp
))) {
760 ctx
->pipe
->set_viewport_states(ctx
->pipe
, 0, 1, vp
);
765 * Setup viewport state for given width and height (position is always (0,0)).
766 * Invert the Y axis if 'invert' is true.
769 cso_set_viewport_dims(struct cso_context
*ctx
,
770 float width
, float height
, boolean invert
)
772 struct pipe_viewport_state vp
;
773 vp
.scale
[0] = width
* 0.5f
;
774 vp
.scale
[1] = height
* (invert
? -0.5f
: 0.5f
);
776 vp
.translate
[0] = 0.5f
* width
;
777 vp
.translate
[1] = 0.5f
* height
;
778 vp
.translate
[2] = 0.5f
;
779 cso_set_viewport(ctx
, &vp
);
783 cso_save_viewport(struct cso_context
*ctx
)
785 ctx
->vp_saved
= ctx
->vp
;
790 cso_restore_viewport(struct cso_context
*ctx
)
792 if (memcmp(&ctx
->vp
, &ctx
->vp_saved
, sizeof(ctx
->vp
))) {
793 ctx
->vp
= ctx
->vp_saved
;
794 ctx
->pipe
->set_viewport_states(ctx
->pipe
, 0, 1, &ctx
->vp
);
799 void cso_set_blend_color(struct cso_context
*ctx
,
800 const struct pipe_blend_color
*bc
)
802 if (memcmp(&ctx
->blend_color
, bc
, sizeof(ctx
->blend_color
))) {
803 ctx
->blend_color
= *bc
;
804 ctx
->pipe
->set_blend_color(ctx
->pipe
, bc
);
808 void cso_set_sample_mask(struct cso_context
*ctx
, unsigned sample_mask
)
810 if (ctx
->sample_mask
!= sample_mask
) {
811 ctx
->sample_mask
= sample_mask
;
812 ctx
->pipe
->set_sample_mask(ctx
->pipe
, sample_mask
);
817 cso_save_sample_mask(struct cso_context
*ctx
)
819 ctx
->sample_mask_saved
= ctx
->sample_mask
;
823 cso_restore_sample_mask(struct cso_context
*ctx
)
825 cso_set_sample_mask(ctx
, ctx
->sample_mask_saved
);
828 void cso_set_min_samples(struct cso_context
*ctx
, unsigned min_samples
)
830 if (ctx
->min_samples
!= min_samples
&& ctx
->pipe
->set_min_samples
) {
831 ctx
->min_samples
= min_samples
;
832 ctx
->pipe
->set_min_samples(ctx
->pipe
, min_samples
);
837 cso_save_min_samples(struct cso_context
*ctx
)
839 ctx
->min_samples_saved
= ctx
->min_samples
;
843 cso_restore_min_samples(struct cso_context
*ctx
)
845 cso_set_min_samples(ctx
, ctx
->min_samples_saved
);
848 void cso_set_stencil_ref(struct cso_context
*ctx
,
849 const struct pipe_stencil_ref
*sr
)
851 if (memcmp(&ctx
->stencil_ref
, sr
, sizeof(ctx
->stencil_ref
))) {
852 ctx
->stencil_ref
= *sr
;
853 ctx
->pipe
->set_stencil_ref(ctx
->pipe
, sr
);
858 cso_save_stencil_ref(struct cso_context
*ctx
)
860 ctx
->stencil_ref_saved
= ctx
->stencil_ref
;
865 cso_restore_stencil_ref(struct cso_context
*ctx
)
867 if (memcmp(&ctx
->stencil_ref
, &ctx
->stencil_ref_saved
,
868 sizeof(ctx
->stencil_ref
))) {
869 ctx
->stencil_ref
= ctx
->stencil_ref_saved
;
870 ctx
->pipe
->set_stencil_ref(ctx
->pipe
, &ctx
->stencil_ref
);
874 void cso_set_render_condition(struct cso_context
*ctx
,
875 struct pipe_query
*query
,
877 enum pipe_render_cond_flag mode
)
879 struct pipe_context
*pipe
= ctx
->pipe
;
881 if (ctx
->render_condition
!= query
||
882 ctx
->render_condition_mode
!= mode
||
883 ctx
->render_condition_cond
!= condition
) {
884 pipe
->render_condition(pipe
, query
, condition
, mode
);
885 ctx
->render_condition
= query
;
886 ctx
->render_condition_cond
= condition
;
887 ctx
->render_condition_mode
= mode
;
892 cso_save_render_condition(struct cso_context
*ctx
)
894 ctx
->render_condition_saved
= ctx
->render_condition
;
895 ctx
->render_condition_cond_saved
= ctx
->render_condition_cond
;
896 ctx
->render_condition_mode_saved
= ctx
->render_condition_mode
;
900 cso_restore_render_condition(struct cso_context
*ctx
)
902 cso_set_render_condition(ctx
, ctx
->render_condition_saved
,
903 ctx
->render_condition_cond_saved
,
904 ctx
->render_condition_mode_saved
);
907 void cso_set_geometry_shader_handle(struct cso_context
*ctx
, void *handle
)
909 assert(ctx
->has_geometry_shader
|| !handle
);
911 if (ctx
->has_geometry_shader
&& ctx
->geometry_shader
!= handle
) {
912 ctx
->geometry_shader
= handle
;
913 ctx
->pipe
->bind_gs_state(ctx
->pipe
, handle
);
917 void cso_delete_geometry_shader(struct cso_context
*ctx
, void *handle
)
919 if (handle
== ctx
->geometry_shader
) {
920 /* unbind before deleting */
921 ctx
->pipe
->bind_gs_state(ctx
->pipe
, NULL
);
922 ctx
->geometry_shader
= NULL
;
924 ctx
->pipe
->delete_gs_state(ctx
->pipe
, handle
);
928 cso_save_geometry_shader(struct cso_context
*ctx
)
930 if (!ctx
->has_geometry_shader
) {
934 assert(!ctx
->geometry_shader_saved
);
935 ctx
->geometry_shader_saved
= ctx
->geometry_shader
;
939 cso_restore_geometry_shader(struct cso_context
*ctx
)
941 if (!ctx
->has_geometry_shader
) {
945 if (ctx
->geometry_shader_saved
!= ctx
->geometry_shader
) {
946 ctx
->pipe
->bind_gs_state(ctx
->pipe
, ctx
->geometry_shader_saved
);
947 ctx
->geometry_shader
= ctx
->geometry_shader_saved
;
949 ctx
->geometry_shader_saved
= NULL
;
952 void cso_set_tessctrl_shader_handle(struct cso_context
*ctx
, void *handle
)
954 assert(ctx
->has_tessellation
|| !handle
);
956 if (ctx
->has_tessellation
&& ctx
->tessctrl_shader
!= handle
) {
957 ctx
->tessctrl_shader
= handle
;
958 ctx
->pipe
->bind_tcs_state(ctx
->pipe
, handle
);
962 void cso_delete_tessctrl_shader(struct cso_context
*ctx
, void *handle
)
964 if (handle
== ctx
->tessctrl_shader
) {
965 /* unbind before deleting */
966 ctx
->pipe
->bind_tcs_state(ctx
->pipe
, NULL
);
967 ctx
->tessctrl_shader
= NULL
;
969 ctx
->pipe
->delete_tcs_state(ctx
->pipe
, handle
);
973 cso_save_tessctrl_shader(struct cso_context
*ctx
)
975 if (!ctx
->has_tessellation
) {
979 assert(!ctx
->tessctrl_shader_saved
);
980 ctx
->tessctrl_shader_saved
= ctx
->tessctrl_shader
;
984 cso_restore_tessctrl_shader(struct cso_context
*ctx
)
986 if (!ctx
->has_tessellation
) {
990 if (ctx
->tessctrl_shader_saved
!= ctx
->tessctrl_shader
) {
991 ctx
->pipe
->bind_tcs_state(ctx
->pipe
, ctx
->tessctrl_shader_saved
);
992 ctx
->tessctrl_shader
= ctx
->tessctrl_shader_saved
;
994 ctx
->tessctrl_shader_saved
= NULL
;
997 void cso_set_tesseval_shader_handle(struct cso_context
*ctx
, void *handle
)
999 assert(ctx
->has_tessellation
|| !handle
);
1001 if (ctx
->has_tessellation
&& ctx
->tesseval_shader
!= handle
) {
1002 ctx
->tesseval_shader
= handle
;
1003 ctx
->pipe
->bind_tes_state(ctx
->pipe
, handle
);
1007 void cso_delete_tesseval_shader(struct cso_context
*ctx
, void *handle
)
1009 if (handle
== ctx
->tesseval_shader
) {
1010 /* unbind before deleting */
1011 ctx
->pipe
->bind_tes_state(ctx
->pipe
, NULL
);
1012 ctx
->tesseval_shader
= NULL
;
1014 ctx
->pipe
->delete_tes_state(ctx
->pipe
, handle
);
1018 cso_save_tesseval_shader(struct cso_context
*ctx
)
1020 if (!ctx
->has_tessellation
) {
1024 assert(!ctx
->tesseval_shader_saved
);
1025 ctx
->tesseval_shader_saved
= ctx
->tesseval_shader
;
1029 cso_restore_tesseval_shader(struct cso_context
*ctx
)
1031 if (!ctx
->has_tessellation
) {
1035 if (ctx
->tesseval_shader_saved
!= ctx
->tesseval_shader
) {
1036 ctx
->pipe
->bind_tes_state(ctx
->pipe
, ctx
->tesseval_shader_saved
);
1037 ctx
->tesseval_shader
= ctx
->tesseval_shader_saved
;
1039 ctx
->tesseval_shader_saved
= NULL
;
1042 void cso_set_compute_shader_handle(struct cso_context
*ctx
, void *handle
)
1044 assert(ctx
->has_compute_shader
|| !handle
);
1046 if (ctx
->has_compute_shader
&& ctx
->compute_shader
!= handle
) {
1047 ctx
->compute_shader
= handle
;
1048 ctx
->pipe
->bind_compute_state(ctx
->pipe
, handle
);
1052 void cso_delete_compute_shader(struct cso_context
*ctx
, void *handle
)
1054 if (handle
== ctx
->compute_shader
) {
1055 /* unbind before deleting */
1056 ctx
->pipe
->bind_compute_state(ctx
->pipe
, NULL
);
1057 ctx
->compute_shader
= NULL
;
1059 ctx
->pipe
->delete_compute_state(ctx
->pipe
, handle
);
1063 cso_set_vertex_elements_direct(struct cso_context
*ctx
,
1064 const struct cso_velems_state
*velems
)
1066 unsigned key_size
, hash_key
;
1067 struct cso_hash_iter iter
;
1070 /* Need to include the count into the stored state data too.
1071 * Otherwise first few count pipe_vertex_elements could be identical
1072 * even if count is different, and there's no guarantee the hash would
1073 * be different in that case neither.
1075 key_size
= sizeof(struct pipe_vertex_element
) * velems
->count
+
1077 hash_key
= cso_construct_key((void*)velems
, key_size
);
1078 iter
= cso_find_state_template(ctx
->cache
, hash_key
, CSO_VELEMENTS
,
1079 (void*)velems
, key_size
);
1081 if (cso_hash_iter_is_null(iter
)) {
1082 struct cso_velements
*cso
= MALLOC(sizeof(struct cso_velements
));
1086 memcpy(&cso
->state
, velems
, key_size
);
1087 cso
->data
= ctx
->pipe
->create_vertex_elements_state(ctx
->pipe
,
1089 &cso
->state
.velems
[0]);
1091 (cso_state_callback
) ctx
->pipe
->delete_vertex_elements_state
;
1092 cso
->context
= ctx
->pipe
;
1094 iter
= cso_insert_state(ctx
->cache
, hash_key
, CSO_VELEMENTS
, cso
);
1095 if (cso_hash_iter_is_null(iter
)) {
1103 handle
= ((struct cso_velements
*)cso_hash_iter_data(iter
))->data
;
1106 if (ctx
->velements
!= handle
) {
1107 ctx
->velements
= handle
;
1108 ctx
->pipe
->bind_vertex_elements_state(ctx
->pipe
, handle
);
1113 cso_set_vertex_elements(struct cso_context
*ctx
,
1114 const struct cso_velems_state
*velems
)
1116 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1119 u_vbuf_set_vertex_elements(vbuf
, velems
);
1123 cso_set_vertex_elements_direct(ctx
, velems
);
1128 cso_save_vertex_elements(struct cso_context
*ctx
)
1130 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1133 u_vbuf_save_vertex_elements(vbuf
);
1137 assert(!ctx
->velements_saved
);
1138 ctx
->velements_saved
= ctx
->velements
;
1142 cso_restore_vertex_elements(struct cso_context
*ctx
)
1144 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1147 u_vbuf_restore_vertex_elements(vbuf
);
1151 if (ctx
->velements
!= ctx
->velements_saved
) {
1152 ctx
->velements
= ctx
->velements_saved
;
1153 ctx
->pipe
->bind_vertex_elements_state(ctx
->pipe
, ctx
->velements_saved
);
1155 ctx
->velements_saved
= NULL
;
1158 /* vertex buffers */
1161 cso_set_vertex_buffers_direct(struct cso_context
*ctx
,
1162 unsigned start_slot
, unsigned count
,
1163 const struct pipe_vertex_buffer
*buffers
)
1165 /* Save what's in the auxiliary slot, so that we can save and restore it
1168 if (start_slot
== 0) {
1170 pipe_vertex_buffer_reference(&ctx
->vertex_buffer0_current
,
1173 pipe_vertex_buffer_unreference(&ctx
->vertex_buffer0_current
);
1177 ctx
->pipe
->set_vertex_buffers(ctx
->pipe
, start_slot
, count
, buffers
);
1181 void cso_set_vertex_buffers(struct cso_context
*ctx
,
1182 unsigned start_slot
, unsigned count
,
1183 const struct pipe_vertex_buffer
*buffers
)
1185 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1191 u_vbuf_set_vertex_buffers(vbuf
, start_slot
, count
, buffers
);
1195 cso_set_vertex_buffers_direct(ctx
, start_slot
, count
, buffers
);
1199 cso_save_vertex_buffer0(struct cso_context
*ctx
)
1201 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1204 u_vbuf_save_vertex_buffer0(vbuf
);
1208 pipe_vertex_buffer_reference(&ctx
->vertex_buffer0_saved
,
1209 &ctx
->vertex_buffer0_current
);
1213 cso_restore_vertex_buffer0(struct cso_context
*ctx
)
1215 struct u_vbuf
*vbuf
= ctx
->vbuf_current
;
1218 u_vbuf_restore_vertex_buffer0(vbuf
);
1222 cso_set_vertex_buffers(ctx
, 0, 1, &ctx
->vertex_buffer0_saved
);
1223 pipe_vertex_buffer_unreference(&ctx
->vertex_buffer0_saved
);
1227 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1228 * for user vertex buffers and user vertex buffers are not set by this call.
1229 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1231 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1232 * such as VBOs, glBegin/End, and display lists.
1234 * Internal operations that do "save states, draw, restore states" shouldn't
1235 * use this, because the states are only saved in either cso_context or
1239 cso_set_vertex_buffers_and_elements(struct cso_context
*ctx
,
1240 const struct cso_velems_state
*velems
,
1242 unsigned unbind_trailing_vb_count
,
1243 const struct pipe_vertex_buffer
*vbuffers
,
1244 bool uses_user_vertex_buffers
)
1246 struct u_vbuf
*vbuf
= ctx
->vbuf
;
1248 if (vbuf
&& (ctx
->always_use_vbuf
|| uses_user_vertex_buffers
)) {
1249 if (!ctx
->vbuf_current
) {
1250 /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
1251 unsigned unbind_vb_count
= vb_count
+ unbind_trailing_vb_count
;
1252 if (unbind_vb_count
)
1253 cso_set_vertex_buffers_direct(ctx
, 0, unbind_vb_count
, NULL
);
1255 /* Unset this to make sure the CSO is re-bound on the next use. */
1256 ctx
->velements
= NULL
;
1257 ctx
->vbuf_current
= vbuf
;
1258 } else if (unbind_trailing_vb_count
) {
1259 u_vbuf_set_vertex_buffers(vbuf
, vb_count
, unbind_trailing_vb_count
,
1264 u_vbuf_set_vertex_buffers(vbuf
, 0, vb_count
, vbuffers
);
1265 u_vbuf_set_vertex_elements(vbuf
, velems
);
1269 if (ctx
->vbuf_current
) {
1270 /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
1271 unsigned unbind_vb_count
= vb_count
+ unbind_trailing_vb_count
;
1272 if (unbind_vb_count
)
1273 u_vbuf_set_vertex_buffers(vbuf
, 0, unbind_vb_count
, NULL
);
1275 /* Unset this to make sure the CSO is re-bound on the next use. */
1276 u_vbuf_unset_vertex_elements(vbuf
);
1277 ctx
->vbuf_current
= NULL
;
1278 } else if (unbind_trailing_vb_count
) {
1279 cso_set_vertex_buffers_direct(ctx
, vb_count
, unbind_trailing_vb_count
,
1284 cso_set_vertex_buffers_direct(ctx
, 0, vb_count
, vbuffers
);
1285 cso_set_vertex_elements_direct(ctx
, velems
);
1289 cso_single_sampler(struct cso_context
*ctx
, enum pipe_shader_type shader_stage
,
1290 unsigned idx
, const struct pipe_sampler_state
*templ
)
1293 unsigned key_size
= sizeof(struct pipe_sampler_state
);
1294 unsigned hash_key
= cso_construct_key((void*)templ
, key_size
);
1295 struct cso_sampler
*cso
;
1296 struct cso_hash_iter iter
=
1297 cso_find_state_template(ctx
->cache
,
1298 hash_key
, CSO_SAMPLER
,
1299 (void *) templ
, key_size
);
1301 if (cso_hash_iter_is_null(iter
)) {
1302 cso
= MALLOC(sizeof(struct cso_sampler
));
1306 memcpy(&cso
->state
, templ
, sizeof(*templ
));
1307 cso
->data
= ctx
->pipe
->create_sampler_state(ctx
->pipe
, &cso
->state
);
1309 (cso_state_callback
) ctx
->pipe
->delete_sampler_state
;
1310 cso
->context
= ctx
->pipe
;
1311 cso
->hash_key
= hash_key
;
1313 iter
= cso_insert_state(ctx
->cache
, hash_key
, CSO_SAMPLER
, cso
);
1314 if (cso_hash_iter_is_null(iter
)) {
1320 cso
= cso_hash_iter_data(iter
);
1323 ctx
->samplers
[shader_stage
].cso_samplers
[idx
] = cso
;
1324 ctx
->samplers
[shader_stage
].samplers
[idx
] = cso
->data
;
1325 ctx
->max_sampler_seen
= MAX2(ctx
->max_sampler_seen
, (int)idx
);
1331 * Send staged sampler state to the driver.
1334 cso_single_sampler_done(struct cso_context
*ctx
,
1335 enum pipe_shader_type shader_stage
)
1337 struct sampler_info
*info
= &ctx
->samplers
[shader_stage
];
1339 if (ctx
->max_sampler_seen
== -1)
1342 ctx
->pipe
->bind_sampler_states(ctx
->pipe
, shader_stage
, 0,
1343 ctx
->max_sampler_seen
+ 1,
1345 ctx
->max_sampler_seen
= -1;
1350 * If the function encouters any errors it will return the
1351 * last one. Done to always try to set as many samplers
1355 cso_set_samplers(struct cso_context
*ctx
,
1356 enum pipe_shader_type shader_stage
,
1358 const struct pipe_sampler_state
**templates
)
1360 for (unsigned i
= 0; i
< nr
; i
++)
1361 cso_single_sampler(ctx
, shader_stage
, i
, templates
[i
]);
1363 cso_single_sampler_done(ctx
, shader_stage
);
1367 cso_save_fragment_samplers(struct cso_context
*ctx
)
1369 struct sampler_info
*info
= &ctx
->samplers
[PIPE_SHADER_FRAGMENT
];
1370 struct sampler_info
*saved
= &ctx
->fragment_samplers_saved
;
1372 memcpy(saved
->cso_samplers
, info
->cso_samplers
,
1373 sizeof(info
->cso_samplers
));
1374 memcpy(saved
->samplers
, info
->samplers
, sizeof(info
->samplers
));
1379 cso_restore_fragment_samplers(struct cso_context
*ctx
)
1381 struct sampler_info
*info
= &ctx
->samplers
[PIPE_SHADER_FRAGMENT
];
1382 struct sampler_info
*saved
= &ctx
->fragment_samplers_saved
;
1384 memcpy(info
->cso_samplers
, saved
->cso_samplers
,
1385 sizeof(info
->cso_samplers
));
1386 memcpy(info
->samplers
, saved
->samplers
, sizeof(info
->samplers
));
1388 for (int i
= PIPE_MAX_SAMPLERS
- 1; i
>= 0; i
--) {
1389 if (info
->samplers
[i
]) {
1390 ctx
->max_sampler_seen
= i
;
1395 cso_single_sampler_done(ctx
, PIPE_SHADER_FRAGMENT
);
1400 cso_set_sampler_views(struct cso_context
*ctx
,
1401 enum pipe_shader_type shader_stage
,
1403 struct pipe_sampler_view
**views
)
1405 if (shader_stage
== PIPE_SHADER_FRAGMENT
) {
1407 boolean any_change
= FALSE
;
1409 /* reference new views */
1410 for (i
= 0; i
< count
; i
++) {
1411 any_change
|= ctx
->fragment_views
[i
] != views
[i
];
1412 pipe_sampler_view_reference(&ctx
->fragment_views
[i
], views
[i
]);
1414 /* unref extra old views, if any */
1415 for (; i
< ctx
->nr_fragment_views
; i
++) {
1416 any_change
|= ctx
->fragment_views
[i
] != NULL
;
1417 pipe_sampler_view_reference(&ctx
->fragment_views
[i
], NULL
);
1420 /* bind the new sampler views */
1422 ctx
->pipe
->set_sampler_views(ctx
->pipe
, shader_stage
, 0,
1423 MAX2(ctx
->nr_fragment_views
, count
),
1424 ctx
->fragment_views
);
1427 ctx
->nr_fragment_views
= count
;
1430 ctx
->pipe
->set_sampler_views(ctx
->pipe
, shader_stage
, 0, count
, views
);
1435 cso_save_fragment_sampler_views(struct cso_context
*ctx
)
1439 ctx
->nr_fragment_views_saved
= ctx
->nr_fragment_views
;
1441 for (i
= 0; i
< ctx
->nr_fragment_views
; i
++) {
1442 assert(!ctx
->fragment_views_saved
[i
]);
1443 pipe_sampler_view_reference(&ctx
->fragment_views_saved
[i
],
1444 ctx
->fragment_views
[i
]);
1450 cso_restore_fragment_sampler_views(struct cso_context
*ctx
)
1452 unsigned i
, nr_saved
= ctx
->nr_fragment_views_saved
;
1455 for (i
= 0; i
< nr_saved
; i
++) {
1456 pipe_sampler_view_reference(&ctx
->fragment_views
[i
], NULL
);
1457 /* move the reference from one pointer to another */
1458 ctx
->fragment_views
[i
] = ctx
->fragment_views_saved
[i
];
1459 ctx
->fragment_views_saved
[i
] = NULL
;
1461 for (; i
< ctx
->nr_fragment_views
; i
++) {
1462 pipe_sampler_view_reference(&ctx
->fragment_views
[i
], NULL
);
1465 num
= MAX2(ctx
->nr_fragment_views
, nr_saved
);
1467 /* bind the old/saved sampler views */
1468 ctx
->pipe
->set_sampler_views(ctx
->pipe
, PIPE_SHADER_FRAGMENT
, 0, num
,
1469 ctx
->fragment_views
);
1471 ctx
->nr_fragment_views
= nr_saved
;
1472 ctx
->nr_fragment_views_saved
= 0;
1477 cso_set_shader_images(struct cso_context
*ctx
,
1478 enum pipe_shader_type shader_stage
,
1479 unsigned start
, unsigned count
,
1480 struct pipe_image_view
*images
)
1482 if (shader_stage
== PIPE_SHADER_FRAGMENT
&& start
== 0 && count
>= 1) {
1483 util_copy_image_view(&ctx
->fragment_image0_current
, &images
[0]);
1486 ctx
->pipe
->set_shader_images(ctx
->pipe
, shader_stage
, start
, count
, images
);
1491 cso_save_fragment_image0(struct cso_context
*ctx
)
1493 util_copy_image_view(&ctx
->fragment_image0_saved
,
1494 &ctx
->fragment_image0_current
);
1499 cso_restore_fragment_image0(struct cso_context
*ctx
)
1501 cso_set_shader_images(ctx
, PIPE_SHADER_FRAGMENT
, 0, 1,
1502 &ctx
->fragment_image0_saved
);
1507 cso_set_stream_outputs(struct cso_context
*ctx
,
1508 unsigned num_targets
,
1509 struct pipe_stream_output_target
**targets
,
1510 const unsigned *offsets
)
1512 struct pipe_context
*pipe
= ctx
->pipe
;
1515 if (!ctx
->has_streamout
) {
1516 assert(num_targets
== 0);
1520 if (ctx
->nr_so_targets
== 0 && num_targets
== 0) {
1521 /* Nothing to do. */
1525 /* reference new targets */
1526 for (i
= 0; i
< num_targets
; i
++) {
1527 pipe_so_target_reference(&ctx
->so_targets
[i
], targets
[i
]);
1529 /* unref extra old targets, if any */
1530 for (; i
< ctx
->nr_so_targets
; i
++) {
1531 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
1534 pipe
->set_stream_output_targets(pipe
, num_targets
, targets
,
1536 ctx
->nr_so_targets
= num_targets
;
1540 cso_save_stream_outputs(struct cso_context
*ctx
)
1544 if (!ctx
->has_streamout
) {
1548 ctx
->nr_so_targets_saved
= ctx
->nr_so_targets
;
1550 for (i
= 0; i
< ctx
->nr_so_targets
; i
++) {
1551 assert(!ctx
->so_targets_saved
[i
]);
1552 pipe_so_target_reference(&ctx
->so_targets_saved
[i
], ctx
->so_targets
[i
]);
1557 cso_restore_stream_outputs(struct cso_context
*ctx
)
1559 struct pipe_context
*pipe
= ctx
->pipe
;
1561 unsigned offset
[PIPE_MAX_SO_BUFFERS
];
1563 if (!ctx
->has_streamout
) {
1567 if (ctx
->nr_so_targets
== 0 && ctx
->nr_so_targets_saved
== 0) {
1568 /* Nothing to do. */
1572 assert(ctx
->nr_so_targets_saved
<= PIPE_MAX_SO_BUFFERS
);
1573 for (i
= 0; i
< ctx
->nr_so_targets_saved
; i
++) {
1574 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
1575 /* move the reference from one pointer to another */
1576 ctx
->so_targets
[i
] = ctx
->so_targets_saved
[i
];
1577 ctx
->so_targets_saved
[i
] = NULL
;
1578 /* -1 means append */
1579 offset
[i
] = (unsigned)-1;
1581 for (; i
< ctx
->nr_so_targets
; i
++) {
1582 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
1585 pipe
->set_stream_output_targets(pipe
, ctx
->nr_so_targets_saved
,
1586 ctx
->so_targets
, offset
);
1588 ctx
->nr_so_targets
= ctx
->nr_so_targets_saved
;
1589 ctx
->nr_so_targets_saved
= 0;
1592 /* constant buffers */
1595 cso_set_constant_buffer(struct cso_context
*cso
,
1596 enum pipe_shader_type shader_stage
,
1597 unsigned index
, struct pipe_constant_buffer
*cb
)
1599 struct pipe_context
*pipe
= cso
->pipe
;
1601 pipe
->set_constant_buffer(pipe
, shader_stage
, index
, cb
);
1604 util_copy_constant_buffer(&cso
->aux_constbuf_current
[shader_stage
], cb
);
1609 cso_set_constant_buffer_resource(struct cso_context
*cso
,
1610 enum pipe_shader_type shader_stage
,
1612 struct pipe_resource
*buffer
)
1615 struct pipe_constant_buffer cb
;
1617 cb
.buffer_offset
= 0;
1618 cb
.buffer_size
= buffer
->width0
;
1619 cb
.user_buffer
= NULL
;
1620 cso_set_constant_buffer(cso
, shader_stage
, index
, &cb
);
1622 cso_set_constant_buffer(cso
, shader_stage
, index
, NULL
);
1627 cso_set_constant_user_buffer(struct cso_context
*cso
,
1628 enum pipe_shader_type shader_stage
,
1629 unsigned index
, void *ptr
, unsigned size
)
1632 struct pipe_constant_buffer cb
;
1634 cb
.buffer_offset
= 0;
1635 cb
.buffer_size
= size
;
1636 cb
.user_buffer
= ptr
;
1637 cso_set_constant_buffer(cso
, shader_stage
, index
, &cb
);
1639 cso_set_constant_buffer(cso
, shader_stage
, index
, NULL
);
1644 cso_save_constant_buffer_slot0(struct cso_context
*cso
,
1645 enum pipe_shader_type shader_stage
)
1647 util_copy_constant_buffer(&cso
->aux_constbuf_saved
[shader_stage
],
1648 &cso
->aux_constbuf_current
[shader_stage
]);
1652 cso_restore_constant_buffer_slot0(struct cso_context
*cso
,
1653 enum pipe_shader_type shader_stage
)
1655 cso_set_constant_buffer(cso
, shader_stage
, 0,
1656 &cso
->aux_constbuf_saved
[shader_stage
]);
1657 pipe_resource_reference(&cso
->aux_constbuf_saved
[shader_stage
].buffer
,
1663 * Save all the CSO state items specified by the state_mask bitmask
1664 * of CSO_BIT_x flags.
1667 cso_save_state(struct cso_context
*cso
, unsigned state_mask
)
1669 assert(cso
->saved_state
== 0);
1671 cso
->saved_state
= state_mask
;
1673 if (state_mask
& CSO_BIT_AUX_VERTEX_BUFFER_SLOT
)
1674 cso_save_vertex_buffer0(cso
);
1675 if (state_mask
& CSO_BIT_BLEND
)
1676 cso_save_blend(cso
);
1677 if (state_mask
& CSO_BIT_DEPTH_STENCIL_ALPHA
)
1678 cso_save_depth_stencil_alpha(cso
);
1679 if (state_mask
& CSO_BIT_FRAGMENT_SAMPLERS
)
1680 cso_save_fragment_samplers(cso
);
1681 if (state_mask
& CSO_BIT_FRAGMENT_SAMPLER_VIEWS
)
1682 cso_save_fragment_sampler_views(cso
);
1683 if (state_mask
& CSO_BIT_FRAGMENT_SHADER
)
1684 cso_save_fragment_shader(cso
);
1685 if (state_mask
& CSO_BIT_FRAMEBUFFER
)
1686 cso_save_framebuffer(cso
);
1687 if (state_mask
& CSO_BIT_GEOMETRY_SHADER
)
1688 cso_save_geometry_shader(cso
);
1689 if (state_mask
& CSO_BIT_MIN_SAMPLES
)
1690 cso_save_min_samples(cso
);
1691 if (state_mask
& CSO_BIT_RASTERIZER
)
1692 cso_save_rasterizer(cso
);
1693 if (state_mask
& CSO_BIT_RENDER_CONDITION
)
1694 cso_save_render_condition(cso
);
1695 if (state_mask
& CSO_BIT_SAMPLE_MASK
)
1696 cso_save_sample_mask(cso
);
1697 if (state_mask
& CSO_BIT_STENCIL_REF
)
1698 cso_save_stencil_ref(cso
);
1699 if (state_mask
& CSO_BIT_STREAM_OUTPUTS
)
1700 cso_save_stream_outputs(cso
);
1701 if (state_mask
& CSO_BIT_TESSCTRL_SHADER
)
1702 cso_save_tessctrl_shader(cso
);
1703 if (state_mask
& CSO_BIT_TESSEVAL_SHADER
)
1704 cso_save_tesseval_shader(cso
);
1705 if (state_mask
& CSO_BIT_VERTEX_ELEMENTS
)
1706 cso_save_vertex_elements(cso
);
1707 if (state_mask
& CSO_BIT_VERTEX_SHADER
)
1708 cso_save_vertex_shader(cso
);
1709 if (state_mask
& CSO_BIT_VIEWPORT
)
1710 cso_save_viewport(cso
);
1711 if (state_mask
& CSO_BIT_PAUSE_QUERIES
)
1712 cso
->pipe
->set_active_query_state(cso
->pipe
, false);
1713 if (state_mask
& CSO_BIT_FRAGMENT_IMAGE0
)
1714 cso_save_fragment_image0(cso
);
1719 * Restore the state which was saved by cso_save_state().
1722 cso_restore_state(struct cso_context
*cso
)
1724 unsigned state_mask
= cso
->saved_state
;
1728 if (state_mask
& CSO_BIT_AUX_VERTEX_BUFFER_SLOT
)
1729 cso_restore_vertex_buffer0(cso
);
1730 if (state_mask
& CSO_BIT_BLEND
)
1731 cso_restore_blend(cso
);
1732 if (state_mask
& CSO_BIT_DEPTH_STENCIL_ALPHA
)
1733 cso_restore_depth_stencil_alpha(cso
);
1734 if (state_mask
& CSO_BIT_FRAGMENT_SAMPLERS
)
1735 cso_restore_fragment_samplers(cso
);
1736 if (state_mask
& CSO_BIT_FRAGMENT_SAMPLER_VIEWS
)
1737 cso_restore_fragment_sampler_views(cso
);
1738 if (state_mask
& CSO_BIT_FRAGMENT_SHADER
)
1739 cso_restore_fragment_shader(cso
);
1740 if (state_mask
& CSO_BIT_FRAMEBUFFER
)
1741 cso_restore_framebuffer(cso
);
1742 if (state_mask
& CSO_BIT_GEOMETRY_SHADER
)
1743 cso_restore_geometry_shader(cso
);
1744 if (state_mask
& CSO_BIT_MIN_SAMPLES
)
1745 cso_restore_min_samples(cso
);
1746 if (state_mask
& CSO_BIT_RASTERIZER
)
1747 cso_restore_rasterizer(cso
);
1748 if (state_mask
& CSO_BIT_RENDER_CONDITION
)
1749 cso_restore_render_condition(cso
);
1750 if (state_mask
& CSO_BIT_SAMPLE_MASK
)
1751 cso_restore_sample_mask(cso
);
1752 if (state_mask
& CSO_BIT_STENCIL_REF
)
1753 cso_restore_stencil_ref(cso
);
1754 if (state_mask
& CSO_BIT_STREAM_OUTPUTS
)
1755 cso_restore_stream_outputs(cso
);
1756 if (state_mask
& CSO_BIT_TESSCTRL_SHADER
)
1757 cso_restore_tessctrl_shader(cso
);
1758 if (state_mask
& CSO_BIT_TESSEVAL_SHADER
)
1759 cso_restore_tesseval_shader(cso
);
1760 if (state_mask
& CSO_BIT_VERTEX_ELEMENTS
)
1761 cso_restore_vertex_elements(cso
);
1762 if (state_mask
& CSO_BIT_VERTEX_SHADER
)
1763 cso_restore_vertex_shader(cso
);
1764 if (state_mask
& CSO_BIT_VIEWPORT
)
1765 cso_restore_viewport(cso
);
1766 if (state_mask
& CSO_BIT_PAUSE_QUERIES
)
1767 cso
->pipe
->set_active_query_state(cso
->pipe
, true);
1768 if (state_mask
& CSO_BIT_FRAGMENT_IMAGE0
)
1769 cso_restore_fragment_image0(cso
);
1771 cso
->saved_state
= 0;
1779 cso_draw_vbo(struct cso_context
*cso
,
1780 const struct pipe_draw_info
*info
)
1782 struct u_vbuf
*vbuf
= cso
->vbuf_current
;
1784 /* We can't have both indirect drawing and SO-vertex-count drawing */
1785 assert(info
->indirect
== NULL
|| info
->count_from_stream_output
== NULL
);
1787 /* We can't have SO-vertex-count drawing with an index buffer */
1788 assert(info
->count_from_stream_output
== NULL
|| info
->index_size
== 0);
1791 u_vbuf_draw_vbo(vbuf
, info
);
1793 struct pipe_context
*pipe
= cso
->pipe
;
1794 pipe
->draw_vbo(pipe
, info
);
1799 cso_draw_arrays(struct cso_context
*cso
, uint mode
, uint start
, uint count
)
1801 struct pipe_draw_info info
;
1803 util_draw_init_info(&info
);
1808 info
.min_index
= start
;
1809 info
.max_index
= start
+ count
- 1;
1811 cso_draw_vbo(cso
, &info
);
1815 cso_draw_arrays_instanced(struct cso_context
*cso
, uint mode
,
1816 uint start
, uint count
,
1817 uint start_instance
, uint instance_count
)
1819 struct pipe_draw_info info
;
1821 util_draw_init_info(&info
);
1826 info
.min_index
= start
;
1827 info
.max_index
= start
+ count
- 1;
1828 info
.start_instance
= start_instance
;
1829 info
.instance_count
= instance_count
;
1831 cso_draw_vbo(cso
, &info
);