src/gallium/auxiliary/cso_cache/cso_context.c  (mesa.git, commit 3d730ab94045e2988493473462077c98921fcdbd)
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
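
/*
 * Typical usage, as a minimal illustrative sketch (the particular templates
 * and draw parameters a state tracker passes will of course vary):
 *
 *    struct cso_context *cso = cso_create_context(pipe, 0);
 *    if (cso) {
 *       cso_set_rasterizer(cso, &rast_templ);
 *       cso_set_blend(cso, &blend_templ);
 *       cso_set_viewport_dims(cso, width, height, FALSE);
 *       cso_draw_arrays(cso, PIPE_PRIM_TRIANGLES, 0, 3);
 *       cso_destroy_context(cso);
 *    }
 *
 * Each cso_set_*() call hashes its template, reuses a previously created
 * driver object when an identical template has been seen before, and only
 * calls the driver's bind function when the bound object actually changes.
 */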
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
53 /**
54 * Per-shader sampler information.
55 */
56 struct sampler_info
57 {
58 struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
59 void *samplers[PIPE_MAX_SAMPLERS];
60 };
61
62
63
64 struct cso_context {
65 struct pipe_context *pipe;
66 struct cso_cache *cache;
67 struct u_vbuf *vbuf;
68
69 boolean has_geometry_shader;
70 boolean has_tessellation;
71 boolean has_compute_shader;
72 boolean has_streamout;
73
74 unsigned saved_state; /**< bitmask of CSO_BIT_x flags */
75
76 struct pipe_sampler_view *fragment_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
77 unsigned nr_fragment_views;
78
79 struct pipe_sampler_view *fragment_views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
80 unsigned nr_fragment_views_saved;
81
82 struct sampler_info fragment_samplers_saved;
83 struct sampler_info samplers[PIPE_SHADER_TYPES];
84
85 /* Temporary number until cso_single_sampler_done is called.
86 * It tracks the highest sampler seen in cso_single_sampler.
87 */
88 int max_sampler_seen;
89
90 struct pipe_vertex_buffer vertex_buffer0_current;
91 struct pipe_vertex_buffer vertex_buffer0_saved;
92
93 struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
94 struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];
95
96 struct pipe_image_view fragment_image0_current;
97 struct pipe_image_view fragment_image0_saved;
98
99 unsigned nr_so_targets;
100 struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
101
102 unsigned nr_so_targets_saved;
103 struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];
104
105 /** Current and saved state.
106 * The saved state is used as a 1-deep stack.
107 */
108 void *blend, *blend_saved;
109 void *depth_stencil, *depth_stencil_saved;
110 void *rasterizer, *rasterizer_saved;
111 void *fragment_shader, *fragment_shader_saved;
112 void *vertex_shader, *vertex_shader_saved;
113 void *geometry_shader, *geometry_shader_saved;
114 void *tessctrl_shader, *tessctrl_shader_saved;
115 void *tesseval_shader, *tesseval_shader_saved;
116 void *compute_shader;
117 void *velements, *velements_saved;
118 struct pipe_query *render_condition, *render_condition_saved;
119 uint render_condition_mode, render_condition_mode_saved;
120 boolean render_condition_cond, render_condition_cond_saved;
121
122 struct pipe_framebuffer_state fb, fb_saved;
123 struct pipe_viewport_state vp, vp_saved;
124 struct pipe_blend_color blend_color;
125 unsigned sample_mask, sample_mask_saved;
126 unsigned min_samples, min_samples_saved;
127 struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
128 };
129
130 struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
131 {
132 return cso->pipe;
133 }
134
135 static boolean delete_blend_state(struct cso_context *ctx, void *state)
136 {
137 struct cso_blend *cso = (struct cso_blend *)state;
138
139 if (ctx->blend == cso->data)
140 return FALSE;
141
142 if (cso->delete_state)
143 cso->delete_state(cso->context, cso->data);
144 FREE(state);
145 return TRUE;
146 }
147
148 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
149 {
150 struct cso_depth_stencil_alpha *cso =
151 (struct cso_depth_stencil_alpha *)state;
152
153 if (ctx->depth_stencil == cso->data)
154 return FALSE;
155
156 if (cso->delete_state)
157 cso->delete_state(cso->context, cso->data);
158 FREE(state);
159
160 return TRUE;
161 }
162
163 static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
164 {
165 struct cso_sampler *cso = (struct cso_sampler *)state;
166 if (cso->delete_state)
167 cso->delete_state(cso->context, cso->data);
168 FREE(state);
169 return TRUE;
170 }
171
172 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
173 {
174 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
175
176 if (ctx->rasterizer == cso->data)
177 return FALSE;
178 if (cso->delete_state)
179 cso->delete_state(cso->context, cso->data);
180 FREE(state);
181 return TRUE;
182 }
183
184 static boolean delete_vertex_elements(struct cso_context *ctx,
185 void *state)
186 {
187 struct cso_velements *cso = (struct cso_velements *)state;
188
189 if (ctx->velements == cso->data)
190 return FALSE;
191
192 if (cso->delete_state)
193 cso->delete_state(cso->context, cso->data);
194 FREE(state);
195 return TRUE;
196 }
197
198
199 static inline boolean delete_cso(struct cso_context *ctx,
200 void *state, enum cso_cache_type type)
201 {
202 switch (type) {
203 case CSO_BLEND:
204 return delete_blend_state(ctx, state);
205 case CSO_SAMPLER:
206 return delete_sampler_state(ctx, state);
207 case CSO_DEPTH_STENCIL_ALPHA:
208 return delete_depth_stencil_state(ctx, state);
209 case CSO_RASTERIZER:
210 return delete_rasterizer_state(ctx, state);
211 case CSO_VELEMENTS:
212 return delete_vertex_elements(ctx, state);
213 default:
214 assert(0);
215 FREE(state);
216 }
217 return FALSE;
218 }
219
220 static inline void
221 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
222 int max_size, void *user_data)
223 {
224 struct cso_context *ctx = (struct cso_context *)user_data;
225 /* if we're approaching the maximum size, remove a fourth of the entries,
226 * otherwise every subsequent call will go through the same thing */
227 int hash_size = cso_hash_size(hash);
228 int max_entries = (max_size > hash_size) ? max_size : hash_size;
229 int to_remove = (max_size < max_entries) * max_entries/4;
230 struct cso_hash_iter iter;
231 struct cso_sampler **samplers_to_restore = NULL;
232 unsigned to_restore = 0;
233
234 if (hash_size > max_size)
235 to_remove += hash_size - max_size;
236
237 if (to_remove == 0)
238 return;
239
240 if (type == CSO_SAMPLER) {
241 int i, j;
242
243 samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
244 sizeof(*samplers_to_restore));
245
246 /* Temporarily remove currently bound sampler states from the hash
247 * table, to prevent them from being deleted
248 */
249 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
250 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
251 struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
252
253 if (sampler && cso_hash_take(hash, sampler->hash_key))
254 samplers_to_restore[to_restore++] = sampler;
255 }
256 }
257 }
258
259 iter = cso_hash_first_node(hash);
260 while (to_remove) {
261 /* remove elements until we're good */
262 /* FIXME: currently we pick the nodes to remove at random */
263 void *cso = cso_hash_iter_data(iter);
264
265 if (!cso)
266 break;
267
268 if (delete_cso(ctx, cso, type)) {
269 iter = cso_hash_erase(hash, iter);
270 --to_remove;
271 } else
272 iter = cso_hash_iter_next(iter);
273 }
274
275 if (type == CSO_SAMPLER) {
276 /* Put currently bound sampler states back into the hash table */
277 while (to_restore--) {
278 struct cso_sampler *sampler = samplers_to_restore[to_restore];
279
280 cso_hash_insert(hash, sampler->hash_key, sampler);
281 }
282
283 FREE(samplers_to_restore);
284 }
285 }
286
287 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
288 {
289 struct u_vbuf_caps caps;
290 bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
291
292 u_vbuf_get_caps(cso->pipe->screen, &caps);
293
294 /* Enable u_vbuf if needed. */
295 if (caps.fallback_always ||
296 (uses_user_vertex_buffers &&
297 caps.fallback_only_for_user_vbuffers)) {
298 cso->vbuf = u_vbuf_create(cso->pipe, &caps);
299 }
300 }
301
302 struct cso_context *
303 cso_create_context(struct pipe_context *pipe, unsigned flags)
304 {
305 struct cso_context *ctx = CALLOC_STRUCT(cso_context);
306 if (!ctx)
307 return NULL;
308
309 ctx->cache = cso_cache_create();
310 if (ctx->cache == NULL)
311 goto out;
312 cso_cache_set_sanitize_callback(ctx->cache,
313 sanitize_hash,
314 ctx);
315
316 ctx->pipe = pipe;
317 ctx->sample_mask = ~0;
318
319 cso_init_vbuf(ctx, flags);
320
321 /* Enable for testing: */
322 if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
323
324 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
325 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
326 ctx->has_geometry_shader = TRUE;
327 }
328 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
329 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
330 ctx->has_tessellation = TRUE;
331 }
332 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
333 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
334 int supported_irs =
335 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
336 PIPE_SHADER_CAP_SUPPORTED_IRS);
337 if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
338 (1 << PIPE_SHADER_IR_NIR))) {
339 ctx->has_compute_shader = TRUE;
340 }
341 }
342 if (pipe->screen->get_param(pipe->screen,
343 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
344 ctx->has_streamout = TRUE;
345 }
346
347 ctx->max_sampler_seen = -1;
348 return ctx;
349
350 out:
351 cso_destroy_context( ctx );
352 return NULL;
353 }
354
355 /**
356 * Free the CSO context.
357 */
358 void cso_destroy_context( struct cso_context *ctx )
359 {
360 unsigned i;
361
362 if (ctx->pipe) {
363 ctx->pipe->bind_blend_state( ctx->pipe, NULL );
364 ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
365
366 {
367 static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
368 static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
369 struct pipe_screen *scr = ctx->pipe->screen;
370 enum pipe_shader_type sh;
371 for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
372 int maxsam = scr->get_shader_param(scr, sh,
373 PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
374 int maxview = scr->get_shader_param(scr, sh,
375 PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
376 assert(maxsam <= PIPE_MAX_SAMPLERS);
377 assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
378 if (maxsam > 0) {
379 ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
380 }
381 if (maxview > 0) {
382 ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
383 }
384 }
385 }
386
387 ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
388 ctx->pipe->bind_fs_state( ctx->pipe, NULL );
389 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
390 ctx->pipe->bind_vs_state( ctx->pipe, NULL );
391 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
392 if (ctx->has_geometry_shader) {
393 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
394 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
395 }
396 if (ctx->has_tessellation) {
397 ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
398 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
399 ctx->pipe->bind_tes_state(ctx->pipe, NULL);
400 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
401 }
402 if (ctx->has_compute_shader) {
403 ctx->pipe->bind_compute_state(ctx->pipe, NULL);
404 ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
405 }
406 ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );
407
408 if (ctx->has_streamout)
409 ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
410 }
411
412 for (i = 0; i < ctx->nr_fragment_views; i++) {
413 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
414 }
415 for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
416 pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
417 }
418
419 util_unreference_framebuffer_state(&ctx->fb);
420 util_unreference_framebuffer_state(&ctx->fb_saved);
421
422 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
423 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
424
425 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
426 pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
427 pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
428 }
429
430 pipe_resource_reference(&ctx->fragment_image0_current.resource, NULL);
431 pipe_resource_reference(&ctx->fragment_image0_saved.resource, NULL);
432
433 for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
434 pipe_so_target_reference(&ctx->so_targets[i], NULL);
435 pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
436 }
437
438 if (ctx->cache) {
439 cso_cache_delete( ctx->cache );
440 ctx->cache = NULL;
441 }
442
443 if (ctx->vbuf)
444 u_vbuf_destroy(ctx->vbuf);
445 FREE( ctx );
446 }
447
448
449 /* These functions will either find the state matching the given template
450 * in the cache, or they will create a new state from the given
451 * template, insert it into the cache and return it.
452 */
453
454 /*
455 * If the driver returns 0 from the create method then the data member
456 * of the cso will be the template itself.
457 */
458
459 enum pipe_error cso_set_blend(struct cso_context *ctx,
460 const struct pipe_blend_state *templ)
461 {
462 unsigned key_size, hash_key;
463 struct cso_hash_iter iter;
464 void *handle;
465
466 key_size = templ->independent_blend_enable ?
467 sizeof(struct pipe_blend_state) :
468 (char *)&(templ->rt[1]) - (char *)templ;
469 hash_key = cso_construct_key((void*)templ, key_size);
470 iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
471 (void*)templ, key_size);
472
473 if (cso_hash_iter_is_null(iter)) {
474 struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
475 if (!cso)
476 return PIPE_ERROR_OUT_OF_MEMORY;
477
478 memset(&cso->state, 0, sizeof cso->state);
479 memcpy(&cso->state, templ, key_size);
480 cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
481 cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
482 cso->context = ctx->pipe;
483
484 iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
485 if (cso_hash_iter_is_null(iter)) {
486 FREE(cso);
487 return PIPE_ERROR_OUT_OF_MEMORY;
488 }
489
490 handle = cso->data;
491 }
492 else {
493 handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
494 }
495
496 if (ctx->blend != handle) {
497 ctx->blend = handle;
498 ctx->pipe->bind_blend_state(ctx->pipe, handle);
499 }
500 return PIPE_OK;
501 }
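
/*
 * Illustration of the caching behaviour above (the template values are
 * hypothetical): two calls with byte-identical templates hit the same cache
 * entry, and bind_blend_state is only reached when the handle changes.
 *
 *    struct pipe_blend_state blend;
 *    memset(&blend, 0, sizeof(blend));
 *    blend.rt[0].colormask = PIPE_MASK_RGBA;
 *    cso_set_blend(cso, &blend);    creates and binds a new driver CSO
 *    cso_set_blend(cso, &blend);    cache hit, same handle, no driver call
 *
 * Note that the key only covers rt[0] unless independent_blend_enable is
 * set, so templates differing only in rt[1..7] hash identically in the
 * non-independent case.
 */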
502
503 static void
504 cso_save_blend(struct cso_context *ctx)
505 {
506 assert(!ctx->blend_saved);
507 ctx->blend_saved = ctx->blend;
508 }
509
510 static void
511 cso_restore_blend(struct cso_context *ctx)
512 {
513 if (ctx->blend != ctx->blend_saved) {
514 ctx->blend = ctx->blend_saved;
515 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
516 }
517 ctx->blend_saved = NULL;
518 }
519
520
521
522 enum pipe_error
523 cso_set_depth_stencil_alpha(struct cso_context *ctx,
524 const struct pipe_depth_stencil_alpha_state *templ)
525 {
526 unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
527 unsigned hash_key = cso_construct_key((void*)templ, key_size);
528 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
529 hash_key,
530 CSO_DEPTH_STENCIL_ALPHA,
531 (void*)templ, key_size);
532 void *handle;
533
534 if (cso_hash_iter_is_null(iter)) {
535 struct cso_depth_stencil_alpha *cso =
536 MALLOC(sizeof(struct cso_depth_stencil_alpha));
537 if (!cso)
538 return PIPE_ERROR_OUT_OF_MEMORY;
539
540 memcpy(&cso->state, templ, sizeof(*templ));
541 cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
542 &cso->state);
543 cso->delete_state =
544 (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
545 cso->context = ctx->pipe;
546
547 iter = cso_insert_state(ctx->cache, hash_key,
548 CSO_DEPTH_STENCIL_ALPHA, cso);
549 if (cso_hash_iter_is_null(iter)) {
550 FREE(cso);
551 return PIPE_ERROR_OUT_OF_MEMORY;
552 }
553
554 handle = cso->data;
555 }
556 else {
557 handle = ((struct cso_depth_stencil_alpha *)
558 cso_hash_iter_data(iter))->data;
559 }
560
561 if (ctx->depth_stencil != handle) {
562 ctx->depth_stencil = handle;
563 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
564 }
565 return PIPE_OK;
566 }
567
568 static void
569 cso_save_depth_stencil_alpha(struct cso_context *ctx)
570 {
571 assert(!ctx->depth_stencil_saved);
572 ctx->depth_stencil_saved = ctx->depth_stencil;
573 }
574
575 static void
576 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
577 {
578 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
579 ctx->depth_stencil = ctx->depth_stencil_saved;
580 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
581 ctx->depth_stencil_saved);
582 }
583 ctx->depth_stencil_saved = NULL;
584 }
585
586
587
588 enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
589 const struct pipe_rasterizer_state *templ)
590 {
591 unsigned key_size = sizeof(struct pipe_rasterizer_state);
592 unsigned hash_key = cso_construct_key((void*)templ, key_size);
593 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
594 hash_key,
595 CSO_RASTERIZER,
596 (void*)templ, key_size);
597 void *handle = NULL;
598
599 /* We can't have both point_quad_rasterization (sprites) and point_smooth
600 * (round AA points) enabled at the same time.
601 */
602 assert(!(templ->point_quad_rasterization && templ->point_smooth));
603
604 if (cso_hash_iter_is_null(iter)) {
605 struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
606 if (!cso)
607 return PIPE_ERROR_OUT_OF_MEMORY;
608
609 memcpy(&cso->state, templ, sizeof(*templ));
610 cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
611 cso->delete_state =
612 (cso_state_callback)ctx->pipe->delete_rasterizer_state;
613 cso->context = ctx->pipe;
614
615 iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
616 if (cso_hash_iter_is_null(iter)) {
617 FREE(cso);
618 return PIPE_ERROR_OUT_OF_MEMORY;
619 }
620
621 handle = cso->data;
622 }
623 else {
624 handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
625 }
626
627 if (ctx->rasterizer != handle) {
628 ctx->rasterizer = handle;
629 ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
630 }
631 return PIPE_OK;
632 }
633
634 static void
635 cso_save_rasterizer(struct cso_context *ctx)
636 {
637 assert(!ctx->rasterizer_saved);
638 ctx->rasterizer_saved = ctx->rasterizer;
639 }
640
641 static void
642 cso_restore_rasterizer(struct cso_context *ctx)
643 {
644 if (ctx->rasterizer != ctx->rasterizer_saved) {
645 ctx->rasterizer = ctx->rasterizer_saved;
646 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
647 }
648 ctx->rasterizer_saved = NULL;
649 }
650
651
652 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
653 {
654 if (ctx->fragment_shader != handle) {
655 ctx->fragment_shader = handle;
656 ctx->pipe->bind_fs_state(ctx->pipe, handle);
657 }
658 }
659
660 void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
661 {
662 if (handle == ctx->fragment_shader) {
663 /* unbind before deleting */
664 ctx->pipe->bind_fs_state(ctx->pipe, NULL);
665 ctx->fragment_shader = NULL;
666 }
667 ctx->pipe->delete_fs_state(ctx->pipe, handle);
668 }
669
670 static void
671 cso_save_fragment_shader(struct cso_context *ctx)
672 {
673 assert(!ctx->fragment_shader_saved);
674 ctx->fragment_shader_saved = ctx->fragment_shader;
675 }
676
677 static void
678 cso_restore_fragment_shader(struct cso_context *ctx)
679 {
680 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
681 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
682 ctx->fragment_shader = ctx->fragment_shader_saved;
683 }
684 ctx->fragment_shader_saved = NULL;
685 }
686
687
688 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
689 {
690 if (ctx->vertex_shader != handle) {
691 ctx->vertex_shader = handle;
692 ctx->pipe->bind_vs_state(ctx->pipe, handle);
693 }
694 }
695
696 void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
697 {
698 if (handle == ctx->vertex_shader) {
699 /* unbind before deleting */
700 ctx->pipe->bind_vs_state(ctx->pipe, NULL);
701 ctx->vertex_shader = NULL;
702 }
703 ctx->pipe->delete_vs_state(ctx->pipe, handle);
704 }
705
706 static void
707 cso_save_vertex_shader(struct cso_context *ctx)
708 {
709 assert(!ctx->vertex_shader_saved);
710 ctx->vertex_shader_saved = ctx->vertex_shader;
711 }
712
713 static void
714 cso_restore_vertex_shader(struct cso_context *ctx)
715 {
716 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
717 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
718 ctx->vertex_shader = ctx->vertex_shader_saved;
719 }
720 ctx->vertex_shader_saved = NULL;
721 }
722
723
724 void cso_set_framebuffer(struct cso_context *ctx,
725 const struct pipe_framebuffer_state *fb)
726 {
727 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
728 util_copy_framebuffer_state(&ctx->fb, fb);
729 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
730 }
731 }
732
733 static void
734 cso_save_framebuffer(struct cso_context *ctx)
735 {
736 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
737 }
738
739 static void
740 cso_restore_framebuffer(struct cso_context *ctx)
741 {
742 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
743 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
744 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
745 util_unreference_framebuffer_state(&ctx->fb_saved);
746 }
747 }
748
749
750 void cso_set_viewport(struct cso_context *ctx,
751 const struct pipe_viewport_state *vp)
752 {
753 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
754 ctx->vp = *vp;
755 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
756 }
757 }
758
759 /**
760 * Set up viewport state for the given width and height (position is always (0,0)).
761 * Invert the Y axis if 'invert' is true.
762 */
763 void
764 cso_set_viewport_dims(struct cso_context *ctx,
765 float width, float height, boolean invert)
766 {
767 struct pipe_viewport_state vp;
768 vp.scale[0] = width * 0.5f;
769 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
770 vp.scale[2] = 0.5f;
771 vp.translate[0] = 0.5f * width;
772 vp.translate[1] = 0.5f * height;
773 vp.translate[2] = 0.5f;
774 cso_set_viewport(ctx, &vp);
775 }
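
/*
 * As a worked example of the mapping set up above (illustrative values):
 * width=4, height=2 and invert=TRUE give scale=(2,-1,0.5) and
 * translate=(2,1,0.5), so NDC x in [-1,1] maps to window x in [0,4] and
 * NDC y in [-1,1] maps to window y in [2,0], i.e. the Y axis is flipped.
 */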
776
777 static void
778 cso_save_viewport(struct cso_context *ctx)
779 {
780 ctx->vp_saved = ctx->vp;
781 }
782
783
784 static void
785 cso_restore_viewport(struct cso_context *ctx)
786 {
787 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
788 ctx->vp = ctx->vp_saved;
789 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
790 }
791 }
792
793
794 void cso_set_blend_color(struct cso_context *ctx,
795 const struct pipe_blend_color *bc)
796 {
797 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
798 ctx->blend_color = *bc;
799 ctx->pipe->set_blend_color(ctx->pipe, bc);
800 }
801 }
802
803 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
804 {
805 if (ctx->sample_mask != sample_mask) {
806 ctx->sample_mask = sample_mask;
807 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
808 }
809 }
810
811 static void
812 cso_save_sample_mask(struct cso_context *ctx)
813 {
814 ctx->sample_mask_saved = ctx->sample_mask;
815 }
816
817 static void
818 cso_restore_sample_mask(struct cso_context *ctx)
819 {
820 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
821 }
822
823 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
824 {
825 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
826 ctx->min_samples = min_samples;
827 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
828 }
829 }
830
831 static void
832 cso_save_min_samples(struct cso_context *ctx)
833 {
834 ctx->min_samples_saved = ctx->min_samples;
835 }
836
837 static void
838 cso_restore_min_samples(struct cso_context *ctx)
839 {
840 cso_set_min_samples(ctx, ctx->min_samples_saved);
841 }
842
843 void cso_set_stencil_ref(struct cso_context *ctx,
844 const struct pipe_stencil_ref *sr)
845 {
846 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
847 ctx->stencil_ref = *sr;
848 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
849 }
850 }
851
852 static void
853 cso_save_stencil_ref(struct cso_context *ctx)
854 {
855 ctx->stencil_ref_saved = ctx->stencil_ref;
856 }
857
858
859 static void
860 cso_restore_stencil_ref(struct cso_context *ctx)
861 {
862 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
863 sizeof(ctx->stencil_ref))) {
864 ctx->stencil_ref = ctx->stencil_ref_saved;
865 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
866 }
867 }
868
869 void cso_set_render_condition(struct cso_context *ctx,
870 struct pipe_query *query,
871 boolean condition,
872 enum pipe_render_cond_flag mode)
873 {
874 struct pipe_context *pipe = ctx->pipe;
875
876 if (ctx->render_condition != query ||
877 ctx->render_condition_mode != mode ||
878 ctx->render_condition_cond != condition) {
879 pipe->render_condition(pipe, query, condition, mode);
880 ctx->render_condition = query;
881 ctx->render_condition_cond = condition;
882 ctx->render_condition_mode = mode;
883 }
884 }
885
886 static void
887 cso_save_render_condition(struct cso_context *ctx)
888 {
889 ctx->render_condition_saved = ctx->render_condition;
890 ctx->render_condition_cond_saved = ctx->render_condition_cond;
891 ctx->render_condition_mode_saved = ctx->render_condition_mode;
892 }
893
894 static void
895 cso_restore_render_condition(struct cso_context *ctx)
896 {
897 cso_set_render_condition(ctx, ctx->render_condition_saved,
898 ctx->render_condition_cond_saved,
899 ctx->render_condition_mode_saved);
900 }
901
902 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
903 {
904 assert(ctx->has_geometry_shader || !handle);
905
906 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
907 ctx->geometry_shader = handle;
908 ctx->pipe->bind_gs_state(ctx->pipe, handle);
909 }
910 }
911
912 void cso_delete_geometry_shader(struct cso_context *ctx, void *handle)
913 {
914 if (handle == ctx->geometry_shader) {
915 /* unbind before deleting */
916 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
917 ctx->geometry_shader = NULL;
918 }
919 ctx->pipe->delete_gs_state(ctx->pipe, handle);
920 }
921
922 static void
923 cso_save_geometry_shader(struct cso_context *ctx)
924 {
925 if (!ctx->has_geometry_shader) {
926 return;
927 }
928
929 assert(!ctx->geometry_shader_saved);
930 ctx->geometry_shader_saved = ctx->geometry_shader;
931 }
932
933 static void
934 cso_restore_geometry_shader(struct cso_context *ctx)
935 {
936 if (!ctx->has_geometry_shader) {
937 return;
938 }
939
940 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
941 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
942 ctx->geometry_shader = ctx->geometry_shader_saved;
943 }
944 ctx->geometry_shader_saved = NULL;
945 }
946
947 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
948 {
949 assert(ctx->has_tessellation || !handle);
950
951 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
952 ctx->tessctrl_shader = handle;
953 ctx->pipe->bind_tcs_state(ctx->pipe, handle);
954 }
955 }
956
957 void cso_delete_tessctrl_shader(struct cso_context *ctx, void *handle)
958 {
959 if (handle == ctx->tessctrl_shader) {
960 /* unbind before deleting */
961 ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
962 ctx->tessctrl_shader = NULL;
963 }
964 ctx->pipe->delete_tcs_state(ctx->pipe, handle);
965 }
966
967 static void
968 cso_save_tessctrl_shader(struct cso_context *ctx)
969 {
970 if (!ctx->has_tessellation) {
971 return;
972 }
973
974 assert(!ctx->tessctrl_shader_saved);
975 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
976 }
977
978 static void
979 cso_restore_tessctrl_shader(struct cso_context *ctx)
980 {
981 if (!ctx->has_tessellation) {
982 return;
983 }
984
985 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
986 ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
987 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
988 }
989 ctx->tessctrl_shader_saved = NULL;
990 }
991
992 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
993 {
994 assert(ctx->has_tessellation || !handle);
995
996 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
997 ctx->tesseval_shader = handle;
998 ctx->pipe->bind_tes_state(ctx->pipe, handle);
999 }
1000 }
1001
1002 void cso_delete_tesseval_shader(struct cso_context *ctx, void *handle)
1003 {
1004 if (handle == ctx->tesseval_shader) {
1005 /* unbind before deleting */
1006 ctx->pipe->bind_tes_state(ctx->pipe, NULL);
1007 ctx->tesseval_shader = NULL;
1008 }
1009 ctx->pipe->delete_tes_state(ctx->pipe, handle);
1010 }
1011
1012 static void
1013 cso_save_tesseval_shader(struct cso_context *ctx)
1014 {
1015 if (!ctx->has_tessellation) {
1016 return;
1017 }
1018
1019 assert(!ctx->tesseval_shader_saved);
1020 ctx->tesseval_shader_saved = ctx->tesseval_shader;
1021 }
1022
1023 static void
1024 cso_restore_tesseval_shader(struct cso_context *ctx)
1025 {
1026 if (!ctx->has_tessellation) {
1027 return;
1028 }
1029
1030 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1031 ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
1032 ctx->tesseval_shader = ctx->tesseval_shader_saved;
1033 }
1034 ctx->tesseval_shader_saved = NULL;
1035 }
1036
1037 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
1038 {
1039 assert(ctx->has_compute_shader || !handle);
1040
1041 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1042 ctx->compute_shader = handle;
1043 ctx->pipe->bind_compute_state(ctx->pipe, handle);
1044 }
1045 }
1046
1047 void cso_delete_compute_shader(struct cso_context *ctx, void *handle)
1048 {
1049 if (handle == ctx->compute_shader) {
1050 /* unbind before deleting */
1051 ctx->pipe->bind_compute_state(ctx->pipe, NULL);
1052 ctx->compute_shader = NULL;
1053 }
1054 ctx->pipe->delete_compute_state(ctx->pipe, handle);
1055 }
1056
1057 static void
1058 cso_set_vertex_elements_direct(struct cso_context *ctx,
1059 unsigned count,
1060 const struct pipe_vertex_element *states)
1061 {
1062 unsigned key_size, hash_key;
1063 struct cso_hash_iter iter;
1064 void *handle;
1065 struct cso_velems_state velems_state;
1066
1067 /* We need to include the count in the stored state data too.
1068 * Otherwise the first 'count' pipe_vertex_elements could be identical
1069 * even if the count differs, and there's no guarantee the hash would
1070 * be different in that case either.
1071 */
1072 key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
1073 velems_state.count = count;
1074 memcpy(velems_state.velems, states,
1075 sizeof(struct pipe_vertex_element) * count);
1076 hash_key = cso_construct_key((void*)&velems_state, key_size);
1077 iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
1078 (void*)&velems_state, key_size);
1079
1080 if (cso_hash_iter_is_null(iter)) {
1081 struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
1082 if (!cso)
1083 return;
1084
1085 memcpy(&cso->state, &velems_state, key_size);
1086 cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
1087 &cso->state.velems[0]);
1088 cso->delete_state =
1089 (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
1090 cso->context = ctx->pipe;
1091
1092 iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
1093 if (cso_hash_iter_is_null(iter)) {
1094 FREE(cso);
1095 return;
1096 }
1097
1098 handle = cso->data;
1099 }
1100 else {
1101 handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
1102 }
1103
1104 if (ctx->velements != handle) {
1105 ctx->velements = handle;
1106 ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
1107 }
1108 }
1109
1110 enum pipe_error
1111 cso_set_vertex_elements(struct cso_context *ctx,
1112 unsigned count,
1113 const struct pipe_vertex_element *states)
1114 {
1115 struct u_vbuf *vbuf = ctx->vbuf;
1116
1117 if (vbuf) {
1118 u_vbuf_set_vertex_elements(vbuf, count, states);
1119 return PIPE_OK;
1120 }
1121
1122 cso_set_vertex_elements_direct(ctx, count, states);
1123 return PIPE_OK;
1124 }
1125
1126 static void
1127 cso_save_vertex_elements(struct cso_context *ctx)
1128 {
1129 struct u_vbuf *vbuf = ctx->vbuf;
1130
1131 if (vbuf) {
1132 u_vbuf_save_vertex_elements(vbuf);
1133 return;
1134 }
1135
1136 assert(!ctx->velements_saved);
1137 ctx->velements_saved = ctx->velements;
1138 }
1139
1140 static void
1141 cso_restore_vertex_elements(struct cso_context *ctx)
1142 {
1143 struct u_vbuf *vbuf = ctx->vbuf;
1144
1145 if (vbuf) {
1146 u_vbuf_restore_vertex_elements(vbuf);
1147 return;
1148 }
1149
1150 if (ctx->velements != ctx->velements_saved) {
1151 ctx->velements = ctx->velements_saved;
1152 ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1153 }
1154 ctx->velements_saved = NULL;
1155 }
1156
1157 /* vertex buffers */
1158
1159 static void
1160 cso_set_vertex_buffers_direct(struct cso_context *ctx,
1161 unsigned start_slot, unsigned count,
1162 const struct pipe_vertex_buffer *buffers)
1163 {
1164 /* Keep a reference to what's bound in the auxiliary slot, so that we can
1165 * save and restore it for meta ops.
1166 */
1167 if (start_slot == 0) {
1168 if (buffers) {
1169 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
1170 buffers);
1171 } else {
1172 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
1173 }
1174 }
1175
1176 ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
1177 }
1178
1179
1180 void cso_set_vertex_buffers(struct cso_context *ctx,
1181 unsigned start_slot, unsigned count,
1182 const struct pipe_vertex_buffer *buffers)
1183 {
1184 struct u_vbuf *vbuf = ctx->vbuf;
1185
1186 if (!count)
1187 return;
1188
1189 if (vbuf) {
1190 u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
1191 return;
1192 }
1193
1194 cso_set_vertex_buffers_direct(ctx, start_slot, count, buffers);
1195 }
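
/*
 * Sketch of pairing vertex buffer slot 0 with matching vertex elements
 * (the buffer and the single float4 attribute layout are assumptions made
 * for illustration only):
 *
 *    struct pipe_vertex_buffer vb;
 *    struct pipe_vertex_element ve;
 *    memset(&vb, 0, sizeof(vb));
 *    memset(&ve, 0, sizeof(ve));
 *    vb.buffer.resource = some_vertex_buffer;
 *    vb.stride = 4 * sizeof(float);
 *    ve.src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
 *    cso_set_vertex_buffers(cso, 0, 1, &vb);
 *    cso_set_vertex_elements(cso, 1, &ve);
 *
 * Both calls are routed through u_vbuf when it was enabled in
 * cso_init_vbuf(), so fallbacks for unsupported vertex formats or user
 * vertex buffers are handled transparently.
 */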
1196
1197 static void
1198 cso_save_vertex_buffer0(struct cso_context *ctx)
1199 {
1200 struct u_vbuf *vbuf = ctx->vbuf;
1201
1202 if (vbuf) {
1203 u_vbuf_save_vertex_buffer0(vbuf);
1204 return;
1205 }
1206
1207 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
1208 &ctx->vertex_buffer0_current);
1209 }
1210
1211 static void
1212 cso_restore_vertex_buffer0(struct cso_context *ctx)
1213 {
1214 struct u_vbuf *vbuf = ctx->vbuf;
1215
1216 if (vbuf) {
1217 u_vbuf_restore_vertex_buffer0(vbuf);
1218 return;
1219 }
1220
1221 cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
1222 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
1223 }
1224
1225
1226 void
1227 cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1228 unsigned idx, const struct pipe_sampler_state *templ)
1229 {
1230 if (templ) {
1231 unsigned key_size = sizeof(struct pipe_sampler_state);
1232 unsigned hash_key = cso_construct_key((void*)templ, key_size);
1233 struct cso_sampler *cso;
1234 struct cso_hash_iter iter =
1235 cso_find_state_template(ctx->cache,
1236 hash_key, CSO_SAMPLER,
1237 (void *) templ, key_size);
1238
1239 if (cso_hash_iter_is_null(iter)) {
1240 cso = MALLOC(sizeof(struct cso_sampler));
1241 if (!cso)
1242 return;
1243
1244 memcpy(&cso->state, templ, sizeof(*templ));
1245 cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
1246 cso->delete_state =
1247 (cso_state_callback) ctx->pipe->delete_sampler_state;
1248 cso->context = ctx->pipe;
1249 cso->hash_key = hash_key;
1250
1251 iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
1252 if (cso_hash_iter_is_null(iter)) {
1253 FREE(cso);
1254 return;
1255 }
1256 }
1257 else {
1258 cso = cso_hash_iter_data(iter);
1259 }
1260
1261 ctx->samplers[shader_stage].cso_samplers[idx] = cso;
1262 ctx->samplers[shader_stage].samplers[idx] = cso->data;
1263 ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1264 }
1265 }
1266
1267
1268 /**
1269 * Send staged sampler state to the driver.
1270 */
1271 void
1272 cso_single_sampler_done(struct cso_context *ctx,
1273 enum pipe_shader_type shader_stage)
1274 {
1275 struct sampler_info *info = &ctx->samplers[shader_stage];
1276
1277 if (ctx->max_sampler_seen == -1)
1278 return;
1279
1280 ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1281 ctx->max_sampler_seen + 1,
1282 info->samplers);
1283 ctx->max_sampler_seen = -1;
1284 }
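
/*
 * cso_single_sampler() and cso_single_sampler_done() are meant to be used
 * as a pair: stage any number of samplers first, then commit them with one
 * bind_sampler_states() call.  A small sketch (the sampler templates are
 * placeholders):
 *
 *    cso_single_sampler(cso, PIPE_SHADER_FRAGMENT, 0, &nearest_templ);
 *    cso_single_sampler(cso, PIPE_SHADER_FRAGMENT, 1, &linear_templ);
 *    cso_single_sampler_done(cso, PIPE_SHADER_FRAGMENT);
 *
 * max_sampler_seen tracks the highest index staged, so the final call binds
 * units [0..max_sampler_seen] and then resets the counter to -1.
 */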
1285
1286
1287 /*
1288 * If the function encounters any errors it will return the
1289 * last one. This is done to always try to set as many samplers
1290 * as possible.
1291 */
1292 void
1293 cso_set_samplers(struct cso_context *ctx,
1294 enum pipe_shader_type shader_stage,
1295 unsigned nr,
1296 const struct pipe_sampler_state **templates)
1297 {
1298 for (unsigned i = 0; i < nr; i++)
1299 cso_single_sampler(ctx, shader_stage, i, templates[i]);
1300
1301 cso_single_sampler_done(ctx, shader_stage);
1302 }
1303
1304 static void
1305 cso_save_fragment_samplers(struct cso_context *ctx)
1306 {
1307 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1308 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1309
1310 memcpy(saved->cso_samplers, info->cso_samplers,
1311 sizeof(info->cso_samplers));
1312 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1313 }
1314
1315
1316 static void
1317 cso_restore_fragment_samplers(struct cso_context *ctx)
1318 {
1319 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1320 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1321
1322 memcpy(info->cso_samplers, saved->cso_samplers,
1323 sizeof(info->cso_samplers));
1324 memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1325
1326 for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1327 if (info->samplers[i]) {
1328 ctx->max_sampler_seen = i;
1329 break;
1330 }
1331 }
1332
1333 cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
1334 }
1335
1336
1337 void
1338 cso_set_sampler_views(struct cso_context *ctx,
1339 enum pipe_shader_type shader_stage,
1340 unsigned count,
1341 struct pipe_sampler_view **views)
1342 {
1343 if (shader_stage == PIPE_SHADER_FRAGMENT) {
1344 unsigned i;
1345 boolean any_change = FALSE;
1346
1347 /* reference new views */
1348 for (i = 0; i < count; i++) {
1349 any_change |= ctx->fragment_views[i] != views[i];
1350 pipe_sampler_view_reference(&ctx->fragment_views[i], views[i]);
1351 }
1352 /* unref extra old views, if any */
1353 for (; i < ctx->nr_fragment_views; i++) {
1354 any_change |= ctx->fragment_views[i] != NULL;
1355 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1356 }
1357
1358 /* bind the new sampler views */
1359 if (any_change) {
1360 ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
1361 MAX2(ctx->nr_fragment_views, count),
1362 ctx->fragment_views);
1363 }
1364
1365 ctx->nr_fragment_views = count;
1366 }
1367 else
1368 ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, count, views);
1369 }
1370
1371
1372 static void
1373 cso_save_fragment_sampler_views(struct cso_context *ctx)
1374 {
1375 unsigned i;
1376
1377 ctx->nr_fragment_views_saved = ctx->nr_fragment_views;
1378
1379 for (i = 0; i < ctx->nr_fragment_views; i++) {
1380 assert(!ctx->fragment_views_saved[i]);
1381 pipe_sampler_view_reference(&ctx->fragment_views_saved[i],
1382 ctx->fragment_views[i]);
1383 }
1384 }
1385
1386
1387 static void
1388 cso_restore_fragment_sampler_views(struct cso_context *ctx)
1389 {
1390 unsigned i, nr_saved = ctx->nr_fragment_views_saved;
1391 unsigned num;
1392
1393 for (i = 0; i < nr_saved; i++) {
1394 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1395 /* move the reference from one pointer to another */
1396 ctx->fragment_views[i] = ctx->fragment_views_saved[i];
1397 ctx->fragment_views_saved[i] = NULL;
1398 }
1399 for (; i < ctx->nr_fragment_views; i++) {
1400 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1401 }
1402
1403 num = MAX2(ctx->nr_fragment_views, nr_saved);
1404
1405 /* bind the old/saved sampler views */
1406 ctx->pipe->set_sampler_views(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, num,
1407 ctx->fragment_views);
1408
1409 ctx->nr_fragment_views = nr_saved;
1410 ctx->nr_fragment_views_saved = 0;
1411 }
1412
1413
1414 void
1415 cso_set_shader_images(struct cso_context *ctx,
1416 enum pipe_shader_type shader_stage,
1417 unsigned start, unsigned count,
1418 struct pipe_image_view *images)
1419 {
1420 if (shader_stage == PIPE_SHADER_FRAGMENT && start == 0 && count >= 1) {
1421 util_copy_image_view(&ctx->fragment_image0_current, &images[0]);
1422 }
1423
1424 ctx->pipe->set_shader_images(ctx->pipe, shader_stage, start, count, images);
1425 }
1426
1427
1428 static void
1429 cso_save_fragment_image0(struct cso_context *ctx)
1430 {
1431 util_copy_image_view(&ctx->fragment_image0_saved,
1432 &ctx->fragment_image0_current);
1433 }
1434
1435
1436 static void
1437 cso_restore_fragment_image0(struct cso_context *ctx)
1438 {
1439 cso_set_shader_images(ctx, PIPE_SHADER_FRAGMENT, 0, 1,
1440 &ctx->fragment_image0_saved);
1441 }
1442
1443
1444 void
1445 cso_set_stream_outputs(struct cso_context *ctx,
1446 unsigned num_targets,
1447 struct pipe_stream_output_target **targets,
1448 const unsigned *offsets)
1449 {
1450 struct pipe_context *pipe = ctx->pipe;
1451 uint i;
1452
1453 if (!ctx->has_streamout) {
1454 assert(num_targets == 0);
1455 return;
1456 }
1457
1458 if (ctx->nr_so_targets == 0 && num_targets == 0) {
1459 /* Nothing to do. */
1460 return;
1461 }
1462
1463 /* reference new targets */
1464 for (i = 0; i < num_targets; i++) {
1465 pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1466 }
1467 /* unref extra old targets, if any */
1468 for (; i < ctx->nr_so_targets; i++) {
1469 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1470 }
1471
1472 pipe->set_stream_output_targets(pipe, num_targets, targets,
1473 offsets);
1474 ctx->nr_so_targets = num_targets;
1475 }
1476
1477 static void
1478 cso_save_stream_outputs(struct cso_context *ctx)
1479 {
1480 uint i;
1481
1482 if (!ctx->has_streamout) {
1483 return;
1484 }
1485
1486 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1487
1488 for (i = 0; i < ctx->nr_so_targets; i++) {
1489 assert(!ctx->so_targets_saved[i]);
1490 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1491 }
1492 }
1493
1494 static void
1495 cso_restore_stream_outputs(struct cso_context *ctx)
1496 {
1497 struct pipe_context *pipe = ctx->pipe;
1498 uint i;
1499 unsigned offset[PIPE_MAX_SO_BUFFERS];
1500
1501 if (!ctx->has_streamout) {
1502 return;
1503 }
1504
1505 if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
1506 /* Nothing to do. */
1507 return;
1508 }
1509
1510 assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
1511 for (i = 0; i < ctx->nr_so_targets_saved; i++) {
1512 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1513 /* move the reference from one pointer to another */
1514 ctx->so_targets[i] = ctx->so_targets_saved[i];
1515 ctx->so_targets_saved[i] = NULL;
1516 /* -1 means append */
1517 offset[i] = (unsigned)-1;
1518 }
1519 for (; i < ctx->nr_so_targets; i++) {
1520 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1521 }
1522
1523 pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
1524 ctx->so_targets, offset);
1525
1526 ctx->nr_so_targets = ctx->nr_so_targets_saved;
1527 ctx->nr_so_targets_saved = 0;
1528 }
1529
1530 /* constant buffers */
1531
1532 void
1533 cso_set_constant_buffer(struct cso_context *cso,
1534 enum pipe_shader_type shader_stage,
1535 unsigned index, struct pipe_constant_buffer *cb)
1536 {
1537 struct pipe_context *pipe = cso->pipe;
1538
1539 pipe->set_constant_buffer(pipe, shader_stage, index, cb);
1540
1541 if (index == 0) {
1542 util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
1543 }
1544 }
1545
1546 void
1547 cso_set_constant_buffer_resource(struct cso_context *cso,
1548 enum pipe_shader_type shader_stage,
1549 unsigned index,
1550 struct pipe_resource *buffer)
1551 {
1552 if (buffer) {
1553 struct pipe_constant_buffer cb;
1554 cb.buffer = buffer;
1555 cb.buffer_offset = 0;
1556 cb.buffer_size = buffer->width0;
1557 cb.user_buffer = NULL;
1558 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1559 } else {
1560 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1561 }
1562 }
1563
1564 void
1565 cso_set_constant_user_buffer(struct cso_context *cso,
1566 enum pipe_shader_type shader_stage,
1567 unsigned index, void *ptr, unsigned size)
1568 {
1569 if (ptr) {
1570 struct pipe_constant_buffer cb;
1571 cb.buffer = NULL;
1572 cb.buffer_offset = 0;
1573 cb.buffer_size = size;
1574 cb.user_buffer = ptr;
1575 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1576 } else {
1577 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1578 }
1579 }
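
/*
 * Example of uploading a few shader constants from user memory to constant
 * buffer slot 0 of the fragment shader (the constant values are arbitrary):
 *
 *    float consts[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
 *    cso_set_constant_user_buffer(cso, PIPE_SHADER_FRAGMENT, 0,
 *                                 consts, sizeof(consts));
 *
 * Because slot 0 is mirrored in aux_constbuf_current, such a binding can be
 * saved and restored around meta ops with cso_save_constant_buffer_slot0()
 * and cso_restore_constant_buffer_slot0().
 */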
1580
1581 void
1582 cso_save_constant_buffer_slot0(struct cso_context *cso,
1583 enum pipe_shader_type shader_stage)
1584 {
1585 util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
1586 &cso->aux_constbuf_current[shader_stage]);
1587 }
1588
1589 void
1590 cso_restore_constant_buffer_slot0(struct cso_context *cso,
1591 enum pipe_shader_type shader_stage)
1592 {
1593 cso_set_constant_buffer(cso, shader_stage, 0,
1594 &cso->aux_constbuf_saved[shader_stage]);
1595 pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
1596 NULL);
1597 }
1598
1599
1600 /**
1601 * Save all the CSO state items specified by the state_mask bitmask
1602 * of CSO_BIT_x flags.
1603 */
1604 void
1605 cso_save_state(struct cso_context *cso, unsigned state_mask)
1606 {
1607 assert(cso->saved_state == 0);
1608
1609 cso->saved_state = state_mask;
1610
1611 if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
1612 cso_save_vertex_buffer0(cso);
1613 if (state_mask & CSO_BIT_BLEND)
1614 cso_save_blend(cso);
1615 if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1616 cso_save_depth_stencil_alpha(cso);
1617 if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1618 cso_save_fragment_samplers(cso);
1619 if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
1620 cso_save_fragment_sampler_views(cso);
1621 if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1622 cso_save_fragment_shader(cso);
1623 if (state_mask & CSO_BIT_FRAMEBUFFER)
1624 cso_save_framebuffer(cso);
1625 if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1626 cso_save_geometry_shader(cso);
1627 if (state_mask & CSO_BIT_MIN_SAMPLES)
1628 cso_save_min_samples(cso);
1629 if (state_mask & CSO_BIT_RASTERIZER)
1630 cso_save_rasterizer(cso);
1631 if (state_mask & CSO_BIT_RENDER_CONDITION)
1632 cso_save_render_condition(cso);
1633 if (state_mask & CSO_BIT_SAMPLE_MASK)
1634 cso_save_sample_mask(cso);
1635 if (state_mask & CSO_BIT_STENCIL_REF)
1636 cso_save_stencil_ref(cso);
1637 if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1638 cso_save_stream_outputs(cso);
1639 if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1640 cso_save_tessctrl_shader(cso);
1641 if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1642 cso_save_tesseval_shader(cso);
1643 if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1644 cso_save_vertex_elements(cso);
1645 if (state_mask & CSO_BIT_VERTEX_SHADER)
1646 cso_save_vertex_shader(cso);
1647 if (state_mask & CSO_BIT_VIEWPORT)
1648 cso_save_viewport(cso);
1649 if (state_mask & CSO_BIT_PAUSE_QUERIES)
1650 cso->pipe->set_active_query_state(cso->pipe, false);
1651 if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
1652 cso_save_fragment_image0(cso);
1653 }
1654
1655
1656 /**
1657 * Restore the state which was saved by cso_save_state().
1658 */
1659 void
1660 cso_restore_state(struct cso_context *cso)
1661 {
1662 unsigned state_mask = cso->saved_state;
1663
1664 assert(state_mask);
1665
1666 if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
1667 cso_restore_vertex_buffer0(cso);
1668 if (state_mask & CSO_BIT_BLEND)
1669 cso_restore_blend(cso);
1670 if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1671 cso_restore_depth_stencil_alpha(cso);
1672 if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1673 cso_restore_fragment_samplers(cso);
1674 if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
1675 cso_restore_fragment_sampler_views(cso);
1676 if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1677 cso_restore_fragment_shader(cso);
1678 if (state_mask & CSO_BIT_FRAMEBUFFER)
1679 cso_restore_framebuffer(cso);
1680 if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1681 cso_restore_geometry_shader(cso);
1682 if (state_mask & CSO_BIT_MIN_SAMPLES)
1683 cso_restore_min_samples(cso);
1684 if (state_mask & CSO_BIT_RASTERIZER)
1685 cso_restore_rasterizer(cso);
1686 if (state_mask & CSO_BIT_RENDER_CONDITION)
1687 cso_restore_render_condition(cso);
1688 if (state_mask & CSO_BIT_SAMPLE_MASK)
1689 cso_restore_sample_mask(cso);
1690 if (state_mask & CSO_BIT_STENCIL_REF)
1691 cso_restore_stencil_ref(cso);
1692 if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1693 cso_restore_stream_outputs(cso);
1694 if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1695 cso_restore_tessctrl_shader(cso);
1696 if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1697 cso_restore_tesseval_shader(cso);
1698 if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1699 cso_restore_vertex_elements(cso);
1700 if (state_mask & CSO_BIT_VERTEX_SHADER)
1701 cso_restore_vertex_shader(cso);
1702 if (state_mask & CSO_BIT_VIEWPORT)
1703 cso_restore_viewport(cso);
1704 if (state_mask & CSO_BIT_PAUSE_QUERIES)
1705 cso->pipe->set_active_query_state(cso->pipe, true);
1706 if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
1707 cso_restore_fragment_image0(cso);
1708
1709 cso->saved_state = 0;
1710 }
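
/*
 * Typical meta-op pattern built on the two functions above (a minimal
 * sketch; the exact bitmask depends on what the meta op clobbers):
 *
 *    cso_save_state(cso, CSO_BIT_BLEND |
 *                        CSO_BIT_FRAGMENT_SHADER |
 *                        CSO_BIT_VERTEX_SHADER |
 *                        CSO_BIT_VIEWPORT);
 *       ... bind the meta op's own blend/shaders/viewport and draw ...
 *    cso_restore_state(cso);
 *
 * cso_save_state() asserts that no other save is pending, so save/restore
 * pairs must not be nested.
 */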
1711
1712
1713
1714 /* drawing */
1715
1716 void
1717 cso_draw_vbo(struct cso_context *cso,
1718 const struct pipe_draw_info *info)
1719 {
1720 struct u_vbuf *vbuf = cso->vbuf;
1721
1722 /* We can't have both indirect drawing and SO-vertex-count drawing */
1723 assert(info->indirect == NULL || info->count_from_stream_output == NULL);
1724
1725 /* We can't have SO-vertex-count drawing with an index buffer */
1726 assert(info->count_from_stream_output == NULL || info->index_size == 0);
1727
1728 if (vbuf) {
1729 u_vbuf_draw_vbo(vbuf, info);
1730 } else {
1731 struct pipe_context *pipe = cso->pipe;
1732 pipe->draw_vbo(pipe, info);
1733 }
1734 }
1735
1736 void
1737 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1738 {
1739 struct pipe_draw_info info;
1740
1741 util_draw_init_info(&info);
1742
1743 info.mode = mode;
1744 info.start = start;
1745 info.count = count;
1746 info.min_index = start;
1747 info.max_index = start + count - 1;
1748
1749 cso_draw_vbo(cso, &info);
1750 }
1751
1752 void
1753 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1754 uint start, uint count,
1755 uint start_instance, uint instance_count)
1756 {
1757 struct pipe_draw_info info;
1758
1759 util_draw_init_info(&info);
1760
1761 info.mode = mode;
1762 info.start = start;
1763 info.count = count;
1764 info.min_index = start;
1765 info.max_index = start + count - 1;
1766 info.start_instance = start_instance;
1767 info.instance_count = instance_count;
1768
1769 cso_draw_vbo(cso, &info);
1770 }