cso: fix stream-out clean up in cso_release_all()
[mesa.git] / src / gallium / auxiliary / cso_cache / cso_context.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
/**
 * Info related to samplers and sampler views.
 * We have one of these for fragment samplers and another for vertex samplers.
 */
struct sampler_info
{
   /* State last pushed to pipe->bind_sampler_states(); compared against
    * the desired state in single_sampler_done() to skip redundant binds.
    */
   struct {
      void *samplers[PIPE_MAX_SAMPLERS];
      unsigned nr_samplers;
   } hw;

   /* Desired sampler CSO handles (may not be bound yet). */
   void *samplers[PIPE_MAX_SAMPLERS];
   unsigned nr_samplers;

   /* One-deep save slot for samplers (meta operations). */
   void *samplers_saved[PIPE_MAX_SAMPLERS];
   unsigned nr_samplers_saved;

   /* Currently set sampler views (reference counted). */
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_views;

   /* One-deep save slot for sampler views. */
   struct pipe_sampler_view *views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_views_saved;
};
76
77
78
/**
 * The CSO context: wraps a pipe_context, caching created state objects
 * (blend, DSA, rasterizer, samplers, vertex elements) and tracking the
 * currently bound state so redundant driver calls can be skipped.
 */
struct cso_context {
   struct pipe_context *pipe;
   struct cso_cache *cache;        /* hash cache of driver CSOs */
   struct u_vbuf *vbuf;            /* optional vertex-buffer fallback layer */

   boolean has_geometry_shader;    /* driver supports GS */
   boolean has_streamout;          /* driver supports stream output */

   /* Per-shader-stage sampler/sampler-view tracking. */
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Vertex buffer slot reserved for meta operations (see
    * aux_vertex_buffer_index), with its one-deep save slot.
    */
   struct pipe_vertex_buffer aux_vertex_buffer_current;
   struct pipe_vertex_buffer aux_vertex_buffer_saved;
   unsigned aux_vertex_buffer_index;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   /* Currently bound stream-output targets (reference counted). */
   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_clip_state clip;
   struct pipe_clip_state clip_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
126
127
128 static boolean delete_blend_state(struct cso_context *ctx, void *state)
129 {
130 struct cso_blend *cso = (struct cso_blend *)state;
131
132 if (ctx->blend == cso->data)
133 return FALSE;
134
135 if (cso->delete_state)
136 cso->delete_state(cso->context, cso->data);
137 FREE(state);
138 return TRUE;
139 }
140
141 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
142 {
143 struct cso_depth_stencil_alpha *cso =
144 (struct cso_depth_stencil_alpha *)state;
145
146 if (ctx->depth_stencil == cso->data)
147 return FALSE;
148
149 if (cso->delete_state)
150 cso->delete_state(cso->context, cso->data);
151 FREE(state);
152
153 return TRUE;
154 }
155
/* Destroy a cached sampler CSO.  Always succeeds.
 * NOTE(review): unlike the other delete_*_state helpers there is no
 * currently-bound check here, and 'ctx' is unused — presumably safe
 * because sampler handles are re-sent on every single_sampler_done();
 * verify before relying on eviction while bound.
 */
static boolean delete_sampler_state(struct cso_context *ctx, void *state)
{
   struct cso_sampler *cso = (struct cso_sampler *)state;
   if (cso->delete_state)
      cso->delete_state(cso->context, cso->data);
   FREE(state);
   return TRUE;
}
164
165 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
166 {
167 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
168
169 if (ctx->rasterizer == cso->data)
170 return FALSE;
171 if (cso->delete_state)
172 cso->delete_state(cso->context, cso->data);
173 FREE(state);
174 return TRUE;
175 }
176
177 static boolean delete_vertex_elements(struct cso_context *ctx,
178 void *state)
179 {
180 struct cso_velements *cso = (struct cso_velements *)state;
181
182 if (ctx->velements == cso->data)
183 return FALSE;
184
185 if (cso->delete_state)
186 cso->delete_state(cso->context, cso->data);
187 FREE(state);
188 return TRUE;
189 }
190
191
192 static INLINE boolean delete_cso(struct cso_context *ctx,
193 void *state, enum cso_cache_type type)
194 {
195 switch (type) {
196 case CSO_BLEND:
197 return delete_blend_state(ctx, state);
198 case CSO_SAMPLER:
199 return delete_sampler_state(ctx, state);
200 case CSO_DEPTH_STENCIL_ALPHA:
201 return delete_depth_stencil_state(ctx, state);
202 case CSO_RASTERIZER:
203 return delete_rasterizer_state(ctx, state);
204 case CSO_VELEMENTS:
205 return delete_vertex_elements(ctx, state);
206 default:
207 assert(0);
208 FREE(state);
209 }
210 return FALSE;
211 }
212
/**
 * Cache-eviction callback invoked by the cso cache when a hash is about
 * to exceed its maximum size.  Removes roughly a quarter of the entries
 * (plus any overflow), skipping CSOs that are currently bound.
 */
static INLINE void
sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
              int max_size, void *user_data)
{
   struct cso_context *ctx = (struct cso_context *)user_data;
   /* If we're approaching the maximum size, remove a fourth of the
    * entries; otherwise every subsequent call would do the same work.
    */
   int hash_size = cso_hash_size(hash);
   int max_entries = (max_size > hash_size) ? max_size : hash_size;
   int to_remove =  (max_size < max_entries) * max_entries/4;
   struct cso_hash_iter iter = cso_hash_first_node(hash);
   if (hash_size > max_size)
      to_remove += hash_size - max_size;
   while (to_remove) {
      /*remove elements until we're good */
      /*fixme: currently we pick the nodes to remove at random*/
      void *cso = cso_hash_iter_data(iter);
      if (delete_cso(ctx, cso, type)) {
         iter = cso_hash_erase(hash, iter);
         --to_remove;
      } else
         /* Bound CSO: skip it.  NOTE(review): assumes enough deletable
          * entries exist to satisfy to_remove; verify the iterator wraps
          * or terminates if most entries are bound.
          */
         iter = cso_hash_iter_next(iter);
   }
}
237
238 static void cso_init_vbuf(struct cso_context *cso)
239 {
240 struct u_vbuf_caps caps;
241
242 u_vbuf_get_caps(cso->pipe->screen, &caps);
243
244 /* Install u_vbuf if there is anything unsupported. */
245 if (!caps.buffer_offset_unaligned ||
246 !caps.buffer_stride_unaligned ||
247 !caps.velem_src_offset_unaligned ||
248 !caps.format_fixed32 ||
249 !caps.format_float16 ||
250 !caps.format_float64 ||
251 !caps.format_norm32 ||
252 !caps.format_scaled32 ||
253 !caps.user_vertex_buffers) {
254 cso->vbuf = u_vbuf_create(cso->pipe, &caps,
255 cso->aux_vertex_buffer_index);
256 }
257 }
258
/**
 * Create a CSO context wrapping the given pipe context.
 * Returns NULL on allocation failure (partially-initialized state is
 * released via cso_destroy_context, which tolerates NULL/partial input).
 */
struct cso_context *cso_create_context( struct pipe_context *pipe )
{
   struct cso_context *ctx = CALLOC_STRUCT(cso_context);
   if (ctx == NULL)
      goto out;

   ctx->cache = cso_cache_create();
   if (ctx->cache == NULL)
      goto out;
   cso_cache_set_sanitize_callback(ctx->cache,
                                   sanitize_hash,
                                   ctx);

   ctx->pipe = pipe;
   ctx->sample_mask = ~0;   /* all samples enabled by default */

   ctx->aux_vertex_buffer_index = 0; /* 0 for now */

   cso_init_vbuf(ctx);

   /* Enable for testing: */
   if (0) cso_set_maximum_cache_size( ctx->cache, 4 );

   /* Probe optional driver capabilities once, up front. */
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
                                      PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_geometry_shader = TRUE;
   }
   if (pipe->screen->get_param(pipe->screen,
                               PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
      ctx->has_streamout = TRUE;
   }

   return ctx;

out:
   cso_destroy_context( ctx );
   return NULL;
}
297
/**
 * Prior to context destruction, this function unbinds all state objects
 * from the pipe context and releases every reference the cso_context
 * holds (sampler views, framebuffer surfaces, buffers, SO targets),
 * then deletes the CSO cache (and with it the cached driver objects).
 */
void cso_release_all( struct cso_context *ctx )
{
   unsigned i, shader;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* Unbind all samplers and sampler views on every shader stage,
          * using the per-stage limits reported by the screen.
          */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         unsigned sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      /* Unbind stream-output targets from the driver before dropping our
       * references below.
       */
      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* free fragment sampler views */
   for (shader = 0; shader < Elements(ctx->samplers); shader++) {
      struct sampler_info *info = &ctx->samplers[shader];
      for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
         pipe_sampler_view_reference(&info->views[i], NULL);
         pipe_sampler_view_reference(&info->views_saved[i], NULL);
      }
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
   pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   /* Release current and saved stream-output target references. */
   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }
}
369
370
371 /**
372 * Free the CSO context. NOTE: the state tracker should have previously called
373 * cso_release_all().
374 */
375 void cso_destroy_context( struct cso_context *ctx )
376 {
377 if (ctx) {
378 if (ctx->vbuf)
379 u_vbuf_destroy(ctx->vbuf);
380 FREE( ctx );
381 }
382 }
383
384
/* These functions will either find the state matching the given template
 * in the cache, or create a new state from the template, insert it into
 * the cache, and return it.
 */

/*
 * If the driver returns 0 from the create method, the data member of the
 * cso is assigned the template itself.
 */
394
/**
 * Bind a blend state matching the template, creating and caching the
 * driver CSO on first use.
 */
enum pipe_error cso_set_blend(struct cso_context *ctx,
                              const struct pipe_blend_state *templ)
{
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;

   /* When independent blend is disabled only rt[0] matters, so hash just
    * the prefix of the struct up to (excluding) rt[1].
    */
   key_size = templ->independent_blend_enable ?
      sizeof(struct pipe_blend_state) :
      (char *)&(templ->rt[1]) - (char *)templ;
   hash_key = cso_construct_key((void*)templ, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
                                  (void*)templ, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver object and insert it. */
      struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Zero the whole state, then copy only the hashed prefix, so the
       * stored key bytes are fully defined.
       */
      memset(&cso->state, 0, sizeof cso->state);
      memcpy(&cso->state, templ, key_size);
      cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
      cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
      if (cso_hash_iter_is_null(iter)) {
         /* NOTE(review): cso->data (the driver object) is not destroyed
          * on this path — confirm whether that leak is acceptable.
          */
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only on change, to skip redundant driver calls. */
   if (ctx->blend != handle) {
      ctx->blend = handle;
      ctx->pipe->bind_blend_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
438
439 void cso_save_blend(struct cso_context *ctx)
440 {
441 assert(!ctx->blend_saved);
442 ctx->blend_saved = ctx->blend;
443 }
444
445 void cso_restore_blend(struct cso_context *ctx)
446 {
447 if (ctx->blend != ctx->blend_saved) {
448 ctx->blend = ctx->blend_saved;
449 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
450 }
451 ctx->blend_saved = NULL;
452 }
453
454
455
/**
 * Bind a depth/stencil/alpha state matching the template, creating and
 * caching the driver CSO on first use.
 */
enum pipe_error
cso_set_depth_stencil_alpha(struct cso_context *ctx,
                            const struct pipe_depth_stencil_alpha_state *templ)
{
   unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
   unsigned hash_key = cso_construct_key((void*)templ, key_size);
   struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
                                                       hash_key,
                                                       CSO_DEPTH_STENCIL_ALPHA,
                                                       (void*)templ, key_size);
   void *handle;

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver object and insert it. */
      struct cso_depth_stencil_alpha *cso =
         MALLOC(sizeof(struct cso_depth_stencil_alpha));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      memcpy(&cso->state, templ, sizeof(*templ));
      cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
                                                              &cso->state);
      cso->delete_state =
         (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key,
                              CSO_DEPTH_STENCIL_ALPHA, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_depth_stencil_alpha *)
                cso_hash_iter_data(iter))->data;
   }

   /* Bind only on change, to skip redundant driver calls. */
   if (ctx->depth_stencil != handle) {
      ctx->depth_stencil = handle;
      ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
501
502 void cso_save_depth_stencil_alpha(struct cso_context *ctx)
503 {
504 assert(!ctx->depth_stencil_saved);
505 ctx->depth_stencil_saved = ctx->depth_stencil;
506 }
507
508 void cso_restore_depth_stencil_alpha(struct cso_context *ctx)
509 {
510 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
511 ctx->depth_stencil = ctx->depth_stencil_saved;
512 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
513 ctx->depth_stencil_saved);
514 }
515 ctx->depth_stencil_saved = NULL;
516 }
517
518
519
/**
 * Bind a rasterizer state matching the template, creating and caching
 * the driver CSO on first use.
 */
enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
                                   const struct pipe_rasterizer_state *templ)
{
   unsigned key_size = sizeof(struct pipe_rasterizer_state);
   unsigned hash_key = cso_construct_key((void*)templ, key_size);
   struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
                                                       hash_key,
                                                       CSO_RASTERIZER,
                                                       (void*)templ, key_size);
   void *handle = NULL;

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver object and insert it. */
      struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      memcpy(&cso->state, templ, sizeof(*templ));
      cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
      cso->delete_state =
         (cso_state_callback)ctx->pipe->delete_rasterizer_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only on change, to skip redundant driver calls. */
   if (ctx->rasterizer != handle) {
      ctx->rasterizer = handle;
      ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
560
561 void cso_save_rasterizer(struct cso_context *ctx)
562 {
563 assert(!ctx->rasterizer_saved);
564 ctx->rasterizer_saved = ctx->rasterizer;
565 }
566
567 void cso_restore_rasterizer(struct cso_context *ctx)
568 {
569 if (ctx->rasterizer != ctx->rasterizer_saved) {
570 ctx->rasterizer = ctx->rasterizer_saved;
571 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
572 }
573 ctx->rasterizer_saved = NULL;
574 }
575
576
577 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
578 {
579 if (ctx->fragment_shader != handle) {
580 ctx->fragment_shader = handle;
581 ctx->pipe->bind_fs_state(ctx->pipe, handle);
582 }
583 }
584
585 void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
586 {
587 if (handle == ctx->fragment_shader) {
588 /* unbind before deleting */
589 ctx->pipe->bind_fs_state(ctx->pipe, NULL);
590 ctx->fragment_shader = NULL;
591 }
592 ctx->pipe->delete_fs_state(ctx->pipe, handle);
593 }
594
595 void cso_save_fragment_shader(struct cso_context *ctx)
596 {
597 assert(!ctx->fragment_shader_saved);
598 ctx->fragment_shader_saved = ctx->fragment_shader;
599 }
600
601 void cso_restore_fragment_shader(struct cso_context *ctx)
602 {
603 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
604 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
605 ctx->fragment_shader = ctx->fragment_shader_saved;
606 }
607 ctx->fragment_shader_saved = NULL;
608 }
609
610
611 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
612 {
613 if (ctx->vertex_shader != handle) {
614 ctx->vertex_shader = handle;
615 ctx->pipe->bind_vs_state(ctx->pipe, handle);
616 }
617 }
618
619 void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
620 {
621 if (handle == ctx->vertex_shader) {
622 /* unbind before deleting */
623 ctx->pipe->bind_vs_state(ctx->pipe, NULL);
624 ctx->vertex_shader = NULL;
625 }
626 ctx->pipe->delete_vs_state(ctx->pipe, handle);
627 }
628
629 void cso_save_vertex_shader(struct cso_context *ctx)
630 {
631 assert(!ctx->vertex_shader_saved);
632 ctx->vertex_shader_saved = ctx->vertex_shader;
633 }
634
635 void cso_restore_vertex_shader(struct cso_context *ctx)
636 {
637 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
638 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
639 ctx->vertex_shader = ctx->vertex_shader_saved;
640 }
641 ctx->vertex_shader_saved = NULL;
642 }
643
644
645 void cso_set_framebuffer(struct cso_context *ctx,
646 const struct pipe_framebuffer_state *fb)
647 {
648 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
649 util_copy_framebuffer_state(&ctx->fb, fb);
650 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
651 }
652 }
653
654 void cso_save_framebuffer(struct cso_context *ctx)
655 {
656 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
657 }
658
659 void cso_restore_framebuffer(struct cso_context *ctx)
660 {
661 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
662 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
663 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
664 util_unreference_framebuffer_state(&ctx->fb_saved);
665 }
666 }
667
668
669 void cso_set_viewport(struct cso_context *ctx,
670 const struct pipe_viewport_state *vp)
671 {
672 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
673 ctx->vp = *vp;
674 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
675 }
676 }
677
678 void cso_save_viewport(struct cso_context *ctx)
679 {
680 ctx->vp_saved = ctx->vp;
681 }
682
683
684 void cso_restore_viewport(struct cso_context *ctx)
685 {
686 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
687 ctx->vp = ctx->vp_saved;
688 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
689 }
690 }
691
692
693 void cso_set_blend_color(struct cso_context *ctx,
694 const struct pipe_blend_color *bc)
695 {
696 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
697 ctx->blend_color = *bc;
698 ctx->pipe->set_blend_color(ctx->pipe, bc);
699 }
700 }
701
702 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
703 {
704 if (ctx->sample_mask != sample_mask) {
705 ctx->sample_mask = sample_mask;
706 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
707 }
708 }
709
710 void cso_save_sample_mask(struct cso_context *ctx)
711 {
712 ctx->sample_mask_saved = ctx->sample_mask;
713 }
714
715 void cso_restore_sample_mask(struct cso_context *ctx)
716 {
717 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
718 }
719
720 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
721 {
722 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
723 ctx->min_samples = min_samples;
724 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
725 }
726 }
727
728 void cso_save_min_samples(struct cso_context *ctx)
729 {
730 ctx->min_samples_saved = ctx->min_samples;
731 }
732
733 void cso_restore_min_samples(struct cso_context *ctx)
734 {
735 cso_set_min_samples(ctx, ctx->min_samples_saved);
736 }
737
738 void cso_set_stencil_ref(struct cso_context *ctx,
739 const struct pipe_stencil_ref *sr)
740 {
741 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
742 ctx->stencil_ref = *sr;
743 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
744 }
745 }
746
747 void cso_save_stencil_ref(struct cso_context *ctx)
748 {
749 ctx->stencil_ref_saved = ctx->stencil_ref;
750 }
751
752
753 void cso_restore_stencil_ref(struct cso_context *ctx)
754 {
755 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
756 sizeof(ctx->stencil_ref))) {
757 ctx->stencil_ref = ctx->stencil_ref_saved;
758 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
759 }
760 }
761
762 void cso_set_render_condition(struct cso_context *ctx,
763 struct pipe_query *query,
764 boolean condition, uint mode)
765 {
766 struct pipe_context *pipe = ctx->pipe;
767
768 if (ctx->render_condition != query ||
769 ctx->render_condition_mode != mode ||
770 ctx->render_condition_cond != condition) {
771 pipe->render_condition(pipe, query, condition, mode);
772 ctx->render_condition = query;
773 ctx->render_condition_cond = condition;
774 ctx->render_condition_mode = mode;
775 }
776 }
777
778 void cso_save_render_condition(struct cso_context *ctx)
779 {
780 ctx->render_condition_saved = ctx->render_condition;
781 ctx->render_condition_cond_saved = ctx->render_condition_cond;
782 ctx->render_condition_mode_saved = ctx->render_condition_mode;
783 }
784
785 void cso_restore_render_condition(struct cso_context *ctx)
786 {
787 cso_set_render_condition(ctx, ctx->render_condition_saved,
788 ctx->render_condition_cond_saved,
789 ctx->render_condition_mode_saved);
790 }
791
792 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
793 {
794 assert(ctx->has_geometry_shader || !handle);
795
796 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
797 ctx->geometry_shader = handle;
798 ctx->pipe->bind_gs_state(ctx->pipe, handle);
799 }
800 }
801
802 void cso_delete_geometry_shader(struct cso_context *ctx, void *handle)
803 {
804 if (handle == ctx->geometry_shader) {
805 /* unbind before deleting */
806 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
807 ctx->geometry_shader = NULL;
808 }
809 ctx->pipe->delete_gs_state(ctx->pipe, handle);
810 }
811
812 void cso_save_geometry_shader(struct cso_context *ctx)
813 {
814 if (!ctx->has_geometry_shader) {
815 return;
816 }
817
818 assert(!ctx->geometry_shader_saved);
819 ctx->geometry_shader_saved = ctx->geometry_shader;
820 }
821
822 void cso_restore_geometry_shader(struct cso_context *ctx)
823 {
824 if (!ctx->has_geometry_shader) {
825 return;
826 }
827
828 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
829 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
830 ctx->geometry_shader = ctx->geometry_shader_saved;
831 }
832 ctx->geometry_shader_saved = NULL;
833 }
834
835 /* clip state */
836
837 static INLINE void
838 clip_state_cpy(struct pipe_clip_state *dst,
839 const struct pipe_clip_state *src)
840 {
841 memcpy(dst->ucp, src->ucp, sizeof(dst->ucp));
842 }
843
844 static INLINE int
845 clip_state_cmp(const struct pipe_clip_state *a,
846 const struct pipe_clip_state *b)
847 {
848 return memcmp(a->ucp, b->ucp, sizeof(a->ucp));
849 }
850
851 void
852 cso_set_clip(struct cso_context *ctx,
853 const struct pipe_clip_state *clip)
854 {
855 if (clip_state_cmp(&ctx->clip, clip)) {
856 clip_state_cpy(&ctx->clip, clip);
857 ctx->pipe->set_clip_state(ctx->pipe, clip);
858 }
859 }
860
861 void
862 cso_save_clip(struct cso_context *ctx)
863 {
864 clip_state_cpy(&ctx->clip_saved, &ctx->clip);
865 }
866
867 void
868 cso_restore_clip(struct cso_context *ctx)
869 {
870 if (clip_state_cmp(&ctx->clip, &ctx->clip_saved)) {
871 clip_state_cpy(&ctx->clip, &ctx->clip_saved);
872 ctx->pipe->set_clip_state(ctx->pipe, &ctx->clip_saved);
873 }
874 }
875
/**
 * Bind a vertex-elements state for the given element array, creating and
 * caching the driver CSO on first use.  When u_vbuf is active, the call
 * is forwarded there instead.
 */
enum pipe_error
cso_set_vertex_elements(struct cso_context *ctx,
                        unsigned count,
                        const struct pipe_vertex_element *states)
{
   struct u_vbuf *vbuf = ctx->vbuf;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;
   struct cso_velems_state velems_state;

   if (vbuf) {
      u_vbuf_set_vertex_elements(vbuf, count, states);
      return PIPE_OK;
   }

   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver object and insert it. */
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Copy only key_size bytes: count plus the used elements. */
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
                                                          &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only on change, to skip redundant driver calls. */
   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
935
936 void cso_save_vertex_elements(struct cso_context *ctx)
937 {
938 struct u_vbuf *vbuf = ctx->vbuf;
939
940 if (vbuf) {
941 u_vbuf_save_vertex_elements(vbuf);
942 return;
943 }
944
945 assert(!ctx->velements_saved);
946 ctx->velements_saved = ctx->velements;
947 }
948
949 void cso_restore_vertex_elements(struct cso_context *ctx)
950 {
951 struct u_vbuf *vbuf = ctx->vbuf;
952
953 if (vbuf) {
954 u_vbuf_restore_vertex_elements(vbuf);
955 return;
956 }
957
958 if (ctx->velements != ctx->velements_saved) {
959 ctx->velements = ctx->velements_saved;
960 ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
961 }
962 ctx->velements_saved = NULL;
963 }
964
965 /* vertex buffers */
966
/**
 * Set vertex buffers, delegating to u_vbuf when it is active.
 * Additionally shadows whatever lands in the auxiliary slot so it can be
 * saved/restored around meta operations.
 */
void cso_set_vertex_buffers(struct cso_context *ctx,
                            unsigned start_slot, unsigned count,
                            const struct pipe_vertex_buffer *buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   if (vbuf) {
      u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
      return;
   }

   /* Save what's in the auxiliary slot, so that we can save and restore it
    * for meta ops. */
   if (start_slot <= ctx->aux_vertex_buffer_index &&
       start_slot+count > ctx->aux_vertex_buffer_index) {
      if (buffers) {
         const struct pipe_vertex_buffer *vb =
            buffers + (ctx->aux_vertex_buffer_index - start_slot);

         /* Take the buffer reference first, then copy the whole struct
          * (the copied pointer matches the reference just taken).
          */
         pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
                                 vb->buffer);
         memcpy(&ctx->aux_vertex_buffer_current, vb,
                sizeof(struct pipe_vertex_buffer));
      }
      else {
         pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
                                 NULL);
         ctx->aux_vertex_buffer_current.user_buffer = NULL;
      }
   }

   ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
}
1000
1001 void cso_save_aux_vertex_buffer_slot(struct cso_context *ctx)
1002 {
1003 struct u_vbuf *vbuf = ctx->vbuf;
1004
1005 if (vbuf) {
1006 u_vbuf_save_aux_vertex_buffer_slot(vbuf);
1007 return;
1008 }
1009
1010 pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer,
1011 ctx->aux_vertex_buffer_current.buffer);
1012 memcpy(&ctx->aux_vertex_buffer_saved, &ctx->aux_vertex_buffer_current,
1013 sizeof(struct pipe_vertex_buffer));
1014 }
1015
1016 void cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx)
1017 {
1018 struct u_vbuf *vbuf = ctx->vbuf;
1019
1020 if (vbuf) {
1021 u_vbuf_restore_aux_vertex_buffer_slot(vbuf);
1022 return;
1023 }
1024
1025 cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1,
1026 &ctx->aux_vertex_buffer_saved);
1027 pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);
1028 }
1029
1030 unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)
1031 {
1032 return ctx->aux_vertex_buffer_index;
1033 }
1034
1035
1036 /**************** fragment/vertex sampler view state *************************/
1037
/**
 * Set one sampler in the info's desired-state array, creating and
 * caching the driver CSO on first use.  A NULL template clears the slot.
 * The driver bind happens later, in single_sampler_done().
 */
static enum pipe_error
single_sampler(struct cso_context *ctx,
               struct sampler_info *info,
               unsigned idx,
               const struct pipe_sampler_state *templ)
{
   void *handle = NULL;

   if (templ != NULL) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* Cache miss: create the driver object and insert it. */
         struct cso_sampler *cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return PIPE_ERROR_OUT_OF_MEMORY;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            FREE(cso);
            return PIPE_ERROR_OUT_OF_MEMORY;
         }

         handle = cso->data;
      }
      else {
         handle = ((struct cso_sampler *)cso_hash_iter_data(iter))->data;
      }
   }

   /* NULL template leaves handle == NULL, clearing the slot. */
   info->samplers[idx] = handle;

   return PIPE_OK;
}
1082
1083 enum pipe_error
1084 cso_single_sampler(struct cso_context *ctx,
1085 unsigned shader_stage,
1086 unsigned idx,
1087 const struct pipe_sampler_state *templ)
1088 {
1089 return single_sampler(ctx, &ctx->samplers[shader_stage], idx, templ);
1090 }
1091
1092
1093
/**
 * Push the shadowed sampler states of one shader stage to the driver,
 * but only if they differ from what the driver last saw (info->hw).
 */
static void
single_sampler_done(struct cso_context *ctx, unsigned shader_stage)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];
   unsigned i;

   /* find highest non-null sampler */
   for (i = PIPE_MAX_SAMPLERS; i > 0; i--) {
      if (info->samplers[i - 1] != NULL)
         break;
   }

   info->nr_samplers = i;

   /* skip the driver call if both count and contents are unchanged */
   if (info->hw.nr_samplers != info->nr_samplers ||
       memcmp(info->hw.samplers,
              info->samplers,
              info->nr_samplers * sizeof(void *)) != 0)
   {
      memcpy(info->hw.samplers,
             info->samplers,
             info->nr_samplers * sizeof(void *));

      /* set remaining slots/pointers to null */
      for (i = info->nr_samplers; i < info->hw.nr_samplers; i++)
         info->samplers[i] = NULL;

      /* bind enough entries to also clear slots that were bound before */
      ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
                                     MAX2(info->nr_samplers,
                                          info->hw.nr_samplers),
                                     info->samplers);

      info->hw.nr_samplers = info->nr_samplers;
   }
}
1129
/**
 * Flush pending sampler-state changes for one shader stage to the driver.
 */
void
cso_single_sampler_done(struct cso_context *ctx, unsigned shader_stage)
{
   single_sampler_done(ctx, shader_stage);
}
1135
1136
1137 /*
1138 * If the function encouters any errors it will return the
1139 * last one. Done to always try to set as many samplers
1140 * as possible.
1141 */
1142 enum pipe_error
1143 cso_set_samplers(struct cso_context *ctx,
1144 unsigned shader_stage,
1145 unsigned nr,
1146 const struct pipe_sampler_state **templates)
1147 {
1148 struct sampler_info *info = &ctx->samplers[shader_stage];
1149 unsigned i;
1150 enum pipe_error temp, error = PIPE_OK;
1151
1152 /* TODO: fastpath
1153 */
1154
1155 for (i = 0; i < nr; i++) {
1156 temp = single_sampler(ctx, info, i, templates[i]);
1157 if (temp != PIPE_OK)
1158 error = temp;
1159 }
1160
1161 for ( ; i < info->nr_samplers; i++) {
1162 temp = single_sampler(ctx, info, i, NULL);
1163 if (temp != PIPE_OK)
1164 error = temp;
1165 }
1166
1167 single_sampler_done(ctx, shader_stage);
1168
1169 return error;
1170 }
1171
1172 void
1173 cso_save_samplers(struct cso_context *ctx, unsigned shader_stage)
1174 {
1175 struct sampler_info *info = &ctx->samplers[shader_stage];
1176 info->nr_samplers_saved = info->nr_samplers;
1177 memcpy(info->samplers_saved, info->samplers, sizeof(info->samplers));
1178 }
1179
1180
1181 void
1182 cso_restore_samplers(struct cso_context *ctx, unsigned shader_stage)
1183 {
1184 struct sampler_info *info = &ctx->samplers[shader_stage];
1185 info->nr_samplers = info->nr_samplers_saved;
1186 memcpy(info->samplers, info->samplers_saved, sizeof(info->samplers));
1187 single_sampler_done(ctx, shader_stage);
1188 }
1189
1190
1191 void
1192 cso_set_sampler_views(struct cso_context *ctx,
1193 unsigned shader_stage,
1194 unsigned count,
1195 struct pipe_sampler_view **views)
1196 {
1197 struct sampler_info *info = &ctx->samplers[shader_stage];
1198 unsigned i;
1199 boolean any_change = FALSE;
1200
1201 /* reference new views */
1202 for (i = 0; i < count; i++) {
1203 any_change |= info->views[i] != views[i];
1204 pipe_sampler_view_reference(&info->views[i], views[i]);
1205 }
1206 /* unref extra old views, if any */
1207 for (; i < info->nr_views; i++) {
1208 any_change |= info->views[i] != NULL;
1209 pipe_sampler_view_reference(&info->views[i], NULL);
1210 }
1211
1212 /* bind the new sampler views */
1213 if (any_change) {
1214 ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
1215 MAX2(info->nr_views, count),
1216 info->views);
1217 }
1218
1219 info->nr_views = count;
1220 }
1221
1222
1223 void
1224 cso_save_sampler_views(struct cso_context *ctx, unsigned shader_stage)
1225 {
1226 struct sampler_info *info = &ctx->samplers[shader_stage];
1227 unsigned i;
1228
1229 info->nr_views_saved = info->nr_views;
1230
1231 for (i = 0; i < info->nr_views; i++) {
1232 assert(!info->views_saved[i]);
1233 pipe_sampler_view_reference(&info->views_saved[i], info->views[i]);
1234 }
1235 }
1236
1237
/**
 * Rebind the sampler views previously saved with cso_save_sampler_views()
 * and release any views that were bound in the meantime.
 */
void
cso_restore_sampler_views(struct cso_context *ctx, unsigned shader_stage)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];
   unsigned i, nr_saved = info->nr_views_saved;
   unsigned num;

   for (i = 0; i < nr_saved; i++) {
      pipe_sampler_view_reference(&info->views[i], NULL);
      /* move the reference from one pointer to another */
      info->views[i] = info->views_saved[i];
      info->views_saved[i] = NULL;
   }
   /* release views bound beyond the saved count */
   for (; i < info->nr_views; i++) {
      pipe_sampler_view_reference(&info->views[i], NULL);
   }

   /* bind enough entries to also clear slots that were bound before */
   num = MAX2(info->nr_views, nr_saved);

   /* bind the old/saved sampler views */
   ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, num, info->views);

   info->nr_views = nr_saved;
   info->nr_views_saved = 0;
}
1263
1264
1265 void
1266 cso_set_stream_outputs(struct cso_context *ctx,
1267 unsigned num_targets,
1268 struct pipe_stream_output_target **targets,
1269 const unsigned *offsets)
1270 {
1271 struct pipe_context *pipe = ctx->pipe;
1272 uint i;
1273
1274 if (!ctx->has_streamout) {
1275 assert(num_targets == 0);
1276 return;
1277 }
1278
1279 if (ctx->nr_so_targets == 0 && num_targets == 0) {
1280 /* Nothing to do. */
1281 return;
1282 }
1283
1284 /* reference new targets */
1285 for (i = 0; i < num_targets; i++) {
1286 pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1287 }
1288 /* unref extra old targets, if any */
1289 for (; i < ctx->nr_so_targets; i++) {
1290 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1291 }
1292
1293 pipe->set_stream_output_targets(pipe, num_targets, targets,
1294 offsets);
1295 ctx->nr_so_targets = num_targets;
1296 }
1297
1298 void
1299 cso_save_stream_outputs(struct cso_context *ctx)
1300 {
1301 uint i;
1302
1303 if (!ctx->has_streamout) {
1304 return;
1305 }
1306
1307 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1308
1309 for (i = 0; i < ctx->nr_so_targets; i++) {
1310 assert(!ctx->so_targets_saved[i]);
1311 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1312 }
1313 }
1314
/**
 * Rebind the stream-output targets previously saved with
 * cso_save_stream_outputs() and release any targets bound since then.
 */
void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* release targets bound beyond the saved count */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1350
1351 /* constant buffers */
1352
1353 void
1354 cso_set_constant_buffer(struct cso_context *cso, unsigned shader_stage,
1355 unsigned index, struct pipe_constant_buffer *cb)
1356 {
1357 struct pipe_context *pipe = cso->pipe;
1358
1359 pipe->set_constant_buffer(pipe, shader_stage, index, cb);
1360
1361 if (index == 0) {
1362 util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
1363 }
1364 }
1365
1366 void
1367 cso_set_constant_buffer_resource(struct cso_context *cso,
1368 unsigned shader_stage,
1369 unsigned index,
1370 struct pipe_resource *buffer)
1371 {
1372 if (buffer) {
1373 struct pipe_constant_buffer cb;
1374 cb.buffer = buffer;
1375 cb.buffer_offset = 0;
1376 cb.buffer_size = buffer->width0;
1377 cb.user_buffer = NULL;
1378 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1379 } else {
1380 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1381 }
1382 }
1383
1384 void
1385 cso_save_constant_buffer_slot0(struct cso_context *cso,
1386 unsigned shader_stage)
1387 {
1388 util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
1389 &cso->aux_constbuf_current[shader_stage]);
1390 }
1391
1392 void
1393 cso_restore_constant_buffer_slot0(struct cso_context *cso,
1394 unsigned shader_stage)
1395 {
1396 cso_set_constant_buffer(cso, shader_stage, 0,
1397 &cso->aux_constbuf_saved[shader_stage]);
1398 pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
1399 NULL);
1400 }
1401
1402 /* drawing */
1403
1404 void
1405 cso_set_index_buffer(struct cso_context *cso,
1406 const struct pipe_index_buffer *ib)
1407 {
1408 struct u_vbuf *vbuf = cso->vbuf;
1409
1410 if (vbuf) {
1411 u_vbuf_set_index_buffer(vbuf, ib);
1412 } else {
1413 struct pipe_context *pipe = cso->pipe;
1414 pipe->set_index_buffer(pipe, ib);
1415 }
1416 }
1417
1418 void
1419 cso_draw_vbo(struct cso_context *cso,
1420 const struct pipe_draw_info *info)
1421 {
1422 struct u_vbuf *vbuf = cso->vbuf;
1423
1424 if (vbuf) {
1425 u_vbuf_draw_vbo(vbuf, info);
1426 } else {
1427 struct pipe_context *pipe = cso->pipe;
1428 pipe->draw_vbo(pipe, info);
1429 }
1430 }
1431
1432 void
1433 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1434 {
1435 struct pipe_draw_info info;
1436
1437 util_draw_init_info(&info);
1438
1439 info.mode = mode;
1440 info.start = start;
1441 info.count = count;
1442 info.min_index = start;
1443 info.max_index = start + count - 1;
1444
1445 cso_draw_vbo(cso, &info);
1446 }
1447
1448 void
1449 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1450 uint start, uint count,
1451 uint start_instance, uint instance_count)
1452 {
1453 struct pipe_draw_info info;
1454
1455 util_draw_init_info(&info);
1456
1457 info.mode = mode;
1458 info.start = start;
1459 info.count = count;
1460 info.min_index = start;
1461 info.max_index = start + count - 1;
1462 info.start_instance = start_instance;
1463 info.instance_count = instance_count;
1464
1465 cso_draw_vbo(cso, &info);
1466 }