459833b21becc83139674d70a138f2ff9d7e15ff
[mesa.git] / src / gallium / auxiliary / cso_cache / cso_context.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
/**
 * Per-shader sampler information.
 */
struct sampler_info
{
   /* CSO cache entries for each bound sampler slot (owned by the hash). */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Raw driver handles as passed to pipe->bind_sampler_states(). */
   void *samplers[PIPE_MAX_SAMPLERS];
};
61
62
63
struct cso_context {
   struct pipe_context *pipe;
   struct cso_cache *cache;       /* hash cache of driver CSO handles */

   struct u_vbuf *vbuf;           /* vertex-buffer fallback module, if created */
   struct u_vbuf *vbuf_current;   /* non-NULL when u_vbuf handles VB/VE state */
   bool always_use_vbuf;

   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_compute_shader;
   boolean has_streamout;

   unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */

   struct pipe_sampler_view *fragment_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views;

   struct pipe_sampler_view *fragment_views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views_saved;

   struct sampler_info fragment_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   /* Slot-0 vertex buffer mirrored here so meta ops can save/restore it. */
   struct pipe_vertex_buffer vertex_buffer0_current;
   struct pipe_vertex_buffer vertex_buffer0_saved;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   struct pipe_image_view fragment_image0_current;
   struct pipe_image_view fragment_image0_saved;

   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
132
133 struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
134 {
135 return cso->pipe;
136 }
137
138 static boolean delete_blend_state(struct cso_context *ctx, void *state)
139 {
140 struct cso_blend *cso = (struct cso_blend *)state;
141
142 if (ctx->blend == cso->data)
143 return FALSE;
144
145 if (cso->delete_state)
146 cso->delete_state(cso->context, cso->data);
147 FREE(state);
148 return TRUE;
149 }
150
151 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
152 {
153 struct cso_depth_stencil_alpha *cso =
154 (struct cso_depth_stencil_alpha *)state;
155
156 if (ctx->depth_stencil == cso->data)
157 return FALSE;
158
159 if (cso->delete_state)
160 cso->delete_state(cso->context, cso->data);
161 FREE(state);
162
163 return TRUE;
164 }
165
166 static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
167 {
168 struct cso_sampler *cso = (struct cso_sampler *)state;
169 if (cso->delete_state)
170 cso->delete_state(cso->context, cso->data);
171 FREE(state);
172 return TRUE;
173 }
174
175 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
176 {
177 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
178
179 if (ctx->rasterizer == cso->data)
180 return FALSE;
181 if (cso->delete_state)
182 cso->delete_state(cso->context, cso->data);
183 FREE(state);
184 return TRUE;
185 }
186
187 static boolean delete_vertex_elements(struct cso_context *ctx,
188 void *state)
189 {
190 struct cso_velements *cso = (struct cso_velements *)state;
191
192 if (ctx->velements == cso->data)
193 return FALSE;
194
195 if (cso->delete_state)
196 cso->delete_state(cso->context, cso->data);
197 FREE(state);
198 return TRUE;
199 }
200
201
202 static inline boolean delete_cso(struct cso_context *ctx,
203 void *state, enum cso_cache_type type)
204 {
205 switch (type) {
206 case CSO_BLEND:
207 return delete_blend_state(ctx, state);
208 case CSO_SAMPLER:
209 return delete_sampler_state(ctx, state);
210 case CSO_DEPTH_STENCIL_ALPHA:
211 return delete_depth_stencil_state(ctx, state);
212 case CSO_RASTERIZER:
213 return delete_rasterizer_state(ctx, state);
214 case CSO_VELEMENTS:
215 return delete_vertex_elements(ctx, state);
216 default:
217 assert(0);
218 FREE(state);
219 }
220 return FALSE;
221 }
222
223 static inline void
224 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
225 int max_size, void *user_data)
226 {
227 struct cso_context *ctx = (struct cso_context *)user_data;
228 /* if we're approach the maximum size, remove fourth of the entries
229 * otherwise every subsequent call will go through the same */
230 int hash_size = cso_hash_size(hash);
231 int max_entries = (max_size > hash_size) ? max_size : hash_size;
232 int to_remove = (max_size < max_entries) * max_entries/4;
233 struct cso_hash_iter iter;
234 struct cso_sampler **samplers_to_restore = NULL;
235 unsigned to_restore = 0;
236
237 if (hash_size > max_size)
238 to_remove += hash_size - max_size;
239
240 if (to_remove == 0)
241 return;
242
243 if (type == CSO_SAMPLER) {
244 int i, j;
245
246 samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
247 sizeof(*samplers_to_restore));
248
249 /* Temporarily remove currently bound sampler states from the hash
250 * table, to prevent them from being deleted
251 */
252 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
253 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
254 struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
255
256 if (sampler && cso_hash_take(hash, sampler->hash_key))
257 samplers_to_restore[to_restore++] = sampler;
258 }
259 }
260 }
261
262 iter = cso_hash_first_node(hash);
263 while (to_remove) {
264 /*remove elements until we're good */
265 /*fixme: currently we pick the nodes to remove at random*/
266 void *cso = cso_hash_iter_data(iter);
267
268 if (!cso)
269 break;
270
271 if (delete_cso(ctx, cso, type)) {
272 iter = cso_hash_erase(hash, iter);
273 --to_remove;
274 } else
275 iter = cso_hash_iter_next(iter);
276 }
277
278 if (type == CSO_SAMPLER) {
279 /* Put currently bound sampler states back into the hash table */
280 while (to_restore--) {
281 struct cso_sampler *sampler = samplers_to_restore[to_restore];
282
283 cso_hash_insert(hash, sampler->hash_key, sampler);
284 }
285
286 FREE(samplers_to_restore);
287 }
288 }
289
290 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
291 {
292 struct u_vbuf_caps caps;
293 bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
294
295 u_vbuf_get_caps(cso->pipe->screen, &caps);
296
297 /* Enable u_vbuf if needed. */
298 if (caps.fallback_always ||
299 (uses_user_vertex_buffers &&
300 caps.fallback_only_for_user_vbuffers)) {
301 cso->vbuf = u_vbuf_create(cso->pipe, &caps);
302 cso->vbuf_current = cso->vbuf;
303 cso->always_use_vbuf = caps.fallback_always;
304 }
305 }
306
307 struct cso_context *
308 cso_create_context(struct pipe_context *pipe, unsigned flags)
309 {
310 struct cso_context *ctx = CALLOC_STRUCT(cso_context);
311 if (!ctx)
312 return NULL;
313
314 ctx->cache = cso_cache_create();
315 if (ctx->cache == NULL)
316 goto out;
317 cso_cache_set_sanitize_callback(ctx->cache,
318 sanitize_hash,
319 ctx);
320
321 ctx->pipe = pipe;
322 ctx->sample_mask = ~0;
323
324 cso_init_vbuf(ctx, flags);
325
326 /* Enable for testing: */
327 if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
328
329 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
330 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
331 ctx->has_geometry_shader = TRUE;
332 }
333 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
334 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
335 ctx->has_tessellation = TRUE;
336 }
337 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
338 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
339 int supported_irs =
340 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
341 PIPE_SHADER_CAP_SUPPORTED_IRS);
342 if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
343 (1 << PIPE_SHADER_IR_NIR))) {
344 ctx->has_compute_shader = TRUE;
345 }
346 }
347 if (pipe->screen->get_param(pipe->screen,
348 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
349 ctx->has_streamout = TRUE;
350 }
351
352 ctx->max_sampler_seen = -1;
353 return ctx;
354
355 out:
356 cso_destroy_context( ctx );
357 return NULL;
358 }
359
/**
 * Free the CSO context.
 *
 * First unbinds every piece of state from the wrapped pipe context (so the
 * driver drops its references), then releases all references this context
 * holds, and finally destroys the cache and the context itself.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* Unbind all sampler states and views for every shader stage,
          * using zero-filled static arrays as the "nothing bound" payload. */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            /* only clear as many slots as the stage actually exposes */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* optional stages are only unbound when the driver supports them */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Release both the live and the saved references for everything the
    * context ref-counts. */
   for (i = 0; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }
   for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   pipe_resource_reference(&ctx->fragment_image0_current.resource, NULL);
   pipe_resource_reference(&ctx->fragment_image0_saved.resource, NULL);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   /* Deleting the cache destroys the cached driver CSOs (now unbound). */
   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
452
453
/* These functions will either find the state matching the given template
 * in the cache, or create a new state from the template, insert it into
 * the cache and return it.
 */
458
/*
 * If the driver returns 0 from the create method, the data member of the
 * CSO is set to the template itself.
 */
463
464 enum pipe_error cso_set_blend(struct cso_context *ctx,
465 const struct pipe_blend_state *templ)
466 {
467 unsigned key_size, hash_key;
468 struct cso_hash_iter iter;
469 void *handle;
470
471 key_size = templ->independent_blend_enable ?
472 sizeof(struct pipe_blend_state) :
473 (char *)&(templ->rt[1]) - (char *)templ;
474 hash_key = cso_construct_key((void*)templ, key_size);
475 iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
476 (void*)templ, key_size);
477
478 if (cso_hash_iter_is_null(iter)) {
479 struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
480 if (!cso)
481 return PIPE_ERROR_OUT_OF_MEMORY;
482
483 memset(&cso->state, 0, sizeof cso->state);
484 memcpy(&cso->state, templ, key_size);
485 cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
486 cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
487 cso->context = ctx->pipe;
488
489 iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
490 if (cso_hash_iter_is_null(iter)) {
491 FREE(cso);
492 return PIPE_ERROR_OUT_OF_MEMORY;
493 }
494
495 handle = cso->data;
496 }
497 else {
498 handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
499 }
500
501 if (ctx->blend != handle) {
502 ctx->blend = handle;
503 ctx->pipe->bind_blend_state(ctx->pipe, handle);
504 }
505 return PIPE_OK;
506 }
507
508 static void
509 cso_save_blend(struct cso_context *ctx)
510 {
511 assert(!ctx->blend_saved);
512 ctx->blend_saved = ctx->blend;
513 }
514
515 static void
516 cso_restore_blend(struct cso_context *ctx)
517 {
518 if (ctx->blend != ctx->blend_saved) {
519 ctx->blend = ctx->blend_saved;
520 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
521 }
522 ctx->blend_saved = NULL;
523 }
524
525
526
527 enum pipe_error
528 cso_set_depth_stencil_alpha(struct cso_context *ctx,
529 const struct pipe_depth_stencil_alpha_state *templ)
530 {
531 unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
532 unsigned hash_key = cso_construct_key((void*)templ, key_size);
533 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
534 hash_key,
535 CSO_DEPTH_STENCIL_ALPHA,
536 (void*)templ, key_size);
537 void *handle;
538
539 if (cso_hash_iter_is_null(iter)) {
540 struct cso_depth_stencil_alpha *cso =
541 MALLOC(sizeof(struct cso_depth_stencil_alpha));
542 if (!cso)
543 return PIPE_ERROR_OUT_OF_MEMORY;
544
545 memcpy(&cso->state, templ, sizeof(*templ));
546 cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
547 &cso->state);
548 cso->delete_state =
549 (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
550 cso->context = ctx->pipe;
551
552 iter = cso_insert_state(ctx->cache, hash_key,
553 CSO_DEPTH_STENCIL_ALPHA, cso);
554 if (cso_hash_iter_is_null(iter)) {
555 FREE(cso);
556 return PIPE_ERROR_OUT_OF_MEMORY;
557 }
558
559 handle = cso->data;
560 }
561 else {
562 handle = ((struct cso_depth_stencil_alpha *)
563 cso_hash_iter_data(iter))->data;
564 }
565
566 if (ctx->depth_stencil != handle) {
567 ctx->depth_stencil = handle;
568 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
569 }
570 return PIPE_OK;
571 }
572
573 static void
574 cso_save_depth_stencil_alpha(struct cso_context *ctx)
575 {
576 assert(!ctx->depth_stencil_saved);
577 ctx->depth_stencil_saved = ctx->depth_stencil;
578 }
579
580 static void
581 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
582 {
583 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
584 ctx->depth_stencil = ctx->depth_stencil_saved;
585 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
586 ctx->depth_stencil_saved);
587 }
588 ctx->depth_stencil_saved = NULL;
589 }
590
591
592
593 enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
594 const struct pipe_rasterizer_state *templ)
595 {
596 unsigned key_size = sizeof(struct pipe_rasterizer_state);
597 unsigned hash_key = cso_construct_key((void*)templ, key_size);
598 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
599 hash_key,
600 CSO_RASTERIZER,
601 (void*)templ, key_size);
602 void *handle = NULL;
603
604 /* We can't have both point_quad_rasterization (sprites) and point_smooth
605 * (round AA points) enabled at the same time.
606 */
607 assert(!(templ->point_quad_rasterization && templ->point_smooth));
608
609 if (cso_hash_iter_is_null(iter)) {
610 struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
611 if (!cso)
612 return PIPE_ERROR_OUT_OF_MEMORY;
613
614 memcpy(&cso->state, templ, sizeof(*templ));
615 cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
616 cso->delete_state =
617 (cso_state_callback)ctx->pipe->delete_rasterizer_state;
618 cso->context = ctx->pipe;
619
620 iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
621 if (cso_hash_iter_is_null(iter)) {
622 FREE(cso);
623 return PIPE_ERROR_OUT_OF_MEMORY;
624 }
625
626 handle = cso->data;
627 }
628 else {
629 handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
630 }
631
632 if (ctx->rasterizer != handle) {
633 ctx->rasterizer = handle;
634 ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
635 }
636 return PIPE_OK;
637 }
638
639 static void
640 cso_save_rasterizer(struct cso_context *ctx)
641 {
642 assert(!ctx->rasterizer_saved);
643 ctx->rasterizer_saved = ctx->rasterizer;
644 }
645
646 static void
647 cso_restore_rasterizer(struct cso_context *ctx)
648 {
649 if (ctx->rasterizer != ctx->rasterizer_saved) {
650 ctx->rasterizer = ctx->rasterizer_saved;
651 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
652 }
653 ctx->rasterizer_saved = NULL;
654 }
655
656
657 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
658 {
659 if (ctx->fragment_shader != handle) {
660 ctx->fragment_shader = handle;
661 ctx->pipe->bind_fs_state(ctx->pipe, handle);
662 }
663 }
664
665 static void
666 cso_save_fragment_shader(struct cso_context *ctx)
667 {
668 assert(!ctx->fragment_shader_saved);
669 ctx->fragment_shader_saved = ctx->fragment_shader;
670 }
671
672 static void
673 cso_restore_fragment_shader(struct cso_context *ctx)
674 {
675 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
676 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
677 ctx->fragment_shader = ctx->fragment_shader_saved;
678 }
679 ctx->fragment_shader_saved = NULL;
680 }
681
682
683 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
684 {
685 if (ctx->vertex_shader != handle) {
686 ctx->vertex_shader = handle;
687 ctx->pipe->bind_vs_state(ctx->pipe, handle);
688 }
689 }
690
691 static void
692 cso_save_vertex_shader(struct cso_context *ctx)
693 {
694 assert(!ctx->vertex_shader_saved);
695 ctx->vertex_shader_saved = ctx->vertex_shader;
696 }
697
698 static void
699 cso_restore_vertex_shader(struct cso_context *ctx)
700 {
701 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
702 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
703 ctx->vertex_shader = ctx->vertex_shader_saved;
704 }
705 ctx->vertex_shader_saved = NULL;
706 }
707
708
709 void cso_set_framebuffer(struct cso_context *ctx,
710 const struct pipe_framebuffer_state *fb)
711 {
712 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
713 util_copy_framebuffer_state(&ctx->fb, fb);
714 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
715 }
716 }
717
718 static void
719 cso_save_framebuffer(struct cso_context *ctx)
720 {
721 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
722 }
723
724 static void
725 cso_restore_framebuffer(struct cso_context *ctx)
726 {
727 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
728 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
729 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
730 util_unreference_framebuffer_state(&ctx->fb_saved);
731 }
732 }
733
734
735 void cso_set_viewport(struct cso_context *ctx,
736 const struct pipe_viewport_state *vp)
737 {
738 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
739 ctx->vp = *vp;
740 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
741 }
742 }
743
744 /**
745 * Setup viewport state for given width and height (position is always (0,0)).
746 * Invert the Y axis if 'invert' is true.
747 */
748 void
749 cso_set_viewport_dims(struct cso_context *ctx,
750 float width, float height, boolean invert)
751 {
752 struct pipe_viewport_state vp;
753 vp.scale[0] = width * 0.5f;
754 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
755 vp.scale[2] = 0.5f;
756 vp.translate[0] = 0.5f * width;
757 vp.translate[1] = 0.5f * height;
758 vp.translate[2] = 0.5f;
759 vp.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
760 vp.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
761 vp.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
762 vp.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
763 cso_set_viewport(ctx, &vp);
764 }
765
766 static void
767 cso_save_viewport(struct cso_context *ctx)
768 {
769 ctx->vp_saved = ctx->vp;
770 }
771
772
773 static void
774 cso_restore_viewport(struct cso_context *ctx)
775 {
776 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
777 ctx->vp = ctx->vp_saved;
778 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
779 }
780 }
781
782
783 void cso_set_blend_color(struct cso_context *ctx,
784 const struct pipe_blend_color *bc)
785 {
786 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
787 ctx->blend_color = *bc;
788 ctx->pipe->set_blend_color(ctx->pipe, bc);
789 }
790 }
791
792 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
793 {
794 if (ctx->sample_mask != sample_mask) {
795 ctx->sample_mask = sample_mask;
796 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
797 }
798 }
799
800 static void
801 cso_save_sample_mask(struct cso_context *ctx)
802 {
803 ctx->sample_mask_saved = ctx->sample_mask;
804 }
805
806 static void
807 cso_restore_sample_mask(struct cso_context *ctx)
808 {
809 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
810 }
811
812 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
813 {
814 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
815 ctx->min_samples = min_samples;
816 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
817 }
818 }
819
820 static void
821 cso_save_min_samples(struct cso_context *ctx)
822 {
823 ctx->min_samples_saved = ctx->min_samples;
824 }
825
826 static void
827 cso_restore_min_samples(struct cso_context *ctx)
828 {
829 cso_set_min_samples(ctx, ctx->min_samples_saved);
830 }
831
832 void cso_set_stencil_ref(struct cso_context *ctx,
833 const struct pipe_stencil_ref *sr)
834 {
835 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
836 ctx->stencil_ref = *sr;
837 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
838 }
839 }
840
841 static void
842 cso_save_stencil_ref(struct cso_context *ctx)
843 {
844 ctx->stencil_ref_saved = ctx->stencil_ref;
845 }
846
847
848 static void
849 cso_restore_stencil_ref(struct cso_context *ctx)
850 {
851 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
852 sizeof(ctx->stencil_ref))) {
853 ctx->stencil_ref = ctx->stencil_ref_saved;
854 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
855 }
856 }
857
858 void cso_set_render_condition(struct cso_context *ctx,
859 struct pipe_query *query,
860 boolean condition,
861 enum pipe_render_cond_flag mode)
862 {
863 struct pipe_context *pipe = ctx->pipe;
864
865 if (ctx->render_condition != query ||
866 ctx->render_condition_mode != mode ||
867 ctx->render_condition_cond != condition) {
868 pipe->render_condition(pipe, query, condition, mode);
869 ctx->render_condition = query;
870 ctx->render_condition_cond = condition;
871 ctx->render_condition_mode = mode;
872 }
873 }
874
875 static void
876 cso_save_render_condition(struct cso_context *ctx)
877 {
878 ctx->render_condition_saved = ctx->render_condition;
879 ctx->render_condition_cond_saved = ctx->render_condition_cond;
880 ctx->render_condition_mode_saved = ctx->render_condition_mode;
881 }
882
883 static void
884 cso_restore_render_condition(struct cso_context *ctx)
885 {
886 cso_set_render_condition(ctx, ctx->render_condition_saved,
887 ctx->render_condition_cond_saved,
888 ctx->render_condition_mode_saved);
889 }
890
891 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
892 {
893 assert(ctx->has_geometry_shader || !handle);
894
895 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
896 ctx->geometry_shader = handle;
897 ctx->pipe->bind_gs_state(ctx->pipe, handle);
898 }
899 }
900
901 static void
902 cso_save_geometry_shader(struct cso_context *ctx)
903 {
904 if (!ctx->has_geometry_shader) {
905 return;
906 }
907
908 assert(!ctx->geometry_shader_saved);
909 ctx->geometry_shader_saved = ctx->geometry_shader;
910 }
911
912 static void
913 cso_restore_geometry_shader(struct cso_context *ctx)
914 {
915 if (!ctx->has_geometry_shader) {
916 return;
917 }
918
919 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
920 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
921 ctx->geometry_shader = ctx->geometry_shader_saved;
922 }
923 ctx->geometry_shader_saved = NULL;
924 }
925
926 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
927 {
928 assert(ctx->has_tessellation || !handle);
929
930 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
931 ctx->tessctrl_shader = handle;
932 ctx->pipe->bind_tcs_state(ctx->pipe, handle);
933 }
934 }
935
936 static void
937 cso_save_tessctrl_shader(struct cso_context *ctx)
938 {
939 if (!ctx->has_tessellation) {
940 return;
941 }
942
943 assert(!ctx->tessctrl_shader_saved);
944 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
945 }
946
947 static void
948 cso_restore_tessctrl_shader(struct cso_context *ctx)
949 {
950 if (!ctx->has_tessellation) {
951 return;
952 }
953
954 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
955 ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
956 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
957 }
958 ctx->tessctrl_shader_saved = NULL;
959 }
960
961 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
962 {
963 assert(ctx->has_tessellation || !handle);
964
965 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
966 ctx->tesseval_shader = handle;
967 ctx->pipe->bind_tes_state(ctx->pipe, handle);
968 }
969 }
970
971 static void
972 cso_save_tesseval_shader(struct cso_context *ctx)
973 {
974 if (!ctx->has_tessellation) {
975 return;
976 }
977
978 assert(!ctx->tesseval_shader_saved);
979 ctx->tesseval_shader_saved = ctx->tesseval_shader;
980 }
981
982 static void
983 cso_restore_tesseval_shader(struct cso_context *ctx)
984 {
985 if (!ctx->has_tessellation) {
986 return;
987 }
988
989 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
990 ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
991 ctx->tesseval_shader = ctx->tesseval_shader_saved;
992 }
993 ctx->tesseval_shader_saved = NULL;
994 }
995
996 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
997 {
998 assert(ctx->has_compute_shader || !handle);
999
1000 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1001 ctx->compute_shader = handle;
1002 ctx->pipe->bind_compute_state(ctx->pipe, handle);
1003 }
1004 }
1005
1006 static void
1007 cso_set_vertex_elements_direct(struct cso_context *ctx,
1008 const struct cso_velems_state *velems)
1009 {
1010 unsigned key_size, hash_key;
1011 struct cso_hash_iter iter;
1012 void *handle;
1013
1014 /* Need to include the count into the stored state data too.
1015 * Otherwise first few count pipe_vertex_elements could be identical
1016 * even if count is different, and there's no guarantee the hash would
1017 * be different in that case neither.
1018 */
1019 key_size = sizeof(struct pipe_vertex_element) * velems->count +
1020 sizeof(unsigned);
1021 hash_key = cso_construct_key((void*)velems, key_size);
1022 iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
1023 (void*)velems, key_size);
1024
1025 if (cso_hash_iter_is_null(iter)) {
1026 struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
1027 if (!cso)
1028 return;
1029
1030 memcpy(&cso->state, velems, key_size);
1031 cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe,
1032 velems->count,
1033 &cso->state.velems[0]);
1034 cso->delete_state =
1035 (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
1036 cso->context = ctx->pipe;
1037
1038 iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
1039 if (cso_hash_iter_is_null(iter)) {
1040 FREE(cso);
1041 return;
1042 }
1043
1044 handle = cso->data;
1045 }
1046 else {
1047 handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
1048 }
1049
1050 if (ctx->velements != handle) {
1051 ctx->velements = handle;
1052 ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
1053 }
1054 }
1055
1056 enum pipe_error
1057 cso_set_vertex_elements(struct cso_context *ctx,
1058 const struct cso_velems_state *velems)
1059 {
1060 struct u_vbuf *vbuf = ctx->vbuf_current;
1061
1062 if (vbuf) {
1063 u_vbuf_set_vertex_elements(vbuf, velems);
1064 return PIPE_OK;
1065 }
1066
1067 cso_set_vertex_elements_direct(ctx, velems);
1068 return PIPE_OK;
1069 }
1070
1071 static void
1072 cso_save_vertex_elements(struct cso_context *ctx)
1073 {
1074 struct u_vbuf *vbuf = ctx->vbuf_current;
1075
1076 if (vbuf) {
1077 u_vbuf_save_vertex_elements(vbuf);
1078 return;
1079 }
1080
1081 assert(!ctx->velements_saved);
1082 ctx->velements_saved = ctx->velements;
1083 }
1084
1085 static void
1086 cso_restore_vertex_elements(struct cso_context *ctx)
1087 {
1088 struct u_vbuf *vbuf = ctx->vbuf_current;
1089
1090 if (vbuf) {
1091 u_vbuf_restore_vertex_elements(vbuf);
1092 return;
1093 }
1094
1095 if (ctx->velements != ctx->velements_saved) {
1096 ctx->velements = ctx->velements_saved;
1097 ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1098 }
1099 ctx->velements_saved = NULL;
1100 }
1101
1102 /* vertex buffers */
1103
1104 static void
1105 cso_set_vertex_buffers_direct(struct cso_context *ctx,
1106 unsigned start_slot, unsigned count,
1107 const struct pipe_vertex_buffer *buffers)
1108 {
1109 /* Save what's in the auxiliary slot, so that we can save and restore it
1110 * for meta ops.
1111 */
1112 if (start_slot == 0) {
1113 if (buffers) {
1114 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
1115 buffers);
1116 } else {
1117 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
1118 }
1119 }
1120
1121 ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
1122 }
1123
1124
1125 void cso_set_vertex_buffers(struct cso_context *ctx,
1126 unsigned start_slot, unsigned count,
1127 const struct pipe_vertex_buffer *buffers)
1128 {
1129 struct u_vbuf *vbuf = ctx->vbuf_current;
1130
1131 if (!count)
1132 return;
1133
1134 if (vbuf) {
1135 u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
1136 return;
1137 }
1138
1139 cso_set_vertex_buffers_direct(ctx, start_slot, count, buffers);
1140 }
1141
1142 static void
1143 cso_save_vertex_buffer0(struct cso_context *ctx)
1144 {
1145 struct u_vbuf *vbuf = ctx->vbuf_current;
1146
1147 if (vbuf) {
1148 u_vbuf_save_vertex_buffer0(vbuf);
1149 return;
1150 }
1151
1152 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
1153 &ctx->vertex_buffer0_current);
1154 }
1155
1156 static void
1157 cso_restore_vertex_buffer0(struct cso_context *ctx)
1158 {
1159 struct u_vbuf *vbuf = ctx->vbuf_current;
1160
1161 if (vbuf) {
1162 u_vbuf_restore_vertex_buffer0(vbuf);
1163 return;
1164 }
1165
1166 cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
1167 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
1168 }
1169
1170 /**
1171 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1172 * for user vertex buffers and user vertex buffers are not set by this call.
1173 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1174 *
1175 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1176 * such as VBOs, glBegin/End, and display lists.
1177 *
1178 * Internal operations that do "save states, draw, restore states" shouldn't
1179 * use this, because the states are only saved in either cso_context or
1180 * u_vbuf, not both.
1181 */
void
cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
                                    const struct cso_velems_state *velems,
                                    unsigned vb_count,
                                    unsigned unbind_trailing_vb_count,
                                    const struct pipe_vertex_buffer *vbuffers,
                                    bool uses_user_vertex_buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   /* u_vbuf path: taken when u_vbuf exists and is either always wanted
    * or required for the user vertex buffers set by this call. */
   if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
      if (!ctx->vbuf_current) {
         /* Switching from the direct path: unbind all buffers in
          * cso_context, because we'll use u_vbuf. */
         unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
         if (unbind_vb_count)
            cso_set_vertex_buffers_direct(ctx, 0, unbind_vb_count, NULL);

         /* Unset this to make sure the CSO is re-bound on the next use. */
         ctx->velements = NULL;
         ctx->vbuf_current = vbuf;
      } else if (unbind_trailing_vb_count) {
         /* Already on the u_vbuf path: only clear the trailing slots. */
         u_vbuf_set_vertex_buffers(vbuf, vb_count, unbind_trailing_vb_count,
                                   NULL);
      }

      if (vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, vb_count, vbuffers);
      u_vbuf_set_vertex_elements(vbuf, velems);
      return;
   }

   /* Direct (cso_context) path. */
   if (ctx->vbuf_current) {
      /* Switching from u_vbuf: unbind all buffers in u_vbuf, because
       * we'll use cso_context. */
      unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
      if (unbind_vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, unbind_vb_count, NULL);

      /* Unset this to make sure the CSO is re-bound on the next use. */
      u_vbuf_unset_vertex_elements(vbuf);
      ctx->vbuf_current = NULL;
   } else if (unbind_trailing_vb_count) {
      cso_set_vertex_buffers_direct(ctx, vb_count, unbind_trailing_vb_count,
                                    NULL);
   }

   if (vb_count)
      cso_set_vertex_buffers_direct(ctx, 0, vb_count, vbuffers);
   cso_set_vertex_elements_direct(ctx, velems);
}
1231
/**
 * Stage one sampler state for the given shader stage and slot.
 *
 * The template is looked up in (or added to) the CSO cache and the
 * resulting driver sampler object is stored in the staging arrays;
 * nothing is bound until cso_single_sampler_done() is called.
 * A NULL template is a no-op, and allocation/insertion failures are
 * silently dropped (the slot keeps its previous contents).
 */
void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
                   unsigned idx, const struct pipe_sampler_state *templ)
{
   if (templ) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_sampler *cso;
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* Cache miss: create the driver sampler object and cache it. */
         cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;
         cso->hash_key = hash_key;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            /* NOTE(review): cso->data (the driver sampler state) appears
             * to be leaked on this path — consider deleting it before
             * freeing the wrapper. TODO confirm. */
            FREE(cso);
            return;
         }
      }
      else {
         /* Cache hit: reuse the existing driver object. */
         cso = cso_hash_iter_data(iter);
      }

      /* Stage the sampler; cso_single_sampler_done() flushes the range
       * [0, max_sampler_seen] to the driver. */
      ctx->samplers[shader_stage].cso_samplers[idx] = cso;
      ctx->samplers[shader_stage].samplers[idx] = cso->data;
      ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
   }
}
1272
1273
1274 /**
1275 * Send staged sampler state to the driver.
1276 */
1277 void
1278 cso_single_sampler_done(struct cso_context *ctx,
1279 enum pipe_shader_type shader_stage)
1280 {
1281 struct sampler_info *info = &ctx->samplers[shader_stage];
1282
1283 if (ctx->max_sampler_seen == -1)
1284 return;
1285
1286 ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1287 ctx->max_sampler_seen + 1,
1288 info->samplers);
1289 ctx->max_sampler_seen = -1;
1290 }
1291
1292
/*
 * Stage and bind a full set of samplers. Failures while creating an
 * individual sampler state are silently skipped so that as many
 * samplers as possible are still set.
 */
1298 void
1299 cso_set_samplers(struct cso_context *ctx,
1300 enum pipe_shader_type shader_stage,
1301 unsigned nr,
1302 const struct pipe_sampler_state **templates)
1303 {
1304 for (unsigned i = 0; i < nr; i++)
1305 cso_single_sampler(ctx, shader_stage, i, templates[i]);
1306
1307 cso_single_sampler_done(ctx, shader_stage);
1308 }
1309
1310 static void
1311 cso_save_fragment_samplers(struct cso_context *ctx)
1312 {
1313 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1314 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1315
1316 memcpy(saved->cso_samplers, info->cso_samplers,
1317 sizeof(info->cso_samplers));
1318 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1319 }
1320
1321
1322 static void
1323 cso_restore_fragment_samplers(struct cso_context *ctx)
1324 {
1325 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1326 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1327
1328 memcpy(info->cso_samplers, saved->cso_samplers,
1329 sizeof(info->cso_samplers));
1330 memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1331
1332 for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1333 if (info->samplers[i]) {
1334 ctx->max_sampler_seen = i;
1335 break;
1336 }
1337 }
1338
1339 cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
1340 }
1341
1342
1343 void
1344 cso_set_sampler_views(struct cso_context *ctx,
1345 enum pipe_shader_type shader_stage,
1346 unsigned count,
1347 struct pipe_sampler_view **views)
1348 {
1349 if (shader_stage == PIPE_SHADER_FRAGMENT) {
1350 unsigned i;
1351 boolean any_change = FALSE;
1352
1353 /* reference new views */
1354 for (i = 0; i < count; i++) {
1355 any_change |= ctx->fragment_views[i] != views[i];
1356 pipe_sampler_view_reference(&ctx->fragment_views[i], views[i]);
1357 }
1358 /* unref extra old views, if any */
1359 for (; i < ctx->nr_fragment_views; i++) {
1360 any_change |= ctx->fragment_views[i] != NULL;
1361 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1362 }
1363
1364 /* bind the new sampler views */
1365 if (any_change) {
1366 ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
1367 MAX2(ctx->nr_fragment_views, count),
1368 ctx->fragment_views);
1369 }
1370
1371 ctx->nr_fragment_views = count;
1372 }
1373 else
1374 ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, count, views);
1375 }
1376
1377
1378 static void
1379 cso_save_fragment_sampler_views(struct cso_context *ctx)
1380 {
1381 unsigned i;
1382
1383 ctx->nr_fragment_views_saved = ctx->nr_fragment_views;
1384
1385 for (i = 0; i < ctx->nr_fragment_views; i++) {
1386 assert(!ctx->fragment_views_saved[i]);
1387 pipe_sampler_view_reference(&ctx->fragment_views_saved[i],
1388 ctx->fragment_views[i]);
1389 }
1390 }
1391
1392
1393 static void
1394 cso_restore_fragment_sampler_views(struct cso_context *ctx)
1395 {
1396 unsigned i, nr_saved = ctx->nr_fragment_views_saved;
1397 unsigned num;
1398
1399 for (i = 0; i < nr_saved; i++) {
1400 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1401 /* move the reference from one pointer to another */
1402 ctx->fragment_views[i] = ctx->fragment_views_saved[i];
1403 ctx->fragment_views_saved[i] = NULL;
1404 }
1405 for (; i < ctx->nr_fragment_views; i++) {
1406 pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
1407 }
1408
1409 num = MAX2(ctx->nr_fragment_views, nr_saved);
1410
1411 /* bind the old/saved sampler views */
1412 ctx->pipe->set_sampler_views(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, num,
1413 ctx->fragment_views);
1414
1415 ctx->nr_fragment_views = nr_saved;
1416 ctx->nr_fragment_views_saved = 0;
1417 }
1418
1419
1420 void
1421 cso_set_shader_images(struct cso_context *ctx,
1422 enum pipe_shader_type shader_stage,
1423 unsigned start, unsigned count,
1424 struct pipe_image_view *images)
1425 {
1426 if (shader_stage == PIPE_SHADER_FRAGMENT && start == 0 && count >= 1) {
1427 util_copy_image_view(&ctx->fragment_image0_current, &images[0]);
1428 }
1429
1430 ctx->pipe->set_shader_images(ctx->pipe, shader_stage, start, count, images);
1431 }
1432
1433
1434 static void
1435 cso_save_fragment_image0(struct cso_context *ctx)
1436 {
1437 util_copy_image_view(&ctx->fragment_image0_saved,
1438 &ctx->fragment_image0_current);
1439 }
1440
1441
1442 static void
1443 cso_restore_fragment_image0(struct cso_context *ctx)
1444 {
1445 cso_set_shader_images(ctx, PIPE_SHADER_FRAGMENT, 0, 1,
1446 &ctx->fragment_image0_saved);
1447 }
1448
1449
1450 void
1451 cso_set_stream_outputs(struct cso_context *ctx,
1452 unsigned num_targets,
1453 struct pipe_stream_output_target **targets,
1454 const unsigned *offsets)
1455 {
1456 struct pipe_context *pipe = ctx->pipe;
1457 uint i;
1458
1459 if (!ctx->has_streamout) {
1460 assert(num_targets == 0);
1461 return;
1462 }
1463
1464 if (ctx->nr_so_targets == 0 && num_targets == 0) {
1465 /* Nothing to do. */
1466 return;
1467 }
1468
1469 /* reference new targets */
1470 for (i = 0; i < num_targets; i++) {
1471 pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1472 }
1473 /* unref extra old targets, if any */
1474 for (; i < ctx->nr_so_targets; i++) {
1475 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1476 }
1477
1478 pipe->set_stream_output_targets(pipe, num_targets, targets,
1479 offsets);
1480 ctx->nr_so_targets = num_targets;
1481 }
1482
1483 static void
1484 cso_save_stream_outputs(struct cso_context *ctx)
1485 {
1486 uint i;
1487
1488 if (!ctx->has_streamout) {
1489 return;
1490 }
1491
1492 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1493
1494 for (i = 0; i < ctx->nr_so_targets; i++) {
1495 assert(!ctx->so_targets_saved[i]);
1496 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1497 }
1498 }
1499
/* Rebind the stream-output targets saved by cso_save_stream_outputs(),
 * transferring the saved references back and resuming each target at
 * its current end ("append" offset).
 */
static void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* Release any current targets beyond the saved count. */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1535
1536 /* constant buffers */
1537
1538 void
1539 cso_set_constant_buffer(struct cso_context *cso,
1540 enum pipe_shader_type shader_stage,
1541 unsigned index, struct pipe_constant_buffer *cb)
1542 {
1543 struct pipe_context *pipe = cso->pipe;
1544
1545 pipe->set_constant_buffer(pipe, shader_stage, index, cb);
1546
1547 if (index == 0) {
1548 util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
1549 }
1550 }
1551
1552 void
1553 cso_set_constant_buffer_resource(struct cso_context *cso,
1554 enum pipe_shader_type shader_stage,
1555 unsigned index,
1556 struct pipe_resource *buffer)
1557 {
1558 if (buffer) {
1559 struct pipe_constant_buffer cb;
1560 cb.buffer = buffer;
1561 cb.buffer_offset = 0;
1562 cb.buffer_size = buffer->width0;
1563 cb.user_buffer = NULL;
1564 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1565 } else {
1566 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1567 }
1568 }
1569
1570 void
1571 cso_set_constant_user_buffer(struct cso_context *cso,
1572 enum pipe_shader_type shader_stage,
1573 unsigned index, void *ptr, unsigned size)
1574 {
1575 if (ptr) {
1576 struct pipe_constant_buffer cb;
1577 cb.buffer = NULL;
1578 cb.buffer_offset = 0;
1579 cb.buffer_size = size;
1580 cb.user_buffer = ptr;
1581 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1582 } else {
1583 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1584 }
1585 }
1586
1587 void
1588 cso_save_constant_buffer_slot0(struct cso_context *cso,
1589 enum pipe_shader_type shader_stage)
1590 {
1591 util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
1592 &cso->aux_constbuf_current[shader_stage]);
1593 }
1594
1595 void
1596 cso_restore_constant_buffer_slot0(struct cso_context *cso,
1597 enum pipe_shader_type shader_stage)
1598 {
1599 cso_set_constant_buffer(cso, shader_stage, 0,
1600 &cso->aux_constbuf_saved[shader_stage]);
1601 pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
1602 NULL);
1603 }
1604
1605
1606 /**
1607 * Save all the CSO state items specified by the state_mask bitmask
1608 * of CSO_BIT_x flags.
1609 */
void
cso_save_state(struct cso_context *cso, unsigned state_mask)
{
   /* Nested save/restore is not supported: a previous save must have
    * been matched by cso_restore_state(). */
   assert(cso->saved_state == 0);

   /* Remember which items to restore in cso_restore_state(). */
   cso->saved_state = state_mask;

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_save_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_save_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_save_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_save_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_save_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_save_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_save_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_save_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_save_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_save_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_save_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_save_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_save_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_save_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_save_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_save_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_save_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_save_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_save_viewport(cso);
   /* Queries are deactivated (not saved) for the duration of the meta op. */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, false);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_save_fragment_image0(cso);
}
1660
1661
1662 /**
1663 * Restore the state which was saved by cso_save_state().
1664 */
void
cso_restore_state(struct cso_context *cso)
{
   unsigned state_mask = cso->saved_state;

   /* cso_save_state() must have been called first. */
   assert(state_mask);

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_restore_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_restore_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_restore_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_restore_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_restore_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_restore_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_restore_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_restore_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_restore_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_restore_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_restore_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_restore_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_restore_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_restore_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_restore_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_restore_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_restore_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_restore_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_restore_viewport(cso);
   /* Re-activate queries paused by cso_save_state(). */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, true);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_restore_fragment_image0(cso);

   /* Allow the next cso_save_state(). */
   cso->saved_state = 0;
}
1717
1718
1719
1720 /* drawing */
1721
1722 void
1723 cso_draw_vbo(struct cso_context *cso,
1724 const struct pipe_draw_info *info)
1725 {
1726 struct u_vbuf *vbuf = cso->vbuf_current;
1727
1728 /* We can't have both indirect drawing and SO-vertex-count drawing */
1729 assert(info->indirect == NULL || info->count_from_stream_output == NULL);
1730
1731 /* We can't have SO-vertex-count drawing with an index buffer */
1732 assert(info->count_from_stream_output == NULL || info->index_size == 0);
1733
1734 if (vbuf) {
1735 u_vbuf_draw_vbo(vbuf, info);
1736 } else {
1737 struct pipe_context *pipe = cso->pipe;
1738 pipe->draw_vbo(pipe, info);
1739 }
1740 }
1741
1742 void
1743 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1744 {
1745 struct pipe_draw_info info;
1746
1747 util_draw_init_info(&info);
1748
1749 info.mode = mode;
1750 info.start = start;
1751 info.count = count;
1752 info.min_index = start;
1753 info.max_index = start + count - 1;
1754
1755 cso_draw_vbo(cso, &info);
1756 }
1757
1758 void
1759 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1760 uint start, uint count,
1761 uint start_instance, uint instance_count)
1762 {
1763 struct pipe_draw_info info;
1764
1765 util_draw_init_info(&info);
1766
1767 info.mode = mode;
1768 info.start = start;
1769 info.count = count;
1770 info.min_index = start;
1771 info.max_index = start + count - 1;
1772 info.start_instance = start_instance;
1773 info.instance_count = instance_count;
1774
1775 cso_draw_vbo(cso, &info);
1776 }