ff40f19e68514205b2457e8498da24ab5b78d1f3
[mesa.git] / src / gallium / auxiliary / cso_cache / cso_context.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
53 /**
54 * Per-shader sampler information.
55 */
/**
 * Per-shader sampler information.
 */
struct sampler_info
{
   /* Cached CSO wrappers; also used by sanitize_hash() to know which
    * sampler states are currently bound and must not be evicted. */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Raw driver sampler-state handles (cso_samplers[i]->data). */
   void *samplers[PIPE_MAX_SAMPLERS];
};
61
62
63
/**
 * The CSO context: tracks all currently bound state so redundant
 * driver calls can be skipped, and keeps a 1-deep "saved" copy of most
 * state for the meta-ops save/restore mechanism.
 */
struct cso_context {
   struct pipe_context *pipe;
   struct cso_cache *cache;     /**< hash cache of driver CSO objects */

   struct u_vbuf *vbuf;         /**< vertex buffer translation layer, if created */
   struct u_vbuf *vbuf_current; /**< non-NULL when u_vbuf is in use right now */
   bool always_use_vbuf;

   /* Optional driver capabilities probed at creation time. */
   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_compute_shader;
   boolean has_streamout;

   unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */

   struct pipe_sampler_view *fragment_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views;

   struct pipe_sampler_view *fragment_views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views_saved;

   struct sampler_info fragment_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   struct pipe_vertex_buffer vertex_buffer0_current;
   struct pipe_vertex_buffer vertex_buffer0_saved;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   struct pipe_image_view fragment_image0_current;
   struct pipe_image_view fragment_image0_saved;

   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader;          /* no saved slot: compute isn't used by meta-ops */
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
132
133 struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
134 {
135 return cso->pipe;
136 }
137
138 static boolean delete_blend_state(struct cso_context *ctx, void *state)
139 {
140 struct cso_blend *cso = (struct cso_blend *)state;
141
142 if (ctx->blend == cso->data)
143 return FALSE;
144
145 if (cso->delete_state)
146 cso->delete_state(cso->context, cso->data);
147 FREE(state);
148 return TRUE;
149 }
150
151 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
152 {
153 struct cso_depth_stencil_alpha *cso =
154 (struct cso_depth_stencil_alpha *)state;
155
156 if (ctx->depth_stencil == cso->data)
157 return FALSE;
158
159 if (cso->delete_state)
160 cso->delete_state(cso->context, cso->data);
161 FREE(state);
162
163 return TRUE;
164 }
165
166 static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
167 {
168 struct cso_sampler *cso = (struct cso_sampler *)state;
169 if (cso->delete_state)
170 cso->delete_state(cso->context, cso->data);
171 FREE(state);
172 return TRUE;
173 }
174
175 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
176 {
177 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
178
179 if (ctx->rasterizer == cso->data)
180 return FALSE;
181 if (cso->delete_state)
182 cso->delete_state(cso->context, cso->data);
183 FREE(state);
184 return TRUE;
185 }
186
187 static boolean delete_vertex_elements(struct cso_context *ctx,
188 void *state)
189 {
190 struct cso_velements *cso = (struct cso_velements *)state;
191
192 if (ctx->velements == cso->data)
193 return FALSE;
194
195 if (cso->delete_state)
196 cso->delete_state(cso->context, cso->data);
197 FREE(state);
198 return TRUE;
199 }
200
201
202 static inline boolean delete_cso(struct cso_context *ctx,
203 void *state, enum cso_cache_type type)
204 {
205 switch (type) {
206 case CSO_BLEND:
207 return delete_blend_state(ctx, state);
208 case CSO_SAMPLER:
209 return delete_sampler_state(ctx, state);
210 case CSO_DEPTH_STENCIL_ALPHA:
211 return delete_depth_stencil_state(ctx, state);
212 case CSO_RASTERIZER:
213 return delete_rasterizer_state(ctx, state);
214 case CSO_VELEMENTS:
215 return delete_vertex_elements(ctx, state);
216 default:
217 assert(0);
218 FREE(state);
219 }
220 return FALSE;
221 }
222
223 static inline void
224 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
225 int max_size, void *user_data)
226 {
227 struct cso_context *ctx = (struct cso_context *)user_data;
228 /* if we're approach the maximum size, remove fourth of the entries
229 * otherwise every subsequent call will go through the same */
230 int hash_size = cso_hash_size(hash);
231 int max_entries = (max_size > hash_size) ? max_size : hash_size;
232 int to_remove = (max_size < max_entries) * max_entries/4;
233 struct cso_hash_iter iter;
234 struct cso_sampler **samplers_to_restore = NULL;
235 unsigned to_restore = 0;
236
237 if (hash_size > max_size)
238 to_remove += hash_size - max_size;
239
240 if (to_remove == 0)
241 return;
242
243 if (type == CSO_SAMPLER) {
244 int i, j;
245
246 samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
247 sizeof(*samplers_to_restore));
248
249 /* Temporarily remove currently bound sampler states from the hash
250 * table, to prevent them from being deleted
251 */
252 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
253 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
254 struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
255
256 if (sampler && cso_hash_take(hash, sampler->hash_key))
257 samplers_to_restore[to_restore++] = sampler;
258 }
259 }
260 }
261
262 iter = cso_hash_first_node(hash);
263 while (to_remove) {
264 /*remove elements until we're good */
265 /*fixme: currently we pick the nodes to remove at random*/
266 void *cso = cso_hash_iter_data(iter);
267
268 if (!cso)
269 break;
270
271 if (delete_cso(ctx, cso, type)) {
272 iter = cso_hash_erase(hash, iter);
273 --to_remove;
274 } else
275 iter = cso_hash_iter_next(iter);
276 }
277
278 if (type == CSO_SAMPLER) {
279 /* Put currently bound sampler states back into the hash table */
280 while (to_restore--) {
281 struct cso_sampler *sampler = samplers_to_restore[to_restore];
282
283 cso_hash_insert(hash, sampler->hash_key, sampler);
284 }
285
286 FREE(samplers_to_restore);
287 }
288 }
289
290 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
291 {
292 struct u_vbuf_caps caps;
293 bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
294
295 u_vbuf_get_caps(cso->pipe->screen, &caps);
296
297 /* Enable u_vbuf if needed. */
298 if (caps.fallback_always ||
299 (uses_user_vertex_buffers &&
300 caps.fallback_only_for_user_vbuffers)) {
301 cso->vbuf = u_vbuf_create(cso->pipe, &caps);
302 cso->vbuf_current = cso->vbuf;
303 cso->always_use_vbuf = caps.fallback_always;
304 }
305 }
306
/**
 * Create a CSO context wrapping the given pipe context.
 *
 * \param pipe   the driver context to wrap
 * \param flags  bitmask of CSO_* creation flags
 *               (e.g. CSO_NO_USER_VERTEX_BUFFERS)
 * \return the new context, or NULL on allocation failure
 */
struct cso_context *
cso_create_context(struct pipe_context *pipe, unsigned flags)
{
   struct cso_context *ctx = CALLOC_STRUCT(cso_context);
   if (!ctx)
      return NULL;

   ctx->cache = cso_cache_create();
   if (ctx->cache == NULL)
      goto out;
   /* Evict excess entries via sanitize_hash() when a table grows. */
   cso_cache_set_sanitize_callback(ctx->cache,
                                   sanitize_hash,
                                   ctx);

   ctx->pipe = pipe;
   ctx->sample_mask = ~0;

   cso_init_vbuf(ctx, flags);

   /* Enable for testing: */
   if (0) cso_set_maximum_cache_size( ctx->cache, 4 );

   /* Probe which optional shader stages the driver supports so the
    * bind/unbind helpers know whether the hooks may be called. */
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
                                      PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_geometry_shader = TRUE;
   }
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
                                      PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      ctx->has_tessellation = TRUE;
   }
   if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
                                      PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
      /* Compute also requires a supported IR (TGSI or NIR). */
      int supported_irs =
         pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
                                        PIPE_SHADER_CAP_SUPPORTED_IRS);
      if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
                           (1 << PIPE_SHADER_IR_NIR))) {
         ctx->has_compute_shader = TRUE;
      }
   }
   if (pipe->screen->get_param(pipe->screen,
                               PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
      ctx->has_streamout = TRUE;
   }

   /* No sampler has been seen yet (see cso_single_sampler). */
   ctx->max_sampler_seen = -1;
   return ctx;

out:
   /* Safe even though ctx->pipe is still NULL at this point:
    * cso_destroy_context() guards all pipe accesses. */
   cso_destroy_context( ctx );
   return NULL;
}
359
/**
 * Free the CSO context.
 *
 * Unbinds all state from the wrapped pipe context (so the driver holds
 * no dangling references), releases every resource reference this
 * context took, then frees the cache and the context itself.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   /* ctx->pipe may be NULL if creation failed before it was assigned. */
   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* Static zero-filled arrays used to unbind every sampler and
          * sampler view the driver supports per shader stage. */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* Only touch optional stages the driver actually exposes. */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Drop all references held by current and saved state. */
   for (i = 0; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }
   for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   pipe_resource_reference(&ctx->fragment_image0_current.resource, NULL);
   pipe_resource_reference(&ctx->fragment_image0_saved.resource, NULL);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   /* Deleting the cache frees all cached CSOs (none are bound anymore). */
   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
452
453
/* These functions will either find the state matching the given template
 * in the cache, or create a new state from the template, insert it into
 * the cache, and return it.
 */
458
/*
 * If the driver's create method returns NULL, the data member of the cso
 * is assigned to be the template itself.
 */
463
/**
 * Bind blend state matching the given template.
 *
 * Looks the template up in the CSO cache, creating (and caching) the
 * driver object on a miss; the driver bind is skipped when the resulting
 * handle is already current.
 *
 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
enum pipe_error cso_set_blend(struct cso_context *ctx,
                              const struct pipe_blend_state *templ)
{
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;

   /* Without independent blend enables only rt[0] matters, so hash just
    * the prefix of the struct up to rt[1] for better cache hit rates. */
   key_size = templ->independent_blend_enable ?
      sizeof(struct pipe_blend_state) :
      (char *)&(templ->rt[1]) - (char *)templ;
   hash_key = cso_construct_key((void*)templ, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
                                  (void*)templ, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver state object and insert it. */
      struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Zero the tail beyond key_size so the stored state is fully
       * defined even when only the prefix was copied. */
      memset(&cso->state, 0, sizeof cso->state);
      memcpy(&cso->state, templ, key_size);
      cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
      cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
   }

   /* Skip the driver call if this state is already bound. */
   if (ctx->blend != handle) {
      ctx->blend = handle;
      ctx->pipe->bind_blend_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
507
508 static void
509 cso_save_blend(struct cso_context *ctx)
510 {
511 assert(!ctx->blend_saved);
512 ctx->blend_saved = ctx->blend;
513 }
514
515 static void
516 cso_restore_blend(struct cso_context *ctx)
517 {
518 if (ctx->blend != ctx->blend_saved) {
519 ctx->blend = ctx->blend_saved;
520 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
521 }
522 ctx->blend_saved = NULL;
523 }
524
525
526
/**
 * Bind depth/stencil/alpha state matching the given template.
 *
 * Looks the template up in the CSO cache, creating (and caching) the
 * driver object on a miss; the driver bind is skipped when the resulting
 * handle is already current.
 *
 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
enum pipe_error
cso_set_depth_stencil_alpha(struct cso_context *ctx,
                            const struct pipe_depth_stencil_alpha_state *templ)
{
   unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
   unsigned hash_key = cso_construct_key((void*)templ, key_size);
   struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
                                                       hash_key,
                                                       CSO_DEPTH_STENCIL_ALPHA,
                                                       (void*)templ, key_size);
   void *handle;

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver state object and insert it. */
      struct cso_depth_stencil_alpha *cso =
         MALLOC(sizeof(struct cso_depth_stencil_alpha));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      memcpy(&cso->state, templ, sizeof(*templ));
      cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
                                                              &cso->state);
      cso->delete_state =
         (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key,
                              CSO_DEPTH_STENCIL_ALPHA, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_depth_stencil_alpha *)
                cso_hash_iter_data(iter))->data;
   }

   /* Skip the driver call if this state is already bound. */
   if (ctx->depth_stencil != handle) {
      ctx->depth_stencil = handle;
      ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
572
573 static void
574 cso_save_depth_stencil_alpha(struct cso_context *ctx)
575 {
576 assert(!ctx->depth_stencil_saved);
577 ctx->depth_stencil_saved = ctx->depth_stencil;
578 }
579
580 static void
581 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
582 {
583 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
584 ctx->depth_stencil = ctx->depth_stencil_saved;
585 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
586 ctx->depth_stencil_saved);
587 }
588 ctx->depth_stencil_saved = NULL;
589 }
590
591
592
/**
 * Bind rasterizer state matching the given template.
 *
 * Looks the template up in the CSO cache, creating (and caching) the
 * driver object on a miss; the driver bind is skipped when the resulting
 * handle is already current.
 *
 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
                                   const struct pipe_rasterizer_state *templ)
{
   unsigned key_size = sizeof(struct pipe_rasterizer_state);
   unsigned hash_key = cso_construct_key((void*)templ, key_size);
   struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
                                                       hash_key,
                                                       CSO_RASTERIZER,
                                                       (void*)templ, key_size);
   void *handle = NULL;

   /* We can't have both point_quad_rasterization (sprites) and point_smooth
    * (round AA points) enabled at the same time.
    */
   assert(!(templ->point_quad_rasterization && templ->point_smooth));

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver state object and insert it. */
      struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      memcpy(&cso->state, templ, sizeof(*templ));
      cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
      cso->delete_state =
         (cso_state_callback)ctx->pipe->delete_rasterizer_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
   }

   /* Skip the driver call if this state is already bound. */
   if (ctx->rasterizer != handle) {
      ctx->rasterizer = handle;
      ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
638
639 static void
640 cso_save_rasterizer(struct cso_context *ctx)
641 {
642 assert(!ctx->rasterizer_saved);
643 ctx->rasterizer_saved = ctx->rasterizer;
644 }
645
646 static void
647 cso_restore_rasterizer(struct cso_context *ctx)
648 {
649 if (ctx->rasterizer != ctx->rasterizer_saved) {
650 ctx->rasterizer = ctx->rasterizer_saved;
651 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
652 }
653 ctx->rasterizer_saved = NULL;
654 }
655
656
657 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
658 {
659 if (ctx->fragment_shader != handle) {
660 ctx->fragment_shader = handle;
661 ctx->pipe->bind_fs_state(ctx->pipe, handle);
662 }
663 }
664
665 void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
666 {
667 if (handle == ctx->fragment_shader) {
668 /* unbind before deleting */
669 ctx->pipe->bind_fs_state(ctx->pipe, NULL);
670 ctx->fragment_shader = NULL;
671 }
672 ctx->pipe->delete_fs_state(ctx->pipe, handle);
673 }
674
675 static void
676 cso_save_fragment_shader(struct cso_context *ctx)
677 {
678 assert(!ctx->fragment_shader_saved);
679 ctx->fragment_shader_saved = ctx->fragment_shader;
680 }
681
682 static void
683 cso_restore_fragment_shader(struct cso_context *ctx)
684 {
685 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
686 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
687 ctx->fragment_shader = ctx->fragment_shader_saved;
688 }
689 ctx->fragment_shader_saved = NULL;
690 }
691
692
693 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
694 {
695 if (ctx->vertex_shader != handle) {
696 ctx->vertex_shader = handle;
697 ctx->pipe->bind_vs_state(ctx->pipe, handle);
698 }
699 }
700
701 void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
702 {
703 if (handle == ctx->vertex_shader) {
704 /* unbind before deleting */
705 ctx->pipe->bind_vs_state(ctx->pipe, NULL);
706 ctx->vertex_shader = NULL;
707 }
708 ctx->pipe->delete_vs_state(ctx->pipe, handle);
709 }
710
711 static void
712 cso_save_vertex_shader(struct cso_context *ctx)
713 {
714 assert(!ctx->vertex_shader_saved);
715 ctx->vertex_shader_saved = ctx->vertex_shader;
716 }
717
718 static void
719 cso_restore_vertex_shader(struct cso_context *ctx)
720 {
721 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
722 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
723 ctx->vertex_shader = ctx->vertex_shader_saved;
724 }
725 ctx->vertex_shader_saved = NULL;
726 }
727
728
729 void cso_set_framebuffer(struct cso_context *ctx,
730 const struct pipe_framebuffer_state *fb)
731 {
732 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
733 util_copy_framebuffer_state(&ctx->fb, fb);
734 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
735 }
736 }
737
738 static void
739 cso_save_framebuffer(struct cso_context *ctx)
740 {
741 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
742 }
743
744 static void
745 cso_restore_framebuffer(struct cso_context *ctx)
746 {
747 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
748 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
749 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
750 util_unreference_framebuffer_state(&ctx->fb_saved);
751 }
752 }
753
754
755 void cso_set_viewport(struct cso_context *ctx,
756 const struct pipe_viewport_state *vp)
757 {
758 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
759 ctx->vp = *vp;
760 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
761 }
762 }
763
764 /**
765 * Setup viewport state for given width and height (position is always (0,0)).
766 * Invert the Y axis if 'invert' is true.
767 */
768 void
769 cso_set_viewport_dims(struct cso_context *ctx,
770 float width, float height, boolean invert)
771 {
772 struct pipe_viewport_state vp;
773 vp.scale[0] = width * 0.5f;
774 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
775 vp.scale[2] = 0.5f;
776 vp.translate[0] = 0.5f * width;
777 vp.translate[1] = 0.5f * height;
778 vp.translate[2] = 0.5f;
779 cso_set_viewport(ctx, &vp);
780 }
781
782 static void
783 cso_save_viewport(struct cso_context *ctx)
784 {
785 ctx->vp_saved = ctx->vp;
786 }
787
788
789 static void
790 cso_restore_viewport(struct cso_context *ctx)
791 {
792 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
793 ctx->vp = ctx->vp_saved;
794 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
795 }
796 }
797
798
799 void cso_set_blend_color(struct cso_context *ctx,
800 const struct pipe_blend_color *bc)
801 {
802 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
803 ctx->blend_color = *bc;
804 ctx->pipe->set_blend_color(ctx->pipe, bc);
805 }
806 }
807
808 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
809 {
810 if (ctx->sample_mask != sample_mask) {
811 ctx->sample_mask = sample_mask;
812 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
813 }
814 }
815
816 static void
817 cso_save_sample_mask(struct cso_context *ctx)
818 {
819 ctx->sample_mask_saved = ctx->sample_mask;
820 }
821
822 static void
823 cso_restore_sample_mask(struct cso_context *ctx)
824 {
825 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
826 }
827
828 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
829 {
830 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
831 ctx->min_samples = min_samples;
832 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
833 }
834 }
835
836 static void
837 cso_save_min_samples(struct cso_context *ctx)
838 {
839 ctx->min_samples_saved = ctx->min_samples;
840 }
841
842 static void
843 cso_restore_min_samples(struct cso_context *ctx)
844 {
845 cso_set_min_samples(ctx, ctx->min_samples_saved);
846 }
847
848 void cso_set_stencil_ref(struct cso_context *ctx,
849 const struct pipe_stencil_ref *sr)
850 {
851 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
852 ctx->stencil_ref = *sr;
853 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
854 }
855 }
856
857 static void
858 cso_save_stencil_ref(struct cso_context *ctx)
859 {
860 ctx->stencil_ref_saved = ctx->stencil_ref;
861 }
862
863
864 static void
865 cso_restore_stencil_ref(struct cso_context *ctx)
866 {
867 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
868 sizeof(ctx->stencil_ref))) {
869 ctx->stencil_ref = ctx->stencil_ref_saved;
870 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
871 }
872 }
873
874 void cso_set_render_condition(struct cso_context *ctx,
875 struct pipe_query *query,
876 boolean condition,
877 enum pipe_render_cond_flag mode)
878 {
879 struct pipe_context *pipe = ctx->pipe;
880
881 if (ctx->render_condition != query ||
882 ctx->render_condition_mode != mode ||
883 ctx->render_condition_cond != condition) {
884 pipe->render_condition(pipe, query, condition, mode);
885 ctx->render_condition = query;
886 ctx->render_condition_cond = condition;
887 ctx->render_condition_mode = mode;
888 }
889 }
890
891 static void
892 cso_save_render_condition(struct cso_context *ctx)
893 {
894 ctx->render_condition_saved = ctx->render_condition;
895 ctx->render_condition_cond_saved = ctx->render_condition_cond;
896 ctx->render_condition_mode_saved = ctx->render_condition_mode;
897 }
898
899 static void
900 cso_restore_render_condition(struct cso_context *ctx)
901 {
902 cso_set_render_condition(ctx, ctx->render_condition_saved,
903 ctx->render_condition_cond_saved,
904 ctx->render_condition_mode_saved);
905 }
906
907 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
908 {
909 assert(ctx->has_geometry_shader || !handle);
910
911 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
912 ctx->geometry_shader = handle;
913 ctx->pipe->bind_gs_state(ctx->pipe, handle);
914 }
915 }
916
917 void cso_delete_geometry_shader(struct cso_context *ctx, void *handle)
918 {
919 if (handle == ctx->geometry_shader) {
920 /* unbind before deleting */
921 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
922 ctx->geometry_shader = NULL;
923 }
924 ctx->pipe->delete_gs_state(ctx->pipe, handle);
925 }
926
927 static void
928 cso_save_geometry_shader(struct cso_context *ctx)
929 {
930 if (!ctx->has_geometry_shader) {
931 return;
932 }
933
934 assert(!ctx->geometry_shader_saved);
935 ctx->geometry_shader_saved = ctx->geometry_shader;
936 }
937
938 static void
939 cso_restore_geometry_shader(struct cso_context *ctx)
940 {
941 if (!ctx->has_geometry_shader) {
942 return;
943 }
944
945 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
946 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
947 ctx->geometry_shader = ctx->geometry_shader_saved;
948 }
949 ctx->geometry_shader_saved = NULL;
950 }
951
952 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
953 {
954 assert(ctx->has_tessellation || !handle);
955
956 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
957 ctx->tessctrl_shader = handle;
958 ctx->pipe->bind_tcs_state(ctx->pipe, handle);
959 }
960 }
961
962 void cso_delete_tessctrl_shader(struct cso_context *ctx, void *handle)
963 {
964 if (handle == ctx->tessctrl_shader) {
965 /* unbind before deleting */
966 ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
967 ctx->tessctrl_shader = NULL;
968 }
969 ctx->pipe->delete_tcs_state(ctx->pipe, handle);
970 }
971
972 static void
973 cso_save_tessctrl_shader(struct cso_context *ctx)
974 {
975 if (!ctx->has_tessellation) {
976 return;
977 }
978
979 assert(!ctx->tessctrl_shader_saved);
980 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
981 }
982
983 static void
984 cso_restore_tessctrl_shader(struct cso_context *ctx)
985 {
986 if (!ctx->has_tessellation) {
987 return;
988 }
989
990 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
991 ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
992 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
993 }
994 ctx->tessctrl_shader_saved = NULL;
995 }
996
997 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
998 {
999 assert(ctx->has_tessellation || !handle);
1000
1001 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
1002 ctx->tesseval_shader = handle;
1003 ctx->pipe->bind_tes_state(ctx->pipe, handle);
1004 }
1005 }
1006
1007 void cso_delete_tesseval_shader(struct cso_context *ctx, void *handle)
1008 {
1009 if (handle == ctx->tesseval_shader) {
1010 /* unbind before deleting */
1011 ctx->pipe->bind_tes_state(ctx->pipe, NULL);
1012 ctx->tesseval_shader = NULL;
1013 }
1014 ctx->pipe->delete_tes_state(ctx->pipe, handle);
1015 }
1016
1017 static void
1018 cso_save_tesseval_shader(struct cso_context *ctx)
1019 {
1020 if (!ctx->has_tessellation) {
1021 return;
1022 }
1023
1024 assert(!ctx->tesseval_shader_saved);
1025 ctx->tesseval_shader_saved = ctx->tesseval_shader;
1026 }
1027
1028 static void
1029 cso_restore_tesseval_shader(struct cso_context *ctx)
1030 {
1031 if (!ctx->has_tessellation) {
1032 return;
1033 }
1034
1035 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1036 ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
1037 ctx->tesseval_shader = ctx->tesseval_shader_saved;
1038 }
1039 ctx->tesseval_shader_saved = NULL;
1040 }
1041
1042 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
1043 {
1044 assert(ctx->has_compute_shader || !handle);
1045
1046 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1047 ctx->compute_shader = handle;
1048 ctx->pipe->bind_compute_state(ctx->pipe, handle);
1049 }
1050 }
1051
1052 void cso_delete_compute_shader(struct cso_context *ctx, void *handle)
1053 {
1054 if (handle == ctx->compute_shader) {
1055 /* unbind before deleting */
1056 ctx->pipe->bind_compute_state(ctx->pipe, NULL);
1057 ctx->compute_shader = NULL;
1058 }
1059 ctx->pipe->delete_compute_state(ctx->pipe, handle);
1060 }
1061
/**
 * Bind vertex-elements state matching the given template, bypassing
 * u_vbuf.  Uses the CSO cache; creates and caches the driver object on
 * a miss, and skips the driver bind when the handle is already current.
 */
static void
cso_set_vertex_elements_direct(struct cso_context *ctx,
                               const struct cso_velems_state *velems)
{
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;

   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   /* NOTE: this memcpy'd key layout assumes cso_velems_state places the
    * unsigned count directly before the velems array. */
   key_size = sizeof(struct pipe_vertex_element) * velems->count +
              sizeof(unsigned);
   hash_key = cso_construct_key((void*)velems, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
                                  (void*)velems, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create the driver state object and insert it. */
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return;

      memcpy(&cso->state, velems, key_size);
      cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe,
                                                          velems->count,
                                                          &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   /* Skip the driver call if this state is already bound. */
   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
   }
}
1111
1112 enum pipe_error
1113 cso_set_vertex_elements(struct cso_context *ctx,
1114 const struct cso_velems_state *velems)
1115 {
1116 struct u_vbuf *vbuf = ctx->vbuf_current;
1117
1118 if (vbuf) {
1119 u_vbuf_set_vertex_elements(vbuf, velems);
1120 return PIPE_OK;
1121 }
1122
1123 cso_set_vertex_elements_direct(ctx, velems);
1124 return PIPE_OK;
1125 }
1126
1127 static void
1128 cso_save_vertex_elements(struct cso_context *ctx)
1129 {
1130 struct u_vbuf *vbuf = ctx->vbuf_current;
1131
1132 if (vbuf) {
1133 u_vbuf_save_vertex_elements(vbuf);
1134 return;
1135 }
1136
1137 assert(!ctx->velements_saved);
1138 ctx->velements_saved = ctx->velements;
1139 }
1140
1141 static void
1142 cso_restore_vertex_elements(struct cso_context *ctx)
1143 {
1144 struct u_vbuf *vbuf = ctx->vbuf_current;
1145
1146 if (vbuf) {
1147 u_vbuf_restore_vertex_elements(vbuf);
1148 return;
1149 }
1150
1151 if (ctx->velements != ctx->velements_saved) {
1152 ctx->velements = ctx->velements_saved;
1153 ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1154 }
1155 ctx->velements_saved = NULL;
1156 }
1157
1158 /* vertex buffers */
1159
/**
 * Bind vertex buffers directly on the pipe context (bypassing u_vbuf).
 *
 * Slot 0 is additionally shadowed in ctx->vertex_buffer0_current so that
 * meta operations can save and restore it.
 */
static void
cso_set_vertex_buffers_direct(struct cso_context *ctx,
                              unsigned start_slot, unsigned count,
                              const struct pipe_vertex_buffer *buffers)
{
   /* Save what's in the auxiliary slot, so that we can save and restore it
    * for meta ops.
    */
   if (start_slot == 0) {
      if (buffers) {
         pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
                                      buffers);
      } else {
         /* NULL means unbind: drop the shadowed reference. */
         pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
      }
   }

   ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
}
1179
1180
1181 void cso_set_vertex_buffers(struct cso_context *ctx,
1182 unsigned start_slot, unsigned count,
1183 const struct pipe_vertex_buffer *buffers)
1184 {
1185 struct u_vbuf *vbuf = ctx->vbuf_current;
1186
1187 if (!count)
1188 return;
1189
1190 if (vbuf) {
1191 u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
1192 return;
1193 }
1194
1195 cso_set_vertex_buffers_direct(ctx, start_slot, count, buffers);
1196 }
1197
1198 static void
1199 cso_save_vertex_buffer0(struct cso_context *ctx)
1200 {
1201 struct u_vbuf *vbuf = ctx->vbuf_current;
1202
1203 if (vbuf) {
1204 u_vbuf_save_vertex_buffer0(vbuf);
1205 return;
1206 }
1207
1208 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
1209 &ctx->vertex_buffer0_current);
1210 }
1211
/**
 * Restore the vertex buffer saved by cso_save_vertex_buffer0() into slot 0
 * and release the saved reference.
 */
static void
cso_restore_vertex_buffer0(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf_current;

   if (vbuf) {
      u_vbuf_restore_vertex_buffer0(vbuf);
      return;
   }

   /* Rebind first (the bind takes its own reference into
    * vertex_buffer0_current), then drop the saved reference.
    */
   cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
}
1225
1226 /**
1227 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1228 * for user vertex buffers and user vertex buffers are not set by this call.
1229 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1230 *
1231 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1232 * such as VBOs, glBegin/End, and display lists.
1233 *
1234 * Internal operations that do "save states, draw, restore states" shouldn't
1235 * use this, because the states are only saved in either cso_context or
1236 * u_vbuf, not both.
1237 */
void
cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
                                    const struct cso_velems_state *velems,
                                    unsigned vb_count,
                                    unsigned unbind_trailing_vb_count,
                                    const struct pipe_vertex_buffer *vbuffers,
                                    bool uses_user_vertex_buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   /* Route through u_vbuf when it's always wanted or when user vertex
    * buffers need translation; otherwise talk to the driver directly.
    */
   if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
      if (!ctx->vbuf_current) {
         /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
         unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
         if (unbind_vb_count)
            cso_set_vertex_buffers_direct(ctx, 0, unbind_vb_count, NULL);

         /* Unset this to make sure the CSO is re-bound on the next use. */
         ctx->velements = NULL;
         ctx->vbuf_current = vbuf;
      } else if (unbind_trailing_vb_count) {
         /* u_vbuf was already active; only clear the trailing slots. */
         u_vbuf_set_vertex_buffers(vbuf, vb_count, unbind_trailing_vb_count,
                                   NULL);
      }

      if (vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, vb_count, vbuffers);
      u_vbuf_set_vertex_elements(vbuf, velems);
      return;
   }

   if (ctx->vbuf_current) {
      /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
      unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
      if (unbind_vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, unbind_vb_count, NULL);

      /* Unset this to make sure the CSO is re-bound on the next use. */
      u_vbuf_unset_vertex_elements(vbuf);
      ctx->vbuf_current = NULL;
   } else if (unbind_trailing_vb_count) {
      /* cso_context was already active; only clear the trailing slots. */
      cso_set_vertex_buffers_direct(ctx, vb_count, unbind_trailing_vb_count,
                                    NULL);
   }

   if (vb_count)
      cso_set_vertex_buffers_direct(ctx, 0, vb_count, vbuffers);
   cso_set_vertex_elements_direct(ctx, velems);
}
1287
/**
 * Stage a sampler state for one (shader stage, sampler index) slot.
 *
 * The template is looked up in (or added to) the CSO cache and the
 * resulting driver sampler is stored in ctx->samplers[], but it is not
 * bound until cso_single_sampler_done() is called.  A NULL template is
 * a no-op.
 */
void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
                   unsigned idx, const struct pipe_sampler_state *templ)
{
   if (templ) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_sampler *cso;
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* Cache miss: create the driver sampler and insert it. */
         cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;
         cso->hash_key = hash_key;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            FREE(cso);
            return;
         }
      }
      else {
         cso = cso_hash_iter_data(iter);
      }

      /* Stage the sampler; cso_single_sampler_done() binds the range
       * [0, max_sampler_seen].
       */
      ctx->samplers[shader_stage].cso_samplers[idx] = cso;
      ctx->samplers[shader_stage].samplers[idx] = cso->data;
      ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
   }
}
1328
1329
1330 /**
1331 * Send staged sampler state to the driver.
1332 */
1333 void
1334 cso_single_sampler_done(struct cso_context *ctx,
1335 enum pipe_shader_type shader_stage)
1336 {
1337 struct sampler_info *info = &ctx->samplers[shader_stage];
1338
1339 if (ctx->max_sampler_seen == -1)
1340 return;
1341
1342 ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1343 ctx->max_sampler_seen + 1,
1344 info->samplers);
1345 ctx->max_sampler_seen = -1;
1346 }
1347
1348
1349 /*
1350 * If the function encouters any errors it will return the
1351 * last one. Done to always try to set as many samplers
1352 * as possible.
1353 */
1354 void
1355 cso_set_samplers(struct cso_context *ctx,
1356 enum pipe_shader_type shader_stage,
1357 unsigned nr,
1358 const struct pipe_sampler_state **templates)
1359 {
1360 for (unsigned i = 0; i < nr; i++)
1361 cso_single_sampler(ctx, shader_stage, i, templates[i]);
1362
1363 cso_single_sampler_done(ctx, shader_stage);
1364 }
1365
1366 static void
1367 cso_save_fragment_samplers(struct cso_context *ctx)
1368 {
1369 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1370 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1371
1372 memcpy(saved->cso_samplers, info->cso_samplers,
1373 sizeof(info->cso_samplers));
1374 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1375 }
1376
1377
/**
 * Restore the fragment sampler states saved by cso_save_fragment_samplers()
 * and rebind them on the pipe context.
 */
static void
cso_restore_fragment_samplers(struct cso_context *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
   struct sampler_info *saved = &ctx->fragment_samplers_saved;

   memcpy(info->cso_samplers, saved->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(info->samplers, saved->samplers, sizeof(info->samplers));

   /* Find the highest used slot so cso_single_sampler_done() rebinds the
    * whole range.  Note: if every slot is NULL, max_sampler_seen keeps
    * its previous value.
    */
   for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
      if (info->samplers[i]) {
         ctx->max_sampler_seen = i;
         break;
      }
   }

   cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
}
1397
1398
/**
 * Bind sampler views for a shader stage.
 *
 * Fragment-stage views are reference-counted and shadowed in
 * ctx->fragment_views so they can be saved/restored for meta ops; the
 * driver call is skipped if nothing changed.  Other stages are passed
 * straight through to the driver.
 */
void
cso_set_sampler_views(struct cso_context *ctx,
                      enum pipe_shader_type shader_stage,
                      unsigned count,
                      struct pipe_sampler_view **views)
{
   if (shader_stage == PIPE_SHADER_FRAGMENT) {
      unsigned i;
      boolean any_change = FALSE;

      /* reference new views */
      for (i = 0; i < count; i++) {
         any_change |= ctx->fragment_views[i] != views[i];
         pipe_sampler_view_reference(&ctx->fragment_views[i], views[i]);
      }
      /* unref extra old views, if any */
      for (; i < ctx->nr_fragment_views; i++) {
         any_change |= ctx->fragment_views[i] != NULL;
         pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      }

      /* bind the new sampler views */
      if (any_change) {
         ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
                                      MAX2(ctx->nr_fragment_views, count),
                                      ctx->fragment_views);
      }

      ctx->nr_fragment_views = count;
   }
   else
      ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, count, views);
}
1432
1433
1434 static void
1435 cso_save_fragment_sampler_views(struct cso_context *ctx)
1436 {
1437 unsigned i;
1438
1439 ctx->nr_fragment_views_saved = ctx->nr_fragment_views;
1440
1441 for (i = 0; i < ctx->nr_fragment_views; i++) {
1442 assert(!ctx->fragment_views_saved[i]);
1443 pipe_sampler_view_reference(&ctx->fragment_views_saved[i],
1444 ctx->fragment_views[i]);
1445 }
1446 }
1447
1448
/**
 * Restore the fragment sampler views saved by
 * cso_save_fragment_sampler_views() and rebind them, dropping any views
 * bound since the save.
 */
static void
cso_restore_fragment_sampler_views(struct cso_context *ctx)
{
   unsigned i, nr_saved = ctx->nr_fragment_views_saved;
   unsigned num;

   for (i = 0; i < nr_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      /* move the reference from one pointer to another */
      ctx->fragment_views[i] = ctx->fragment_views_saved[i];
      ctx->fragment_views_saved[i] = NULL;
   }
   /* unreference views in slots beyond the saved count */
   for (; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }

   /* bind the larger of the two ranges so stale slots get cleared */
   num = MAX2(ctx->nr_fragment_views, nr_saved);

   /* bind the old/saved sampler views */
   ctx->pipe->set_sampler_views(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, num,
                                ctx->fragment_views);

   ctx->nr_fragment_views = nr_saved;
   ctx->nr_fragment_views_saved = 0;
}
1474
1475
1476 void
1477 cso_set_shader_images(struct cso_context *ctx,
1478 enum pipe_shader_type shader_stage,
1479 unsigned start, unsigned count,
1480 struct pipe_image_view *images)
1481 {
1482 if (shader_stage == PIPE_SHADER_FRAGMENT && start == 0 && count >= 1) {
1483 util_copy_image_view(&ctx->fragment_image0_current, &images[0]);
1484 }
1485
1486 ctx->pipe->set_shader_images(ctx->pipe, shader_stage, start, count, images);
1487 }
1488
1489
1490 static void
1491 cso_save_fragment_image0(struct cso_context *ctx)
1492 {
1493 util_copy_image_view(&ctx->fragment_image0_saved,
1494 &ctx->fragment_image0_current);
1495 }
1496
1497
1498 static void
1499 cso_restore_fragment_image0(struct cso_context *ctx)
1500 {
1501 cso_set_shader_images(ctx, PIPE_SHADER_FRAGMENT, 0, 1,
1502 &ctx->fragment_image0_saved);
1503 }
1504
1505
/**
 * Bind stream-output (transform feedback) targets.
 *
 * Targets are reference-counted into ctx->so_targets so they can be
 * saved/restored for meta ops; previously bound targets beyond the new
 * count are unreferenced.  \p offsets is passed through to the driver.
 */
void
cso_set_stream_outputs(struct cso_context *ctx,
                       unsigned num_targets,
                       struct pipe_stream_output_target **targets,
                       const unsigned *offsets)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;

   if (!ctx->has_streamout) {
      assert(num_targets == 0);
      return;
   }

   if (ctx->nr_so_targets == 0 && num_targets == 0) {
      /* Nothing to do. */
      return;
   }

   /* reference new targets */
   for (i = 0; i < num_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
   }
   /* unref extra old targets, if any */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, num_targets, targets,
                                   offsets);
   ctx->nr_so_targets = num_targets;
}
1538
1539 static void
1540 cso_save_stream_outputs(struct cso_context *ctx)
1541 {
1542 uint i;
1543
1544 if (!ctx->has_streamout) {
1545 return;
1546 }
1547
1548 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1549
1550 for (i = 0; i < ctx->nr_so_targets; i++) {
1551 assert(!ctx->so_targets_saved[i]);
1552 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1553 }
1554 }
1555
/**
 * Restore the stream-output targets saved by cso_save_stream_outputs()
 * and rebind them with append offsets (-1), so writing resumes where
 * each target left off.
 */
static void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* unreference targets in slots beyond the saved count */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1591
1592 /* constant buffers */
1593
1594 void
1595 cso_set_constant_buffer(struct cso_context *cso,
1596 enum pipe_shader_type shader_stage,
1597 unsigned index, struct pipe_constant_buffer *cb)
1598 {
1599 struct pipe_context *pipe = cso->pipe;
1600
1601 pipe->set_constant_buffer(pipe, shader_stage, index, cb);
1602
1603 if (index == 0) {
1604 util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
1605 }
1606 }
1607
1608 void
1609 cso_set_constant_buffer_resource(struct cso_context *cso,
1610 enum pipe_shader_type shader_stage,
1611 unsigned index,
1612 struct pipe_resource *buffer)
1613 {
1614 if (buffer) {
1615 struct pipe_constant_buffer cb;
1616 cb.buffer = buffer;
1617 cb.buffer_offset = 0;
1618 cb.buffer_size = buffer->width0;
1619 cb.user_buffer = NULL;
1620 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1621 } else {
1622 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1623 }
1624 }
1625
1626 void
1627 cso_set_constant_user_buffer(struct cso_context *cso,
1628 enum pipe_shader_type shader_stage,
1629 unsigned index, void *ptr, unsigned size)
1630 {
1631 if (ptr) {
1632 struct pipe_constant_buffer cb;
1633 cb.buffer = NULL;
1634 cb.buffer_offset = 0;
1635 cb.buffer_size = size;
1636 cb.user_buffer = ptr;
1637 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1638 } else {
1639 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1640 }
1641 }
1642
1643 void
1644 cso_save_constant_buffer_slot0(struct cso_context *cso,
1645 enum pipe_shader_type shader_stage)
1646 {
1647 util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
1648 &cso->aux_constbuf_current[shader_stage]);
1649 }
1650
/**
 * Restore constant buffer slot 0 saved by cso_save_constant_buffer_slot0()
 * and release the saved buffer reference.
 */
void
cso_restore_constant_buffer_slot0(struct cso_context *cso,
                                  enum pipe_shader_type shader_stage)
{
   cso_set_constant_buffer(cso, shader_stage, 0,
                           &cso->aux_constbuf_saved[shader_stage]);
   /* drop the reference held by the saved copy */
   pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
                           NULL);
}
1660
1661
1662 /**
1663 * Save all the CSO state items specified by the state_mask bitmask
1664 * of CSO_BIT_x flags.
1665 */
void
cso_save_state(struct cso_context *cso, unsigned state_mask)
{
   /* Nested save/restore is not supported: a previous save must have been
    * restored before saving again.
    */
   assert(cso->saved_state == 0);

   /* Remember the mask so cso_restore_state() restores the same items. */
   cso->saved_state = state_mask;

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_save_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_save_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_save_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_save_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_save_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_save_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_save_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_save_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_save_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_save_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_save_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_save_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_save_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_save_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_save_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_save_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_save_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_save_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_save_viewport(cso);
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, false);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_save_fragment_image0(cso);
}
1716
1717
1718 /**
1719 * Restore the state which was saved by cso_save_state().
1720 */
void
cso_restore_state(struct cso_context *cso)
{
   /* The mask recorded by cso_save_state() selects what to restore. */
   unsigned state_mask = cso->saved_state;

   assert(state_mask);

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_restore_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_restore_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_restore_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_restore_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_restore_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_restore_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_restore_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_restore_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_restore_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_restore_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_restore_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_restore_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_restore_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_restore_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_restore_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_restore_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_restore_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_restore_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_restore_viewport(cso);
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, true);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_restore_fragment_image0(cso);

   /* Clear the mask so a new cso_save_state() is allowed. */
   cso->saved_state = 0;
}
1773
1774
1775
1776 /* drawing */
1777
1778 void
1779 cso_draw_vbo(struct cso_context *cso,
1780 const struct pipe_draw_info *info)
1781 {
1782 struct u_vbuf *vbuf = cso->vbuf_current;
1783
1784 /* We can't have both indirect drawing and SO-vertex-count drawing */
1785 assert(info->indirect == NULL || info->count_from_stream_output == NULL);
1786
1787 /* We can't have SO-vertex-count drawing with an index buffer */
1788 assert(info->count_from_stream_output == NULL || info->index_size == 0);
1789
1790 if (vbuf) {
1791 u_vbuf_draw_vbo(vbuf, info);
1792 } else {
1793 struct pipe_context *pipe = cso->pipe;
1794 pipe->draw_vbo(pipe, info);
1795 }
1796 }
1797
1798 void
1799 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1800 {
1801 struct pipe_draw_info info;
1802
1803 util_draw_init_info(&info);
1804
1805 info.mode = mode;
1806 info.start = start;
1807 info.count = count;
1808 info.min_index = start;
1809 info.max_index = start + count - 1;
1810
1811 cso_draw_vbo(cso, &info);
1812 }
1813
1814 void
1815 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1816 uint start, uint count,
1817 uint start_instance, uint instance_count)
1818 {
1819 struct pipe_draw_info info;
1820
1821 util_draw_init_info(&info);
1822
1823 info.mode = mode;
1824 info.start = start;
1825 info.count = count;
1826 info.min_index = start;
1827 info.max_index = start + count - 1;
1828 info.start_instance = start_instance;
1829 info.instance_count = instance_count;
1830
1831 cso_draw_vbo(cso, &info);
1832 }