gallium/cso_hash: make cso_hash declared within structures instead of alloc'd
[mesa.git] / src / gallium / auxiliary / cso_cache / cso_context.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
53 /**
54 * Per-shader sampler information.
55 */
/**
 * Per-shader sampler information.
 */
struct sampler_info
{
   /* Cached CSO wrappers for each bound sampler state (cache entries). */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Driver sampler-state handles for the same slots. */
   void *samplers[PIPE_MAX_SAMPLERS];
};
61
62
63
struct cso_context {
   struct pipe_context *pipe;
   struct cso_cache *cache;

   /* Optional vertex-buffer translation layer.
    * NOTE(review): vbuf_current appears to be either 'vbuf' or NULL
    * depending on whether translation is currently needed -- confirm. */
   struct u_vbuf *vbuf;
   struct u_vbuf *vbuf_current;
   bool always_use_vbuf;

   /* Capabilities probed from the pipe screen at creation time. */
   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_compute_shader;
   boolean has_streamout;

   unsigned saved_state;   /**< bitmask of CSO_BIT_x flags */

   /* Fragment sampler views, plus their 1-deep saved copies. */
   struct pipe_sampler_view *fragment_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views;

   struct pipe_sampler_view *fragment_views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views_saved;

   struct sampler_info fragment_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   struct pipe_vertex_buffer vertex_buffer0_current;
   struct pipe_vertex_buffer vertex_buffer0_saved;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   struct pipe_image_view fragment_image0_current;
   struct pipe_image_view fragment_image0_saved;

   /* Stream-output targets, current and saved. */
   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader;   /* no '_saved' slot declared for compute */
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
132
/**
 * Return the pipe_context this CSO context wraps.
 */
struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
{
   return cso->pipe;
}
137
138 static boolean delete_blend_state(struct cso_context *ctx, void *state)
139 {
140 struct cso_blend *cso = (struct cso_blend *)state;
141
142 if (ctx->blend == cso->data)
143 return FALSE;
144
145 if (cso->delete_state)
146 cso->delete_state(cso->context, cso->data);
147 FREE(state);
148 return TRUE;
149 }
150
151 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
152 {
153 struct cso_depth_stencil_alpha *cso =
154 (struct cso_depth_stencil_alpha *)state;
155
156 if (ctx->depth_stencil == cso->data)
157 return FALSE;
158
159 if (cso->delete_state)
160 cso->delete_state(cso->context, cso->data);
161 FREE(state);
162
163 return TRUE;
164 }
165
166 static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
167 {
168 struct cso_sampler *cso = (struct cso_sampler *)state;
169 if (cso->delete_state)
170 cso->delete_state(cso->context, cso->data);
171 FREE(state);
172 return TRUE;
173 }
174
175 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
176 {
177 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
178
179 if (ctx->rasterizer == cso->data)
180 return FALSE;
181 if (cso->delete_state)
182 cso->delete_state(cso->context, cso->data);
183 FREE(state);
184 return TRUE;
185 }
186
187 static boolean delete_vertex_elements(struct cso_context *ctx,
188 void *state)
189 {
190 struct cso_velements *cso = (struct cso_velements *)state;
191
192 if (ctx->velements == cso->data)
193 return FALSE;
194
195 if (cso->delete_state)
196 cso->delete_state(cso->context, cso->data);
197 FREE(state);
198 return TRUE;
199 }
200
201
202 static inline boolean delete_cso(struct cso_context *ctx,
203 void *state, enum cso_cache_type type)
204 {
205 switch (type) {
206 case CSO_BLEND:
207 return delete_blend_state(ctx, state);
208 case CSO_SAMPLER:
209 return delete_sampler_state(ctx, state);
210 case CSO_DEPTH_STENCIL_ALPHA:
211 return delete_depth_stencil_state(ctx, state);
212 case CSO_RASTERIZER:
213 return delete_rasterizer_state(ctx, state);
214 case CSO_VELEMENTS:
215 return delete_vertex_elements(ctx, state);
216 default:
217 assert(0);
218 FREE(state);
219 }
220 return FALSE;
221 }
222
223 static inline void
224 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
225 int max_size, void *user_data)
226 {
227 struct cso_context *ctx = (struct cso_context *)user_data;
228 /* if we're approach the maximum size, remove fourth of the entries
229 * otherwise every subsequent call will go through the same */
230 int hash_size = cso_hash_size(hash);
231 int max_entries = (max_size > hash_size) ? max_size : hash_size;
232 int to_remove = (max_size < max_entries) * max_entries/4;
233 struct cso_hash_iter iter;
234 struct cso_sampler **samplers_to_restore = NULL;
235 unsigned to_restore = 0;
236
237 if (hash_size > max_size)
238 to_remove += hash_size - max_size;
239
240 if (to_remove == 0)
241 return;
242
243 if (type == CSO_SAMPLER) {
244 int i, j;
245
246 samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
247 sizeof(*samplers_to_restore));
248
249 /* Temporarily remove currently bound sampler states from the hash
250 * table, to prevent them from being deleted
251 */
252 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
253 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
254 struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
255
256 if (sampler && cso_hash_take(hash, sampler->hash_key))
257 samplers_to_restore[to_restore++] = sampler;
258 }
259 }
260 }
261
262 iter = cso_hash_first_node(hash);
263 while (to_remove) {
264 /*remove elements until we're good */
265 /*fixme: currently we pick the nodes to remove at random*/
266 void *cso = cso_hash_iter_data(iter);
267
268 if (!cso)
269 break;
270
271 if (delete_cso(ctx, cso, type)) {
272 iter = cso_hash_erase(hash, iter);
273 --to_remove;
274 } else
275 iter = cso_hash_iter_next(iter);
276 }
277
278 if (type == CSO_SAMPLER) {
279 /* Put currently bound sampler states back into the hash table */
280 while (to_restore--) {
281 struct cso_sampler *sampler = samplers_to_restore[to_restore];
282
283 cso_hash_insert(hash, sampler->hash_key, sampler);
284 }
285
286 FREE(samplers_to_restore);
287 }
288 }
289
290 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
291 {
292 struct u_vbuf_caps caps;
293 bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
294
295 u_vbuf_get_caps(cso->pipe->screen, &caps);
296
297 /* Enable u_vbuf if needed. */
298 if (caps.fallback_always ||
299 (uses_user_vertex_buffers &&
300 caps.fallback_only_for_user_vbuffers)) {
301 cso->vbuf = u_vbuf_create(cso->pipe, &caps);
302 cso->vbuf_current = cso->vbuf;
303 cso->always_use_vbuf = caps.fallback_always;
304 }
305 }
306
307 struct cso_context *
308 cso_create_context(struct pipe_context *pipe, unsigned flags)
309 {
310 struct cso_context *ctx = CALLOC_STRUCT(cso_context);
311 if (!ctx)
312 return NULL;
313
314 ctx->cache = cso_cache_create();
315 if (ctx->cache == NULL)
316 goto out;
317 cso_cache_set_sanitize_callback(ctx->cache,
318 sanitize_hash,
319 ctx);
320
321 ctx->pipe = pipe;
322 ctx->sample_mask = ~0;
323
324 cso_init_vbuf(ctx, flags);
325
326 /* Enable for testing: */
327 if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
328
329 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
330 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
331 ctx->has_geometry_shader = TRUE;
332 }
333 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
334 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
335 ctx->has_tessellation = TRUE;
336 }
337 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
338 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
339 int supported_irs =
340 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
341 PIPE_SHADER_CAP_SUPPORTED_IRS);
342 if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
343 (1 << PIPE_SHADER_IR_NIR))) {
344 ctx->has_compute_shader = TRUE;
345 }
346 }
347 if (pipe->screen->get_param(pipe->screen,
348 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
349 ctx->has_streamout = TRUE;
350 }
351
352 ctx->max_sampler_seen = -1;
353 return ctx;
354
355 out:
356 cso_destroy_context( ctx );
357 return NULL;
358 }
359
/**
 * Free the CSO context.
 *
 * Unbinds every piece of state this module may have bound (so the
 * driver drops its references), releases all held references, then
 * frees the cache and the context itself.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* Zero-filled arrays used to unbind all samplers and views.
          * static so they aren't rebuilt on the stack each call. */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            /* Only unbind as many slots as the stage actually exposes. */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* Optional stages are only unbound when the driver supports them. */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Release all references held by this context. */
   for (i = 0; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }
   for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   pipe_resource_reference(&ctx->fragment_image0_current.resource, NULL);
   pipe_resource_reference(&ctx->fragment_image0_saved.resource, NULL);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
452
453
/* These functions either find the state matching the given template
 * in the cache, or create a new state from the template, insert it
 * into the cache, and return it.
 */

/*
 * If the driver returns 0 from the create method, the data member of
 * the cso is assigned to be the template itself.
 */
463
464 enum pipe_error cso_set_blend(struct cso_context *ctx,
465 const struct pipe_blend_state *templ)
466 {
467 unsigned key_size, hash_key;
468 struct cso_hash_iter iter;
469 void *handle;
470
471 key_size = templ->independent_blend_enable ?
472 sizeof(struct pipe_blend_state) :
473 (char *)&(templ->rt[1]) - (char *)templ;
474 hash_key = cso_construct_key((void*)templ, key_size);
475 iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
476 (void*)templ, key_size);
477
478 if (cso_hash_iter_is_null(iter)) {
479 struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
480 if (!cso)
481 return PIPE_ERROR_OUT_OF_MEMORY;
482
483 memset(&cso->state, 0, sizeof cso->state);
484 memcpy(&cso->state, templ, key_size);
485 cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
486 cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
487 cso->context = ctx->pipe;
488
489 iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
490 if (cso_hash_iter_is_null(iter)) {
491 FREE(cso);
492 return PIPE_ERROR_OUT_OF_MEMORY;
493 }
494
495 handle = cso->data;
496 }
497 else {
498 handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
499 }
500
501 if (ctx->blend != handle) {
502 ctx->blend = handle;
503 ctx->pipe->bind_blend_state(ctx->pipe, handle);
504 }
505 return PIPE_OK;
506 }
507
508 static void
509 cso_save_blend(struct cso_context *ctx)
510 {
511 assert(!ctx->blend_saved);
512 ctx->blend_saved = ctx->blend;
513 }
514
515 static void
516 cso_restore_blend(struct cso_context *ctx)
517 {
518 if (ctx->blend != ctx->blend_saved) {
519 ctx->blend = ctx->blend_saved;
520 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
521 }
522 ctx->blend_saved = NULL;
523 }
524
525
526
527 enum pipe_error
528 cso_set_depth_stencil_alpha(struct cso_context *ctx,
529 const struct pipe_depth_stencil_alpha_state *templ)
530 {
531 unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
532 unsigned hash_key = cso_construct_key((void*)templ, key_size);
533 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
534 hash_key,
535 CSO_DEPTH_STENCIL_ALPHA,
536 (void*)templ, key_size);
537 void *handle;
538
539 if (cso_hash_iter_is_null(iter)) {
540 struct cso_depth_stencil_alpha *cso =
541 MALLOC(sizeof(struct cso_depth_stencil_alpha));
542 if (!cso)
543 return PIPE_ERROR_OUT_OF_MEMORY;
544
545 memcpy(&cso->state, templ, sizeof(*templ));
546 cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
547 &cso->state);
548 cso->delete_state =
549 (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
550 cso->context = ctx->pipe;
551
552 iter = cso_insert_state(ctx->cache, hash_key,
553 CSO_DEPTH_STENCIL_ALPHA, cso);
554 if (cso_hash_iter_is_null(iter)) {
555 FREE(cso);
556 return PIPE_ERROR_OUT_OF_MEMORY;
557 }
558
559 handle = cso->data;
560 }
561 else {
562 handle = ((struct cso_depth_stencil_alpha *)
563 cso_hash_iter_data(iter))->data;
564 }
565
566 if (ctx->depth_stencil != handle) {
567 ctx->depth_stencil = handle;
568 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
569 }
570 return PIPE_OK;
571 }
572
573 static void
574 cso_save_depth_stencil_alpha(struct cso_context *ctx)
575 {
576 assert(!ctx->depth_stencil_saved);
577 ctx->depth_stencil_saved = ctx->depth_stencil;
578 }
579
580 static void
581 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
582 {
583 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
584 ctx->depth_stencil = ctx->depth_stencil_saved;
585 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
586 ctx->depth_stencil_saved);
587 }
588 ctx->depth_stencil_saved = NULL;
589 }
590
591
592
593 enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
594 const struct pipe_rasterizer_state *templ)
595 {
596 unsigned key_size = sizeof(struct pipe_rasterizer_state);
597 unsigned hash_key = cso_construct_key((void*)templ, key_size);
598 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
599 hash_key,
600 CSO_RASTERIZER,
601 (void*)templ, key_size);
602 void *handle = NULL;
603
604 /* We can't have both point_quad_rasterization (sprites) and point_smooth
605 * (round AA points) enabled at the same time.
606 */
607 assert(!(templ->point_quad_rasterization && templ->point_smooth));
608
609 if (cso_hash_iter_is_null(iter)) {
610 struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
611 if (!cso)
612 return PIPE_ERROR_OUT_OF_MEMORY;
613
614 memcpy(&cso->state, templ, sizeof(*templ));
615 cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
616 cso->delete_state =
617 (cso_state_callback)ctx->pipe->delete_rasterizer_state;
618 cso->context = ctx->pipe;
619
620 iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
621 if (cso_hash_iter_is_null(iter)) {
622 FREE(cso);
623 return PIPE_ERROR_OUT_OF_MEMORY;
624 }
625
626 handle = cso->data;
627 }
628 else {
629 handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
630 }
631
632 if (ctx->rasterizer != handle) {
633 ctx->rasterizer = handle;
634 ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
635 }
636 return PIPE_OK;
637 }
638
639 static void
640 cso_save_rasterizer(struct cso_context *ctx)
641 {
642 assert(!ctx->rasterizer_saved);
643 ctx->rasterizer_saved = ctx->rasterizer;
644 }
645
646 static void
647 cso_restore_rasterizer(struct cso_context *ctx)
648 {
649 if (ctx->rasterizer != ctx->rasterizer_saved) {
650 ctx->rasterizer = ctx->rasterizer_saved;
651 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
652 }
653 ctx->rasterizer_saved = NULL;
654 }
655
656
657 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
658 {
659 if (ctx->fragment_shader != handle) {
660 ctx->fragment_shader = handle;
661 ctx->pipe->bind_fs_state(ctx->pipe, handle);
662 }
663 }
664
665 void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
666 {
667 if (handle == ctx->fragment_shader) {
668 /* unbind before deleting */
669 ctx->pipe->bind_fs_state(ctx->pipe, NULL);
670 ctx->fragment_shader = NULL;
671 }
672 ctx->pipe->delete_fs_state(ctx->pipe, handle);
673 }
674
675 static void
676 cso_save_fragment_shader(struct cso_context *ctx)
677 {
678 assert(!ctx->fragment_shader_saved);
679 ctx->fragment_shader_saved = ctx->fragment_shader;
680 }
681
682 static void
683 cso_restore_fragment_shader(struct cso_context *ctx)
684 {
685 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
686 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
687 ctx->fragment_shader = ctx->fragment_shader_saved;
688 }
689 ctx->fragment_shader_saved = NULL;
690 }
691
692
693 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
694 {
695 if (ctx->vertex_shader != handle) {
696 ctx->vertex_shader = handle;
697 ctx->pipe->bind_vs_state(ctx->pipe, handle);
698 }
699 }
700
701 void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
702 {
703 if (handle == ctx->vertex_shader) {
704 /* unbind before deleting */
705 ctx->pipe->bind_vs_state(ctx->pipe, NULL);
706 ctx->vertex_shader = NULL;
707 }
708 ctx->pipe->delete_vs_state(ctx->pipe, handle);
709 }
710
711 static void
712 cso_save_vertex_shader(struct cso_context *ctx)
713 {
714 assert(!ctx->vertex_shader_saved);
715 ctx->vertex_shader_saved = ctx->vertex_shader;
716 }
717
718 static void
719 cso_restore_vertex_shader(struct cso_context *ctx)
720 {
721 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
722 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
723 ctx->vertex_shader = ctx->vertex_shader_saved;
724 }
725 ctx->vertex_shader_saved = NULL;
726 }
727
728
729 void cso_set_framebuffer(struct cso_context *ctx,
730 const struct pipe_framebuffer_state *fb)
731 {
732 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
733 util_copy_framebuffer_state(&ctx->fb, fb);
734 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
735 }
736 }
737
738 static void
739 cso_save_framebuffer(struct cso_context *ctx)
740 {
741 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
742 }
743
744 static void
745 cso_restore_framebuffer(struct cso_context *ctx)
746 {
747 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
748 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
749 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
750 util_unreference_framebuffer_state(&ctx->fb_saved);
751 }
752 }
753
754
755 void cso_set_viewport(struct cso_context *ctx,
756 const struct pipe_viewport_state *vp)
757 {
758 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
759 ctx->vp = *vp;
760 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
761 }
762 }
763
764 /**
765 * Setup viewport state for given width and height (position is always (0,0)).
766 * Invert the Y axis if 'invert' is true.
767 */
768 void
769 cso_set_viewport_dims(struct cso_context *ctx,
770 float width, float height, boolean invert)
771 {
772 struct pipe_viewport_state vp;
773 vp.scale[0] = width * 0.5f;
774 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
775 vp.scale[2] = 0.5f;
776 vp.translate[0] = 0.5f * width;
777 vp.translate[1] = 0.5f * height;
778 vp.translate[2] = 0.5f;
779 cso_set_viewport(ctx, &vp);
780 }
781
782 static void
783 cso_save_viewport(struct cso_context *ctx)
784 {
785 ctx->vp_saved = ctx->vp;
786 }
787
788
789 static void
790 cso_restore_viewport(struct cso_context *ctx)
791 {
792 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
793 ctx->vp = ctx->vp_saved;
794 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
795 }
796 }
797
798
799 void cso_set_blend_color(struct cso_context *ctx,
800 const struct pipe_blend_color *bc)
801 {
802 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
803 ctx->blend_color = *bc;
804 ctx->pipe->set_blend_color(ctx->pipe, bc);
805 }
806 }
807
808 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
809 {
810 if (ctx->sample_mask != sample_mask) {
811 ctx->sample_mask = sample_mask;
812 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
813 }
814 }
815
816 static void
817 cso_save_sample_mask(struct cso_context *ctx)
818 {
819 ctx->sample_mask_saved = ctx->sample_mask;
820 }
821
822 static void
823 cso_restore_sample_mask(struct cso_context *ctx)
824 {
825 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
826 }
827
828 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
829 {
830 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
831 ctx->min_samples = min_samples;
832 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
833 }
834 }
835
836 static void
837 cso_save_min_samples(struct cso_context *ctx)
838 {
839 ctx->min_samples_saved = ctx->min_samples;
840 }
841
842 static void
843 cso_restore_min_samples(struct cso_context *ctx)
844 {
845 cso_set_min_samples(ctx, ctx->min_samples_saved);
846 }
847
848 void cso_set_stencil_ref(struct cso_context *ctx,
849 const struct pipe_stencil_ref *sr)
850 {
851 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
852 ctx->stencil_ref = *sr;
853 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
854 }
855 }
856
857 static void
858 cso_save_stencil_ref(struct cso_context *ctx)
859 {
860 ctx->stencil_ref_saved = ctx->stencil_ref;
861 }
862
863
864 static void
865 cso_restore_stencil_ref(struct cso_context *ctx)
866 {
867 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
868 sizeof(ctx->stencil_ref))) {
869 ctx->stencil_ref = ctx->stencil_ref_saved;
870 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
871 }
872 }
873
874 void cso_set_render_condition(struct cso_context *ctx,
875 struct pipe_query *query,
876 boolean condition,
877 enum pipe_render_cond_flag mode)
878 {
879 struct pipe_context *pipe = ctx->pipe;
880
881 if (ctx->render_condition != query ||
882 ctx->render_condition_mode != mode ||
883 ctx->render_condition_cond != condition) {
884 pipe->render_condition(pipe, query, condition, mode);
885 ctx->render_condition = query;
886 ctx->render_condition_cond = condition;
887 ctx->render_condition_mode = mode;
888 }
889 }
890
891 static void
892 cso_save_render_condition(struct cso_context *ctx)
893 {
894 ctx->render_condition_saved = ctx->render_condition;
895 ctx->render_condition_cond_saved = ctx->render_condition_cond;
896 ctx->render_condition_mode_saved = ctx->render_condition_mode;
897 }
898
899 static void
900 cso_restore_render_condition(struct cso_context *ctx)
901 {
902 cso_set_render_condition(ctx, ctx->render_condition_saved,
903 ctx->render_condition_cond_saved,
904 ctx->render_condition_mode_saved);
905 }
906
907 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
908 {
909 assert(ctx->has_geometry_shader || !handle);
910
911 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
912 ctx->geometry_shader = handle;
913 ctx->pipe->bind_gs_state(ctx->pipe, handle);
914 }
915 }
916
917 void cso_delete_geometry_shader(struct cso_context *ctx, void *handle)
918 {
919 if (handle == ctx->geometry_shader) {
920 /* unbind before deleting */
921 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
922 ctx->geometry_shader = NULL;
923 }
924 ctx->pipe->delete_gs_state(ctx->pipe, handle);
925 }
926
927 static void
928 cso_save_geometry_shader(struct cso_context *ctx)
929 {
930 if (!ctx->has_geometry_shader) {
931 return;
932 }
933
934 assert(!ctx->geometry_shader_saved);
935 ctx->geometry_shader_saved = ctx->geometry_shader;
936 }
937
938 static void
939 cso_restore_geometry_shader(struct cso_context *ctx)
940 {
941 if (!ctx->has_geometry_shader) {
942 return;
943 }
944
945 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
946 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
947 ctx->geometry_shader = ctx->geometry_shader_saved;
948 }
949 ctx->geometry_shader_saved = NULL;
950 }
951
952 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
953 {
954 assert(ctx->has_tessellation || !handle);
955
956 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
957 ctx->tessctrl_shader = handle;
958 ctx->pipe->bind_tcs_state(ctx->pipe, handle);
959 }
960 }
961
962 void cso_delete_tessctrl_shader(struct cso_context *ctx, void *handle)
963 {
964 if (handle == ctx->tessctrl_shader) {
965 /* unbind before deleting */
966 ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
967 ctx->tessctrl_shader = NULL;
968 }
969 ctx->pipe->delete_tcs_state(ctx->pipe, handle);
970 }
971
972 static void
973 cso_save_tessctrl_shader(struct cso_context *ctx)
974 {
975 if (!ctx->has_tessellation) {
976 return;
977 }
978
979 assert(!ctx->tessctrl_shader_saved);
980 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
981 }
982
983 static void
984 cso_restore_tessctrl_shader(struct cso_context *ctx)
985 {
986 if (!ctx->has_tessellation) {
987 return;
988 }
989
990 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
991 ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
992 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
993 }
994 ctx->tessctrl_shader_saved = NULL;
995 }
996
997 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
998 {
999 assert(ctx->has_tessellation || !handle);
1000
1001 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
1002 ctx->tesseval_shader = handle;
1003 ctx->pipe->bind_tes_state(ctx->pipe, handle);
1004 }
1005 }
1006
1007 void cso_delete_tesseval_shader(struct cso_context *ctx, void *handle)
1008 {
1009 if (handle == ctx->tesseval_shader) {
1010 /* unbind before deleting */
1011 ctx->pipe->bind_tes_state(ctx->pipe, NULL);
1012 ctx->tesseval_shader = NULL;
1013 }
1014 ctx->pipe->delete_tes_state(ctx->pipe, handle);
1015 }
1016
1017 static void
1018 cso_save_tesseval_shader(struct cso_context *ctx)
1019 {
1020 if (!ctx->has_tessellation) {
1021 return;
1022 }
1023
1024 assert(!ctx->tesseval_shader_saved);
1025 ctx->tesseval_shader_saved = ctx->tesseval_shader;
1026 }
1027
1028 static void
1029 cso_restore_tesseval_shader(struct cso_context *ctx)
1030 {
1031 if (!ctx->has_tessellation) {
1032 return;
1033 }
1034
1035 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1036 ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
1037 ctx->tesseval_shader = ctx->tesseval_shader_saved;
1038 }
1039 ctx->tesseval_shader_saved = NULL;
1040 }
1041
1042 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
1043 {
1044 assert(ctx->has_compute_shader || !handle);
1045
1046 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1047 ctx->compute_shader = handle;
1048 ctx->pipe->bind_compute_state(ctx->pipe, handle);
1049 }
1050 }
1051
1052 void cso_delete_compute_shader(struct cso_context *ctx, void *handle)
1053 {
1054 if (handle == ctx->compute_shader) {
1055 /* unbind before deleting */
1056 ctx->pipe->bind_compute_state(ctx->pipe, NULL);
1057 ctx->compute_shader = NULL;
1058 }
1059 ctx->pipe->delete_compute_state(ctx->pipe, handle);
1060 }
1061
/**
 * Bind a vertex-elements state matching (count, states), creating and
 * caching a new driver object on a cache miss.  Silently does nothing
 * on out-of-memory (no error is propagated to the caller).
 */
static void
cso_set_vertex_elements_direct(struct cso_context *ctx,
                               unsigned count,
                               const struct pipe_vertex_element *states)
{
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;
   struct cso_velems_state velems_state;

   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   /* Hash/compare only the used prefix: count plus 'count' elements. */
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return;

      /* Copies only key_size bytes: the count and the used elements. */
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
                                                          &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
   }
}
1114
1115 enum pipe_error
1116 cso_set_vertex_elements(struct cso_context *ctx,
1117 unsigned count,
1118 const struct pipe_vertex_element *states)
1119 {
1120 struct u_vbuf *vbuf = ctx->vbuf_current;
1121
1122 if (vbuf) {
1123 u_vbuf_set_vertex_elements(vbuf, count, states);
1124 return PIPE_OK;
1125 }
1126
1127 cso_set_vertex_elements_direct(ctx, count, states);
1128 return PIPE_OK;
1129 }
1130
/**
 * Save the current vertex-element binding for a later restore.  When u_vbuf
 * is active the save is delegated to it; otherwise the CSO handle is stashed
 * locally.  Nested saves are not supported (see the assert).
 */
static void
cso_save_vertex_elements(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf_current;

   if (vbuf) {
      u_vbuf_save_vertex_elements(vbuf);
      return;
   }

   assert(!ctx->velements_saved);
   ctx->velements_saved = ctx->velements;
}
1144
/**
 * Restore the vertex-element binding saved by cso_save_vertex_elements(),
 * skipping the driver bind when it is already current.  Delegates to u_vbuf
 * when that is the active path.  Clears the saved slot afterwards.
 */
static void
cso_restore_vertex_elements(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf_current;

   if (vbuf) {
      u_vbuf_restore_vertex_elements(vbuf);
      return;
   }

   if (ctx->velements != ctx->velements_saved) {
      ctx->velements = ctx->velements_saved;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
   }
   ctx->velements_saved = NULL;
}
1161
1162 /* vertex buffers */
1163
/**
 * Set vertex buffers directly on the pipe (bypassing u_vbuf).
 * Slot 0 is additionally mirrored into vertex_buffer0_current (with proper
 * resource reference counting) so meta ops can save/restore it.
 */
static void
cso_set_vertex_buffers_direct(struct cso_context *ctx,
                              unsigned start_slot, unsigned count,
                              const struct pipe_vertex_buffer *buffers)
{
   /* Save what's in the auxiliary slot, so that we can save and restore it
    * for meta ops.
    */
   if (start_slot == 0) {
      if (buffers) {
         pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
                                      buffers);
      } else {
         /* Unbinding: drop the reference held on the old buffer 0. */
         pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
      }
   }

   ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
}
1183
1184
1185 void cso_set_vertex_buffers(struct cso_context *ctx,
1186 unsigned start_slot, unsigned count,
1187 const struct pipe_vertex_buffer *buffers)
1188 {
1189 struct u_vbuf *vbuf = ctx->vbuf_current;
1190
1191 if (!count)
1192 return;
1193
1194 if (vbuf) {
1195 u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
1196 return;
1197 }
1198
1199 cso_set_vertex_buffers_direct(ctx, start_slot, count, buffers);
1200 }
1201
/**
 * Save vertex buffer slot 0 (with a reference) for a later restore.
 * Delegates to u_vbuf when that is the active path.
 */
static void
cso_save_vertex_buffer0(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf_current;

   if (vbuf) {
      u_vbuf_save_vertex_buffer0(vbuf);
      return;
   }

   pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
                                &ctx->vertex_buffer0_current);
}
1215
/**
 * Restore vertex buffer slot 0 saved by cso_save_vertex_buffer0() and drop
 * the saved reference.  Delegates to u_vbuf when that is the active path.
 */
static void
cso_restore_vertex_buffer0(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf_current;

   if (vbuf) {
      u_vbuf_restore_vertex_buffer0(vbuf);
      return;
   }

   cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
}
1229
1230 /**
1231 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1232 * for user vertex buffers and user vertex buffers are not set by this call.
1233 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1234 *
1235 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1236 * such as VBOs, glBegin/End, and display lists.
1237 *
1238 * Internal operations that do "save states, draw, restore states" shouldn't
1239 * use this, because the states are only saved in either cso_context or
1240 * u_vbuf, not both.
1241 */
void
cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
                                    unsigned velem_count,
                                    const struct pipe_vertex_element *velems,
                                    unsigned vb_count,
                                    unsigned unbind_trailing_vb_count,
                                    const struct pipe_vertex_buffer *vbuffers,
                                    bool uses_user_vertex_buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   /* Decide which manager owns vertex state for this call: u_vbuf when it
    * is forced on or user buffers require it, the plain cso path otherwise.
    */
   if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
      if (!ctx->vbuf_current) {
         /* Switching cso -> u_vbuf. */
         /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
         unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
         if (unbind_vb_count)
            cso_set_vertex_buffers_direct(ctx, 0, unbind_vb_count, NULL);

         /* Unset this to make sure the CSO is re-bound on the next use. */
         ctx->velements = NULL;
         ctx->vbuf_current = vbuf;
      } else if (unbind_trailing_vb_count) {
         u_vbuf_set_vertex_buffers(vbuf, vb_count, unbind_trailing_vb_count,
                                   NULL);
      }

      if (vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, vb_count, vbuffers);
      u_vbuf_set_vertex_elements(vbuf, velem_count, velems);
      return;
   }

   if (ctx->vbuf_current) {
      /* Switching u_vbuf -> cso. */
      /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
      unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
      if (unbind_vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, unbind_vb_count, NULL);

      /* Unset this to make sure the CSO is re-bound on the next use. */
      u_vbuf_unset_vertex_elements(vbuf);
      ctx->vbuf_current = NULL;
   } else if (unbind_trailing_vb_count) {
      cso_set_vertex_buffers_direct(ctx, vb_count, unbind_trailing_vb_count,
                                    NULL);
   }

   if (vb_count)
      cso_set_vertex_buffers_direct(ctx, 0, vb_count, vbuffers);
   cso_set_vertex_elements_direct(ctx, velem_count, velems);
}
1292
/**
 * Stage one sampler state for the given shader stage and binding slot.
 * The driver sampler object is looked up in (or inserted into) the CSO
 * cache keyed on a hash of the template.  Nothing is sent to the driver
 * here — cso_single_sampler_done() performs the actual bind.
 * A NULL template is a no-op; allocation/insert failure silently keeps
 * the previous state.
 */
void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
                   unsigned idx, const struct pipe_sampler_state *templ)
{
   if (templ) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_sampler *cso;
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* Cache miss: create a driver sampler and insert it. */
         cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;
         cso->hash_key = hash_key;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            FREE(cso);
            return;
         }
      }
      else {
         cso = cso_hash_iter_data(iter);
      }

      /* Stage the sampler; track the highest slot touched so that
       * cso_single_sampler_done() knows how many to bind. */
      ctx->samplers[shader_stage].cso_samplers[idx] = cso;
      ctx->samplers[shader_stage].samplers[idx] = cso->data;
      ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
   }
}
1333
1334
1335 /**
1336 * Send staged sampler state to the driver.
1337 */
/**
 * Send staged sampler state to the driver.
 * Binds slots [0, max_sampler_seen] in one call, then resets the
 * high-water mark so a subsequent call with no new samplers is a no-op.
 */
void
cso_single_sampler_done(struct cso_context *ctx,
                        enum pipe_shader_type shader_stage)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];

   /* -1 means nothing was staged since the last flush. */
   if (ctx->max_sampler_seen == -1)
      return;

   ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
                                  ctx->max_sampler_seen + 1,
                                  info->samplers);
   ctx->max_sampler_seen = -1;
}
1352
1353
/*
 * If the function encounters any errors it will report only the
 * last one.  Done to always try to set as many samplers
 * as possible.
 */
/**
 * Stage and bind `nr` sampler states for a shader stage in one shot.
 * Each template goes through the CSO cache via cso_single_sampler();
 * a single bind_sampler_states call is then issued by
 * cso_single_sampler_done().
 */
void
cso_set_samplers(struct cso_context *ctx,
                 enum pipe_shader_type shader_stage,
                 unsigned nr,
                 const struct pipe_sampler_state **templates)
{
   for (unsigned i = 0; i < nr; i++)
      cso_single_sampler(ctx, shader_stage, i, templates[i]);

   cso_single_sampler_done(ctx, shader_stage);
}
1370
/**
 * Snapshot the fragment-stage sampler arrays (CSO pointers and driver
 * handles) so they can be restored after a meta op.  Plain copies — sampler
 * CSOs are owned by the cache, so no reference counting is needed.
 */
static void
cso_save_fragment_samplers(struct cso_context *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
   struct sampler_info *saved = &ctx->fragment_samplers_saved;

   memcpy(saved->cso_samplers, info->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
}
1381
1382
/**
 * Restore the fragment-stage samplers saved by cso_save_fragment_samplers()
 * and re-bind them on the driver.
 */
static void
cso_restore_fragment_samplers(struct cso_context *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
   struct sampler_info *saved = &ctx->fragment_samplers_saved;

   memcpy(info->cso_samplers, saved->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(info->samplers, saved->samplers, sizeof(info->samplers));

   /* Recompute the high-water mark: highest non-NULL sampler slot. */
   for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
      if (info->samplers[i]) {
         ctx->max_sampler_seen = i;
         break;
      }
   }

   cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
}
1402
1403
/**
 * Set sampler views for a shader stage.
 * For the fragment stage the views are tracked locally (with reference
 * counting) so they can be saved/restored for meta ops, and the driver call
 * is skipped when nothing actually changed.  Other stages pass straight
 * through to the driver.
 */
void
cso_set_sampler_views(struct cso_context *ctx,
                      enum pipe_shader_type shader_stage,
                      unsigned count,
                      struct pipe_sampler_view **views)
{
   if (shader_stage == PIPE_SHADER_FRAGMENT) {
      unsigned i;
      boolean any_change = FALSE;

      /* reference new views */
      for (i = 0; i < count; i++) {
         any_change |= ctx->fragment_views[i] != views[i];
         pipe_sampler_view_reference(&ctx->fragment_views[i], views[i]);
      }
      /* unref extra old views, if any */
      for (; i < ctx->nr_fragment_views; i++) {
         any_change |= ctx->fragment_views[i] != NULL;
         pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      }

      /* bind the new sampler views */
      if (any_change) {
         /* MAX2 so slots beyond `count` are explicitly set to NULL. */
         ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
                                      MAX2(ctx->nr_fragment_views, count),
                                      ctx->fragment_views);
      }

      ctx->nr_fragment_views = count;
   }
   else
      ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, count, views);
}
1437
1438
/**
 * Save the fragment-stage sampler views (taking a reference on each) so
 * they can be restored after a meta op.  The saved slots must be empty —
 * nested saves are not supported (see the assert).
 */
static void
cso_save_fragment_sampler_views(struct cso_context *ctx)
{
   unsigned i;

   ctx->nr_fragment_views_saved = ctx->nr_fragment_views;

   for (i = 0; i < ctx->nr_fragment_views; i++) {
      assert(!ctx->fragment_views_saved[i]);
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i],
                                  ctx->fragment_views[i]);
   }
}
1452
1453
/**
 * Restore the fragment-stage sampler views saved by
 * cso_save_fragment_sampler_views() and re-bind them on the driver.
 * References are moved (not re-counted) from the saved slots back into the
 * current slots; any current views beyond the saved count are released.
 */
static void
cso_restore_fragment_sampler_views(struct cso_context *ctx)
{
   unsigned i, nr_saved = ctx->nr_fragment_views_saved;
   unsigned num;

   for (i = 0; i < nr_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      /* move the reference from one pointer to another */
      ctx->fragment_views[i] = ctx->fragment_views_saved[i];
      ctx->fragment_views_saved[i] = NULL;
   }
   for (; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }

   /* Bind enough slots to also NULL out views past the saved count. */
   num = MAX2(ctx->nr_fragment_views, nr_saved);

   /* bind the old/saved sampler views */
   ctx->pipe->set_sampler_views(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, num,
                                ctx->fragment_views);

   ctx->nr_fragment_views = nr_saved;
   ctx->nr_fragment_views_saved = 0;
}
1479
1480
/**
 * Set shader images for a stage.  Fragment image slot 0 is additionally
 * mirrored into fragment_image0_current so meta ops can save/restore it.
 */
void
cso_set_shader_images(struct cso_context *ctx,
                      enum pipe_shader_type shader_stage,
                      unsigned start, unsigned count,
                      struct pipe_image_view *images)
{
   if (shader_stage == PIPE_SHADER_FRAGMENT && start == 0 && count >= 1) {
      util_copy_image_view(&ctx->fragment_image0_current, &images[0]);
   }

   ctx->pipe->set_shader_images(ctx->pipe, shader_stage, start, count, images);
}
1493
1494
/* Save fragment image slot 0 (resource reference included) for a later
 * restore. */
static void
cso_save_fragment_image0(struct cso_context *ctx)
{
   util_copy_image_view(&ctx->fragment_image0_saved,
                        &ctx->fragment_image0_current);
}
1501
1502
/* Re-bind the fragment image slot 0 saved by cso_save_fragment_image0(). */
static void
cso_restore_fragment_image0(struct cso_context *ctx)
{
   cso_set_shader_images(ctx, PIPE_SHADER_FRAGMENT, 0, 1,
                         &ctx->fragment_image0_saved);
}
1509
1510
/**
 * Set stream-output (transform feedback) targets.
 * Targets are tracked locally with reference counting so they can be
 * saved/restored for meta ops.  A no-op when streamout is unsupported
 * (then num_targets must be 0) or when going from zero to zero targets.
 */
void
cso_set_stream_outputs(struct cso_context *ctx,
                       unsigned num_targets,
                       struct pipe_stream_output_target **targets,
                       const unsigned *offsets)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;

   if (!ctx->has_streamout) {
      assert(num_targets == 0);
      return;
   }

   if (ctx->nr_so_targets == 0 && num_targets == 0) {
      /* Nothing to do. */
      return;
   }

   /* reference new targets */
   for (i = 0; i < num_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
   }
   /* unref extra old targets, if any */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, num_targets, targets,
                                   offsets);
   ctx->nr_so_targets = num_targets;
}
1543
/**
 * Save the current stream-output targets (taking a reference on each) for
 * a later restore.  The saved slots must be empty — nested saves are not
 * supported (see the assert).
 */
static void
cso_save_stream_outputs(struct cso_context *ctx)
{
   uint i;

   if (!ctx->has_streamout) {
      return;
   }

   ctx->nr_so_targets_saved = ctx->nr_so_targets;

   for (i = 0; i < ctx->nr_so_targets; i++) {
      assert(!ctx->so_targets_saved[i]);
      pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
   }
}
1560
/**
 * Restore the stream-output targets saved by cso_save_stream_outputs()
 * and re-bind them on the driver with "append" offsets, so writing resumes
 * where it left off.  References are moved (not re-counted) from the saved
 * slots back into the current slots.
 */
static void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* Release any current targets beyond the saved count. */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1596
1597 /* constant buffers */
1598
/**
 * Set a constant buffer for a shader stage.  Slot 0 is additionally
 * mirrored into aux_constbuf_current (with resource reference counting via
 * util_copy_constant_buffer) so meta ops can save/restore it.
 */
void
cso_set_constant_buffer(struct cso_context *cso,
                        enum pipe_shader_type shader_stage,
                        unsigned index, struct pipe_constant_buffer *cb)
{
   struct pipe_context *pipe = cso->pipe;

   pipe->set_constant_buffer(pipe, shader_stage, index, cb);

   if (index == 0) {
      util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
   }
}
1612
1613 void
1614 cso_set_constant_buffer_resource(struct cso_context *cso,
1615 enum pipe_shader_type shader_stage,
1616 unsigned index,
1617 struct pipe_resource *buffer)
1618 {
1619 if (buffer) {
1620 struct pipe_constant_buffer cb;
1621 cb.buffer = buffer;
1622 cb.buffer_offset = 0;
1623 cb.buffer_size = buffer->width0;
1624 cb.user_buffer = NULL;
1625 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1626 } else {
1627 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1628 }
1629 }
1630
1631 void
1632 cso_set_constant_user_buffer(struct cso_context *cso,
1633 enum pipe_shader_type shader_stage,
1634 unsigned index, void *ptr, unsigned size)
1635 {
1636 if (ptr) {
1637 struct pipe_constant_buffer cb;
1638 cb.buffer = NULL;
1639 cb.buffer_offset = 0;
1640 cb.buffer_size = size;
1641 cb.user_buffer = ptr;
1642 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1643 } else {
1644 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1645 }
1646 }
1647
/**
 * Save constant buffer slot 0 for a stage (resource reference included)
 * so it can be restored after a meta op.
 */
void
cso_save_constant_buffer_slot0(struct cso_context *cso,
                               enum pipe_shader_type shader_stage)
{
   util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
                             &cso->aux_constbuf_current[shader_stage]);
}
1655
/**
 * Re-bind constant buffer slot 0 saved by cso_save_constant_buffer_slot0()
 * and drop the saved resource reference.
 */
void
cso_restore_constant_buffer_slot0(struct cso_context *cso,
                                  enum pipe_shader_type shader_stage)
{
   cso_set_constant_buffer(cso, shader_stage, 0,
                           &cso->aux_constbuf_saved[shader_stage]);
   pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
                           NULL);
}
1665
1666
1667 /**
1668 * Save all the CSO state items specified by the state_mask bitmask
1669 * of CSO_BIT_x flags.
1670 */
void
cso_save_state(struct cso_context *cso, unsigned state_mask)
{
   /* Nested save/restore is not supported: a previous save must have been
    * matched by cso_restore_state() before saving again. */
   assert(cso->saved_state == 0);

   /* Remember the mask so cso_restore_state() restores the same items. */
   cso->saved_state = state_mask;

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_save_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_save_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_save_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_save_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_save_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_save_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_save_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_save_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_save_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_save_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_save_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_save_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_save_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_save_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_save_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_save_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_save_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_save_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_save_viewport(cso);
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, false);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_save_fragment_image0(cso);
}
1721
1722
1723 /**
1724 * Restore the state which was saved by cso_save_state().
1725 */
void
cso_restore_state(struct cso_context *cso)
{
   /* Restore exactly the set of items recorded by cso_save_state(). */
   unsigned state_mask = cso->saved_state;

   assert(state_mask);

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_restore_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_restore_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_restore_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_restore_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_restore_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_restore_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_restore_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_restore_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_restore_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_restore_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_restore_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_restore_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_restore_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_restore_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_restore_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_restore_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_restore_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_restore_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_restore_viewport(cso);
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, true);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_restore_fragment_image0(cso);

   /* Clear the mask so a new cso_save_state() is allowed. */
   cso->saved_state = 0;
}
1778
1779
1780
1781 /* drawing */
1782
/**
 * Issue a draw call, going through u_vbuf when it is the active
 * vertex-buffer manager and straight to the driver otherwise.
 */
void
cso_draw_vbo(struct cso_context *cso,
             const struct pipe_draw_info *info)
{
   struct u_vbuf *vbuf = cso->vbuf_current;

   /* We can't have both indirect drawing and SO-vertex-count drawing */
   assert(info->indirect == NULL || info->count_from_stream_output == NULL);

   /* We can't have SO-vertex-count drawing with an index buffer */
   assert(info->count_from_stream_output == NULL || info->index_size == 0);

   if (vbuf) {
      u_vbuf_draw_vbo(vbuf, info);
   } else {
      struct pipe_context *pipe = cso->pipe;
      pipe->draw_vbo(pipe, info);
   }
}
1802
/**
 * Helper: draw `count` non-indexed vertices starting at `start` with the
 * given primitive mode.  Remaining pipe_draw_info fields take the defaults
 * from util_draw_init_info().
 * NOTE(review): max_index = start + count - 1 underflows for count == 0 —
 * callers are presumably expected to pass count >= 1; confirm.
 */
void
cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
{
   struct pipe_draw_info info;

   util_draw_init_info(&info);

   info.mode = mode;
   info.start = start;
   info.count = count;
   info.min_index = start;
   info.max_index = start + count - 1;

   cso_draw_vbo(cso, &info);
}
1818
/**
 * Helper: instanced variant of cso_draw_arrays() — draws `instance_count`
 * instances beginning at `start_instance`.  Remaining pipe_draw_info fields
 * take the defaults from util_draw_init_info().
 */
void
cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
                          uint start, uint count,
                          uint start_instance, uint instance_count)
{
   struct pipe_draw_info info;

   util_draw_init_info(&info);

   info.mode = mode;
   info.start = start;
   info.count = count;
   info.min_index = start;
   info.max_index = start + count - 1;
   info.start_instance = start_instance;
   info.instance_count = instance_count;

   cso_draw_vbo(cso, &info);
}