mesa/st: Expose compute shaders when NIR support is advertised.
[mesa.git] / src / gallium / auxiliary / cso_cache / cso_context.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_inlines.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_vbuf.h"
45 #include "tgsi/tgsi_parse.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51
52
/**
 * Per-shader sampler information.
 */
struct sampler_info
{
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS]; /**< cached CSO wrappers, one per sampler slot */
   void *samplers[PIPE_MAX_SAMPLERS]; /**< driver sampler-state handles matching cso_samplers */
};
61
62
63
/**
 * The CSO context: wraps a pipe_context, caches driver-created CSO
 * objects (blend, DSA, rasterizer, sampler, vertex-element state), and
 * keeps a 1-deep save/restore stack for most bound pipe state.
 */
struct cso_context {
   struct pipe_context *pipe;   /**< the wrapped driver context */
   struct cso_cache *cache;     /**< hash cache of created CSO objects */
   struct u_vbuf *vbuf;         /**< vertex buffer manager, NULL if the driver needs none */

   /* Capabilities queried from the screen at creation time. */
   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_compute_shader;
   boolean has_streamout;

   unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */

   struct pipe_sampler_view *fragment_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views;

   struct pipe_sampler_view *fragment_views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_fragment_views_saved;

   struct sampler_info fragment_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   struct pipe_vertex_buffer vertex_buffer0_current;
   struct pipe_vertex_buffer vertex_buffer0_saved;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   struct pipe_image_view fragment_image0_current;
   struct pipe_image_view fragment_image0_saved;

   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader;  /* note: no *_saved counterpart in this struct */
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
129
130 struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
131 {
132 return cso->pipe;
133 }
134
135 static boolean delete_blend_state(struct cso_context *ctx, void *state)
136 {
137 struct cso_blend *cso = (struct cso_blend *)state;
138
139 if (ctx->blend == cso->data)
140 return FALSE;
141
142 if (cso->delete_state)
143 cso->delete_state(cso->context, cso->data);
144 FREE(state);
145 return TRUE;
146 }
147
148 static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
149 {
150 struct cso_depth_stencil_alpha *cso =
151 (struct cso_depth_stencil_alpha *)state;
152
153 if (ctx->depth_stencil == cso->data)
154 return FALSE;
155
156 if (cso->delete_state)
157 cso->delete_state(cso->context, cso->data);
158 FREE(state);
159
160 return TRUE;
161 }
162
163 static boolean delete_sampler_state(UNUSED struct cso_context *ctx, void *state)
164 {
165 struct cso_sampler *cso = (struct cso_sampler *)state;
166 if (cso->delete_state)
167 cso->delete_state(cso->context, cso->data);
168 FREE(state);
169 return TRUE;
170 }
171
172 static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
173 {
174 struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
175
176 if (ctx->rasterizer == cso->data)
177 return FALSE;
178 if (cso->delete_state)
179 cso->delete_state(cso->context, cso->data);
180 FREE(state);
181 return TRUE;
182 }
183
184 static boolean delete_vertex_elements(struct cso_context *ctx,
185 void *state)
186 {
187 struct cso_velements *cso = (struct cso_velements *)state;
188
189 if (ctx->velements == cso->data)
190 return FALSE;
191
192 if (cso->delete_state)
193 cso->delete_state(cso->context, cso->data);
194 FREE(state);
195 return TRUE;
196 }
197
198
199 static inline boolean delete_cso(struct cso_context *ctx,
200 void *state, enum cso_cache_type type)
201 {
202 switch (type) {
203 case CSO_BLEND:
204 return delete_blend_state(ctx, state);
205 case CSO_SAMPLER:
206 return delete_sampler_state(ctx, state);
207 case CSO_DEPTH_STENCIL_ALPHA:
208 return delete_depth_stencil_state(ctx, state);
209 case CSO_RASTERIZER:
210 return delete_rasterizer_state(ctx, state);
211 case CSO_VELEMENTS:
212 return delete_vertex_elements(ctx, state);
213 default:
214 assert(0);
215 FREE(state);
216 }
217 return FALSE;
218 }
219
220 static inline void
221 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
222 int max_size, void *user_data)
223 {
224 struct cso_context *ctx = (struct cso_context *)user_data;
225 /* if we're approach the maximum size, remove fourth of the entries
226 * otherwise every subsequent call will go through the same */
227 int hash_size = cso_hash_size(hash);
228 int max_entries = (max_size > hash_size) ? max_size : hash_size;
229 int to_remove = (max_size < max_entries) * max_entries/4;
230 struct cso_hash_iter iter;
231 struct cso_sampler **samplers_to_restore = NULL;
232 unsigned to_restore = 0;
233
234 if (hash_size > max_size)
235 to_remove += hash_size - max_size;
236
237 if (to_remove == 0)
238 return;
239
240 if (type == CSO_SAMPLER) {
241 int i, j;
242
243 samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
244 sizeof(*samplers_to_restore));
245
246 /* Temporarily remove currently bound sampler states from the hash
247 * table, to prevent them from being deleted
248 */
249 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
250 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
251 struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
252
253 if (sampler && cso_hash_take(hash, sampler->hash_key))
254 samplers_to_restore[to_restore++] = sampler;
255 }
256 }
257 }
258
259 iter = cso_hash_first_node(hash);
260 while (to_remove) {
261 /*remove elements until we're good */
262 /*fixme: currently we pick the nodes to remove at random*/
263 void *cso = cso_hash_iter_data(iter);
264
265 if (!cso)
266 break;
267
268 if (delete_cso(ctx, cso, type)) {
269 iter = cso_hash_erase(hash, iter);
270 --to_remove;
271 } else
272 iter = cso_hash_iter_next(iter);
273 }
274
275 if (type == CSO_SAMPLER) {
276 /* Put currently bound sampler states back into the hash table */
277 while (to_restore--) {
278 struct cso_sampler *sampler = samplers_to_restore[to_restore];
279
280 cso_hash_insert(hash, sampler->hash_key, sampler);
281 }
282
283 FREE(samplers_to_restore);
284 }
285 }
286
287 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
288 {
289 struct u_vbuf_caps caps;
290
291 /* Install u_vbuf if there is anything unsupported. */
292 if (u_vbuf_get_caps(cso->pipe->screen, &caps, flags)) {
293 cso->vbuf = u_vbuf_create(cso->pipe, &caps);
294 }
295 }
296
297 struct cso_context *
298 cso_create_context(struct pipe_context *pipe, unsigned u_vbuf_flags)
299 {
300 struct cso_context *ctx = CALLOC_STRUCT(cso_context);
301 if (!ctx)
302 return NULL;
303
304 ctx->cache = cso_cache_create();
305 if (ctx->cache == NULL)
306 goto out;
307 cso_cache_set_sanitize_callback(ctx->cache,
308 sanitize_hash,
309 ctx);
310
311 ctx->pipe = pipe;
312 ctx->sample_mask = ~0;
313
314 cso_init_vbuf(ctx, u_vbuf_flags);
315
316 /* Enable for testing: */
317 if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
318
319 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
320 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
321 ctx->has_geometry_shader = TRUE;
322 }
323 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
324 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
325 ctx->has_tessellation = TRUE;
326 }
327 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
328 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
329 int supported_irs =
330 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
331 PIPE_SHADER_CAP_SUPPORTED_IRS);
332 if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
333 (1 << PIPE_SHADER_IR_NIR))) {
334 ctx->has_compute_shader = TRUE;
335 }
336 }
337 if (pipe->screen->get_param(pipe->screen,
338 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
339 ctx->has_streamout = TRUE;
340 }
341
342 ctx->max_sampler_seen = -1;
343 return ctx;
344
345 out:
346 cso_destroy_context( ctx );
347 return NULL;
348 }
349
/**
 * Free the CSO context.
 *
 * First unbinds every piece of state this context may have bound on the
 * driver context (so the driver drops its references), then releases the
 * references this context itself holds, and finally frees the cache and
 * the context structure.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* All-NULL tables used to unbind every sampler state and
          * sampler view on every shader stage.
          */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            /* Only unbind as many slots as the stage actually exposes. */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* Optional stages are only unbound when the screen advertised them
       * at context-creation time.
       */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Release the references this context holds. */
   for (i = 0; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }
   for (i = 0; i < ctx->nr_fragment_views_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
   pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   pipe_resource_reference(&ctx->fragment_image0_current.resource, NULL);
   pipe_resource_reference(&ctx->fragment_image0_saved.resource, NULL);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
442
443
/* These functions will either find the state matching the given template
 * in the cache, or create a new state from the template, insert it into
 * the cache and return it.
 */

/*
 * If the driver returns 0 from the create method, the data member of
 * the cso is assigned the template itself.
 */
453
454 enum pipe_error cso_set_blend(struct cso_context *ctx,
455 const struct pipe_blend_state *templ)
456 {
457 unsigned key_size, hash_key;
458 struct cso_hash_iter iter;
459 void *handle;
460
461 key_size = templ->independent_blend_enable ?
462 sizeof(struct pipe_blend_state) :
463 (char *)&(templ->rt[1]) - (char *)templ;
464 hash_key = cso_construct_key((void*)templ, key_size);
465 iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND,
466 (void*)templ, key_size);
467
468 if (cso_hash_iter_is_null(iter)) {
469 struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
470 if (!cso)
471 return PIPE_ERROR_OUT_OF_MEMORY;
472
473 memset(&cso->state, 0, sizeof cso->state);
474 memcpy(&cso->state, templ, key_size);
475 cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
476 cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
477 cso->context = ctx->pipe;
478
479 iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
480 if (cso_hash_iter_is_null(iter)) {
481 FREE(cso);
482 return PIPE_ERROR_OUT_OF_MEMORY;
483 }
484
485 handle = cso->data;
486 }
487 else {
488 handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
489 }
490
491 if (ctx->blend != handle) {
492 ctx->blend = handle;
493 ctx->pipe->bind_blend_state(ctx->pipe, handle);
494 }
495 return PIPE_OK;
496 }
497
498 static void
499 cso_save_blend(struct cso_context *ctx)
500 {
501 assert(!ctx->blend_saved);
502 ctx->blend_saved = ctx->blend;
503 }
504
505 static void
506 cso_restore_blend(struct cso_context *ctx)
507 {
508 if (ctx->blend != ctx->blend_saved) {
509 ctx->blend = ctx->blend_saved;
510 ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
511 }
512 ctx->blend_saved = NULL;
513 }
514
515
516
517 enum pipe_error
518 cso_set_depth_stencil_alpha(struct cso_context *ctx,
519 const struct pipe_depth_stencil_alpha_state *templ)
520 {
521 unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
522 unsigned hash_key = cso_construct_key((void*)templ, key_size);
523 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
524 hash_key,
525 CSO_DEPTH_STENCIL_ALPHA,
526 (void*)templ, key_size);
527 void *handle;
528
529 if (cso_hash_iter_is_null(iter)) {
530 struct cso_depth_stencil_alpha *cso =
531 MALLOC(sizeof(struct cso_depth_stencil_alpha));
532 if (!cso)
533 return PIPE_ERROR_OUT_OF_MEMORY;
534
535 memcpy(&cso->state, templ, sizeof(*templ));
536 cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
537 &cso->state);
538 cso->delete_state =
539 (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
540 cso->context = ctx->pipe;
541
542 iter = cso_insert_state(ctx->cache, hash_key,
543 CSO_DEPTH_STENCIL_ALPHA, cso);
544 if (cso_hash_iter_is_null(iter)) {
545 FREE(cso);
546 return PIPE_ERROR_OUT_OF_MEMORY;
547 }
548
549 handle = cso->data;
550 }
551 else {
552 handle = ((struct cso_depth_stencil_alpha *)
553 cso_hash_iter_data(iter))->data;
554 }
555
556 if (ctx->depth_stencil != handle) {
557 ctx->depth_stencil = handle;
558 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
559 }
560 return PIPE_OK;
561 }
562
563 static void
564 cso_save_depth_stencil_alpha(struct cso_context *ctx)
565 {
566 assert(!ctx->depth_stencil_saved);
567 ctx->depth_stencil_saved = ctx->depth_stencil;
568 }
569
570 static void
571 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
572 {
573 if (ctx->depth_stencil != ctx->depth_stencil_saved) {
574 ctx->depth_stencil = ctx->depth_stencil_saved;
575 ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
576 ctx->depth_stencil_saved);
577 }
578 ctx->depth_stencil_saved = NULL;
579 }
580
581
582
583 enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
584 const struct pipe_rasterizer_state *templ)
585 {
586 unsigned key_size = sizeof(struct pipe_rasterizer_state);
587 unsigned hash_key = cso_construct_key((void*)templ, key_size);
588 struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
589 hash_key,
590 CSO_RASTERIZER,
591 (void*)templ, key_size);
592 void *handle = NULL;
593
594 /* We can't have both point_quad_rasterization (sprites) and point_smooth
595 * (round AA points) enabled at the same time.
596 */
597 assert(!(templ->point_quad_rasterization && templ->point_smooth));
598
599 if (cso_hash_iter_is_null(iter)) {
600 struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
601 if (!cso)
602 return PIPE_ERROR_OUT_OF_MEMORY;
603
604 memcpy(&cso->state, templ, sizeof(*templ));
605 cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
606 cso->delete_state =
607 (cso_state_callback)ctx->pipe->delete_rasterizer_state;
608 cso->context = ctx->pipe;
609
610 iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
611 if (cso_hash_iter_is_null(iter)) {
612 FREE(cso);
613 return PIPE_ERROR_OUT_OF_MEMORY;
614 }
615
616 handle = cso->data;
617 }
618 else {
619 handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
620 }
621
622 if (ctx->rasterizer != handle) {
623 ctx->rasterizer = handle;
624 ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
625 }
626 return PIPE_OK;
627 }
628
629 static void
630 cso_save_rasterizer(struct cso_context *ctx)
631 {
632 assert(!ctx->rasterizer_saved);
633 ctx->rasterizer_saved = ctx->rasterizer;
634 }
635
636 static void
637 cso_restore_rasterizer(struct cso_context *ctx)
638 {
639 if (ctx->rasterizer != ctx->rasterizer_saved) {
640 ctx->rasterizer = ctx->rasterizer_saved;
641 ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
642 }
643 ctx->rasterizer_saved = NULL;
644 }
645
646
647 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
648 {
649 if (ctx->fragment_shader != handle) {
650 ctx->fragment_shader = handle;
651 ctx->pipe->bind_fs_state(ctx->pipe, handle);
652 }
653 }
654
655 void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
656 {
657 if (handle == ctx->fragment_shader) {
658 /* unbind before deleting */
659 ctx->pipe->bind_fs_state(ctx->pipe, NULL);
660 ctx->fragment_shader = NULL;
661 }
662 ctx->pipe->delete_fs_state(ctx->pipe, handle);
663 }
664
665 static void
666 cso_save_fragment_shader(struct cso_context *ctx)
667 {
668 assert(!ctx->fragment_shader_saved);
669 ctx->fragment_shader_saved = ctx->fragment_shader;
670 }
671
672 static void
673 cso_restore_fragment_shader(struct cso_context *ctx)
674 {
675 if (ctx->fragment_shader_saved != ctx->fragment_shader) {
676 ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
677 ctx->fragment_shader = ctx->fragment_shader_saved;
678 }
679 ctx->fragment_shader_saved = NULL;
680 }
681
682
683 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
684 {
685 if (ctx->vertex_shader != handle) {
686 ctx->vertex_shader = handle;
687 ctx->pipe->bind_vs_state(ctx->pipe, handle);
688 }
689 }
690
691 void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
692 {
693 if (handle == ctx->vertex_shader) {
694 /* unbind before deleting */
695 ctx->pipe->bind_vs_state(ctx->pipe, NULL);
696 ctx->vertex_shader = NULL;
697 }
698 ctx->pipe->delete_vs_state(ctx->pipe, handle);
699 }
700
701 static void
702 cso_save_vertex_shader(struct cso_context *ctx)
703 {
704 assert(!ctx->vertex_shader_saved);
705 ctx->vertex_shader_saved = ctx->vertex_shader;
706 }
707
708 static void
709 cso_restore_vertex_shader(struct cso_context *ctx)
710 {
711 if (ctx->vertex_shader_saved != ctx->vertex_shader) {
712 ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
713 ctx->vertex_shader = ctx->vertex_shader_saved;
714 }
715 ctx->vertex_shader_saved = NULL;
716 }
717
718
719 void cso_set_framebuffer(struct cso_context *ctx,
720 const struct pipe_framebuffer_state *fb)
721 {
722 if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
723 util_copy_framebuffer_state(&ctx->fb, fb);
724 ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
725 }
726 }
727
728 static void
729 cso_save_framebuffer(struct cso_context *ctx)
730 {
731 util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
732 }
733
734 static void
735 cso_restore_framebuffer(struct cso_context *ctx)
736 {
737 if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
738 util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
739 ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
740 util_unreference_framebuffer_state(&ctx->fb_saved);
741 }
742 }
743
744
745 void cso_set_viewport(struct cso_context *ctx,
746 const struct pipe_viewport_state *vp)
747 {
748 if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
749 ctx->vp = *vp;
750 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
751 }
752 }
753
754 /**
755 * Setup viewport state for given width and height (position is always (0,0)).
756 * Invert the Y axis if 'invert' is true.
757 */
758 void
759 cso_set_viewport_dims(struct cso_context *ctx,
760 float width, float height, boolean invert)
761 {
762 struct pipe_viewport_state vp;
763 vp.scale[0] = width * 0.5f;
764 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
765 vp.scale[2] = 0.5f;
766 vp.translate[0] = 0.5f * width;
767 vp.translate[1] = 0.5f * height;
768 vp.translate[2] = 0.5f;
769 cso_set_viewport(ctx, &vp);
770 }
771
772 static void
773 cso_save_viewport(struct cso_context *ctx)
774 {
775 ctx->vp_saved = ctx->vp;
776 }
777
778
779 static void
780 cso_restore_viewport(struct cso_context *ctx)
781 {
782 if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
783 ctx->vp = ctx->vp_saved;
784 ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
785 }
786 }
787
788
789 void cso_set_blend_color(struct cso_context *ctx,
790 const struct pipe_blend_color *bc)
791 {
792 if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
793 ctx->blend_color = *bc;
794 ctx->pipe->set_blend_color(ctx->pipe, bc);
795 }
796 }
797
798 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
799 {
800 if (ctx->sample_mask != sample_mask) {
801 ctx->sample_mask = sample_mask;
802 ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
803 }
804 }
805
806 static void
807 cso_save_sample_mask(struct cso_context *ctx)
808 {
809 ctx->sample_mask_saved = ctx->sample_mask;
810 }
811
812 static void
813 cso_restore_sample_mask(struct cso_context *ctx)
814 {
815 cso_set_sample_mask(ctx, ctx->sample_mask_saved);
816 }
817
818 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
819 {
820 if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
821 ctx->min_samples = min_samples;
822 ctx->pipe->set_min_samples(ctx->pipe, min_samples);
823 }
824 }
825
826 static void
827 cso_save_min_samples(struct cso_context *ctx)
828 {
829 ctx->min_samples_saved = ctx->min_samples;
830 }
831
832 static void
833 cso_restore_min_samples(struct cso_context *ctx)
834 {
835 cso_set_min_samples(ctx, ctx->min_samples_saved);
836 }
837
838 void cso_set_stencil_ref(struct cso_context *ctx,
839 const struct pipe_stencil_ref *sr)
840 {
841 if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) {
842 ctx->stencil_ref = *sr;
843 ctx->pipe->set_stencil_ref(ctx->pipe, sr);
844 }
845 }
846
847 static void
848 cso_save_stencil_ref(struct cso_context *ctx)
849 {
850 ctx->stencil_ref_saved = ctx->stencil_ref;
851 }
852
853
854 static void
855 cso_restore_stencil_ref(struct cso_context *ctx)
856 {
857 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
858 sizeof(ctx->stencil_ref))) {
859 ctx->stencil_ref = ctx->stencil_ref_saved;
860 ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref);
861 }
862 }
863
864 void cso_set_render_condition(struct cso_context *ctx,
865 struct pipe_query *query,
866 boolean condition,
867 enum pipe_render_cond_flag mode)
868 {
869 struct pipe_context *pipe = ctx->pipe;
870
871 if (ctx->render_condition != query ||
872 ctx->render_condition_mode != mode ||
873 ctx->render_condition_cond != condition) {
874 pipe->render_condition(pipe, query, condition, mode);
875 ctx->render_condition = query;
876 ctx->render_condition_cond = condition;
877 ctx->render_condition_mode = mode;
878 }
879 }
880
881 static void
882 cso_save_render_condition(struct cso_context *ctx)
883 {
884 ctx->render_condition_saved = ctx->render_condition;
885 ctx->render_condition_cond_saved = ctx->render_condition_cond;
886 ctx->render_condition_mode_saved = ctx->render_condition_mode;
887 }
888
889 static void
890 cso_restore_render_condition(struct cso_context *ctx)
891 {
892 cso_set_render_condition(ctx, ctx->render_condition_saved,
893 ctx->render_condition_cond_saved,
894 ctx->render_condition_mode_saved);
895 }
896
897 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
898 {
899 assert(ctx->has_geometry_shader || !handle);
900
901 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
902 ctx->geometry_shader = handle;
903 ctx->pipe->bind_gs_state(ctx->pipe, handle);
904 }
905 }
906
907 void cso_delete_geometry_shader(struct cso_context *ctx, void *handle)
908 {
909 if (handle == ctx->geometry_shader) {
910 /* unbind before deleting */
911 ctx->pipe->bind_gs_state(ctx->pipe, NULL);
912 ctx->geometry_shader = NULL;
913 }
914 ctx->pipe->delete_gs_state(ctx->pipe, handle);
915 }
916
917 static void
918 cso_save_geometry_shader(struct cso_context *ctx)
919 {
920 if (!ctx->has_geometry_shader) {
921 return;
922 }
923
924 assert(!ctx->geometry_shader_saved);
925 ctx->geometry_shader_saved = ctx->geometry_shader;
926 }
927
928 static void
929 cso_restore_geometry_shader(struct cso_context *ctx)
930 {
931 if (!ctx->has_geometry_shader) {
932 return;
933 }
934
935 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
936 ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
937 ctx->geometry_shader = ctx->geometry_shader_saved;
938 }
939 ctx->geometry_shader_saved = NULL;
940 }
941
942 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
943 {
944 assert(ctx->has_tessellation || !handle);
945
946 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
947 ctx->tessctrl_shader = handle;
948 ctx->pipe->bind_tcs_state(ctx->pipe, handle);
949 }
950 }
951
952 void cso_delete_tessctrl_shader(struct cso_context *ctx, void *handle)
953 {
954 if (handle == ctx->tessctrl_shader) {
955 /* unbind before deleting */
956 ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
957 ctx->tessctrl_shader = NULL;
958 }
959 ctx->pipe->delete_tcs_state(ctx->pipe, handle);
960 }
961
962 static void
963 cso_save_tessctrl_shader(struct cso_context *ctx)
964 {
965 if (!ctx->has_tessellation) {
966 return;
967 }
968
969 assert(!ctx->tessctrl_shader_saved);
970 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
971 }
972
973 static void
974 cso_restore_tessctrl_shader(struct cso_context *ctx)
975 {
976 if (!ctx->has_tessellation) {
977 return;
978 }
979
980 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
981 ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
982 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
983 }
984 ctx->tessctrl_shader_saved = NULL;
985 }
986
987 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
988 {
989 assert(ctx->has_tessellation || !handle);
990
991 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
992 ctx->tesseval_shader = handle;
993 ctx->pipe->bind_tes_state(ctx->pipe, handle);
994 }
995 }
996
997 void cso_delete_tesseval_shader(struct cso_context *ctx, void *handle)
998 {
999 if (handle == ctx->tesseval_shader) {
1000 /* unbind before deleting */
1001 ctx->pipe->bind_tes_state(ctx->pipe, NULL);
1002 ctx->tesseval_shader = NULL;
1003 }
1004 ctx->pipe->delete_tes_state(ctx->pipe, handle);
1005 }
1006
1007 static void
1008 cso_save_tesseval_shader(struct cso_context *ctx)
1009 {
1010 if (!ctx->has_tessellation) {
1011 return;
1012 }
1013
1014 assert(!ctx->tesseval_shader_saved);
1015 ctx->tesseval_shader_saved = ctx->tesseval_shader;
1016 }
1017
1018 static void
1019 cso_restore_tesseval_shader(struct cso_context *ctx)
1020 {
1021 if (!ctx->has_tessellation) {
1022 return;
1023 }
1024
1025 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1026 ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
1027 ctx->tesseval_shader = ctx->tesseval_shader_saved;
1028 }
1029 ctx->tesseval_shader_saved = NULL;
1030 }
1031
1032 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
1033 {
1034 assert(ctx->has_compute_shader || !handle);
1035
1036 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1037 ctx->compute_shader = handle;
1038 ctx->pipe->bind_compute_state(ctx->pipe, handle);
1039 }
1040 }
1041
1042 void cso_delete_compute_shader(struct cso_context *ctx, void *handle)
1043 {
1044 if (handle == ctx->compute_shader) {
1045 /* unbind before deleting */
1046 ctx->pipe->bind_compute_state(ctx->pipe, NULL);
1047 ctx->compute_shader = NULL;
1048 }
1049 ctx->pipe->delete_compute_state(ctx->pipe, handle);
1050 }
1051
/**
 * Bind a vertex-elements state, creating and caching the driver CSO on
 * first use.
 *
 * If u_vbuf is active it takes over vertex-element handling entirely.
 * Otherwise the (count, states) pair is hashed and looked up in the CSO
 * cache; a driver state object is created only on a cache miss, and the
 * bind is skipped when the resulting handle is already current.
 *
 * Returns PIPE_OK on success or PIPE_ERROR_OUT_OF_MEMORY.
 */
enum pipe_error
cso_set_vertex_elements(struct cso_context *ctx,
                        unsigned count,
                        const struct pipe_vertex_element *states)
{
   struct u_vbuf *vbuf = ctx->vbuf;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;
   struct cso_velems_state velems_state;

   if (vbuf) {
      u_vbuf_set_vertex_elements(vbuf, count, states);
      return PIPE_OK;
   }

   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: wrap a newly-created driver state object in a CSO. */
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      memcpy(&cso->state, &velems_state, key_size);
      cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
                                                      &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only when the CSO actually changes. */
   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
1111
1112 static void
1113 cso_save_vertex_elements(struct cso_context *ctx)
1114 {
1115 struct u_vbuf *vbuf = ctx->vbuf;
1116
1117 if (vbuf) {
1118 u_vbuf_save_vertex_elements(vbuf);
1119 return;
1120 }
1121
1122 assert(!ctx->velements_saved);
1123 ctx->velements_saved = ctx->velements;
1124 }
1125
1126 static void
1127 cso_restore_vertex_elements(struct cso_context *ctx)
1128 {
1129 struct u_vbuf *vbuf = ctx->vbuf;
1130
1131 if (vbuf) {
1132 u_vbuf_restore_vertex_elements(vbuf);
1133 return;
1134 }
1135
1136 if (ctx->velements != ctx->velements_saved) {
1137 ctx->velements = ctx->velements_saved;
1138 ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1139 }
1140 ctx->velements_saved = NULL;
1141 }
1142
1143 /* vertex buffers */
1144
/**
 * Set vertex buffers, mirroring pipe_context::set_vertex_buffers.
 *
 * When u_vbuf is active it handles the buffers itself.  Otherwise slot 0
 * is additionally tracked (with a reference) in vertex_buffer0_current so
 * that meta ops can save and restore it.  A zero count is a no-op.
 */
void cso_set_vertex_buffers(struct cso_context *ctx,
                            unsigned start_slot, unsigned count,
                            const struct pipe_vertex_buffer *buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   if (!count)
      return;

   if (vbuf) {
      u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
      return;
   }

   /* Save what's in the auxiliary slot, so that we can save and restore it
    * for meta ops. */
   if (start_slot == 0) {
      if (buffers) {
         pipe_vertex_buffer_reference(&ctx->vertex_buffer0_current,
                                      buffers);
      } else {
         pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_current);
      }
   }

   ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
}
1172
1173 static void
1174 cso_save_vertex_buffer0(struct cso_context *ctx)
1175 {
1176 struct u_vbuf *vbuf = ctx->vbuf;
1177
1178 if (vbuf) {
1179 u_vbuf_save_vertex_buffer0(vbuf);
1180 return;
1181 }
1182
1183 pipe_vertex_buffer_reference(&ctx->vertex_buffer0_saved,
1184 &ctx->vertex_buffer0_current);
1185 }
1186
1187 static void
1188 cso_restore_vertex_buffer0(struct cso_context *ctx)
1189 {
1190 struct u_vbuf *vbuf = ctx->vbuf;
1191
1192 if (vbuf) {
1193 u_vbuf_restore_vertex_buffer0(vbuf);
1194 return;
1195 }
1196
1197 cso_set_vertex_buffers(ctx, 0, 1, &ctx->vertex_buffer0_saved);
1198 pipe_vertex_buffer_unreference(&ctx->vertex_buffer0_saved);
1199 }
1200
1201
/**
 * Stage one sampler state at the given index for a shader stage.
 *
 * The sampler template is looked up in (or added to) the CSO cache and the
 * resulting driver object is staged in ctx->samplers[shader_stage]; nothing
 * is sent to the driver until cso_single_sampler_done() is called.
 * A NULL templ is a no-op.  On allocation failure the slot is left
 * unchanged (the error is silently ignored).
 */
void
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
                   unsigned idx, const struct pipe_sampler_state *templ)
{
   if (templ) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_sampler *cso;
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* Cache miss: create the driver sampler state and cache it. */
         cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;
         cso->hash_key = hash_key;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            FREE(cso);
            return;
         }
      }
      else {
         cso = cso_hash_iter_data(iter);
      }

      /* Stage the sampler; max_sampler_seen tracks the highest slot so
       * cso_single_sampler_done() knows how many to bind.
       */
      ctx->samplers[shader_stage].cso_samplers[idx] = cso;
      ctx->samplers[shader_stage].samplers[idx] = cso->data;
      ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
   }
}
1242
1243
1244 /**
1245 * Send staged sampler state to the driver.
1246 */
1247 void
1248 cso_single_sampler_done(struct cso_context *ctx,
1249 enum pipe_shader_type shader_stage)
1250 {
1251 struct sampler_info *info = &ctx->samplers[shader_stage];
1252
1253 if (ctx->max_sampler_seen == -1)
1254 return;
1255
1256 ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1257 ctx->max_sampler_seen + 1,
1258 info->samplers);
1259 ctx->max_sampler_seen = -1;
1260 }
1261
1262
/*
 * Set a range of sampler states for the given shader stage.  Failures
 * while creating individual sampler CSOs are silently ignored so that as
 * many samplers as possible still get set.
 */
1268 void
1269 cso_set_samplers(struct cso_context *ctx,
1270 enum pipe_shader_type shader_stage,
1271 unsigned nr,
1272 const struct pipe_sampler_state **templates)
1273 {
1274 for (unsigned i = 0; i < nr; i++)
1275 cso_single_sampler(ctx, shader_stage, i, templates[i]);
1276
1277 cso_single_sampler_done(ctx, shader_stage);
1278 }
1279
1280 static void
1281 cso_save_fragment_samplers(struct cso_context *ctx)
1282 {
1283 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1284 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1285
1286 memcpy(saved->cso_samplers, info->cso_samplers,
1287 sizeof(info->cso_samplers));
1288 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1289 }
1290
1291
/* Restore the fragment samplers snapshotted by cso_save_fragment_samplers()
 * and re-send them to the driver.
 */
static void
cso_restore_fragment_samplers(struct cso_context *ctx)
{
   struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
   struct sampler_info *saved = &ctx->fragment_samplers_saved;

   memcpy(info->cso_samplers, saved->cso_samplers,
          sizeof(info->cso_samplers));
   memcpy(info->samplers, saved->samplers, sizeof(info->samplers));

   /* Find the highest non-NULL sampler so the bind below covers every
    * restored slot.
    */
   for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
      if (info->samplers[i]) {
         ctx->max_sampler_seen = i;
         break;
      }
   }

   cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
}
1311
1312
/**
 * Set sampler views for a shader stage.
 *
 * Fragment-stage views are tracked (with references) in ctx->fragment_views
 * so they can be saved and restored, and the driver call is skipped when
 * nothing changed.  Other stages are passed straight through to the driver.
 */
void
cso_set_sampler_views(struct cso_context *ctx,
                      enum pipe_shader_type shader_stage,
                      unsigned count,
                      struct pipe_sampler_view **views)
{
   if (shader_stage == PIPE_SHADER_FRAGMENT) {
      unsigned i;
      boolean any_change = FALSE;

      /* reference new views */
      for (i = 0; i < count; i++) {
         any_change |= ctx->fragment_views[i] != views[i];
         pipe_sampler_view_reference(&ctx->fragment_views[i], views[i]);
      }
      /* unref extra old views, if any */
      for (; i < ctx->nr_fragment_views; i++) {
         any_change |= ctx->fragment_views[i] != NULL;
         pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      }

      /* bind the new sampler views */
      if (any_change) {
         /* Bind enough slots to also clear previously-bound extras. */
         ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
                                      MAX2(ctx->nr_fragment_views, count),
                                      ctx->fragment_views);
      }

      ctx->nr_fragment_views = count;
   }
   else
      ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, count, views);
}
1346
1347
1348 static void
1349 cso_save_fragment_sampler_views(struct cso_context *ctx)
1350 {
1351 unsigned i;
1352
1353 ctx->nr_fragment_views_saved = ctx->nr_fragment_views;
1354
1355 for (i = 0; i < ctx->nr_fragment_views; i++) {
1356 assert(!ctx->fragment_views_saved[i]);
1357 pipe_sampler_view_reference(&ctx->fragment_views_saved[i],
1358 ctx->fragment_views[i]);
1359 }
1360 }
1361
1362
/* Re-bind the fragment sampler views saved by
 * cso_save_fragment_sampler_views(), transferring the saved references
 * back and dropping any views bound since the save.
 */
static void
cso_restore_fragment_sampler_views(struct cso_context *ctx)
{
   unsigned i, nr_saved = ctx->nr_fragment_views_saved;
   unsigned num;

   for (i = 0; i < nr_saved; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      /* move the reference from one pointer to another */
      ctx->fragment_views[i] = ctx->fragment_views_saved[i];
      ctx->fragment_views_saved[i] = NULL;
   }
   /* unref views beyond the saved count */
   for (; i < ctx->nr_fragment_views; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
   }

   /* Bind enough slots to cover both the saved set and anything bound
    * since the save, so stale views get cleared.
    */
   num = MAX2(ctx->nr_fragment_views, nr_saved);

   /* bind the old/saved sampler views */
   ctx->pipe->set_sampler_views(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, num,
                                ctx->fragment_views);

   ctx->nr_fragment_views = nr_saved;
   ctx->nr_fragment_views_saved = 0;
}
1388
1389
1390 void
1391 cso_set_shader_images(struct cso_context *ctx,
1392 enum pipe_shader_type shader_stage,
1393 unsigned start, unsigned count,
1394 struct pipe_image_view *images)
1395 {
1396 if (shader_stage == PIPE_SHADER_FRAGMENT && start == 0 && count >= 1) {
1397 util_copy_image_view(&ctx->fragment_image0_current, &images[0]);
1398 }
1399
1400 ctx->pipe->set_shader_images(ctx->pipe, shader_stage, start, count, images);
1401 }
1402
1403
1404 static void
1405 cso_save_fragment_image0(struct cso_context *ctx)
1406 {
1407 util_copy_image_view(&ctx->fragment_image0_saved,
1408 &ctx->fragment_image0_current);
1409 }
1410
1411
1412 static void
1413 cso_restore_fragment_image0(struct cso_context *ctx)
1414 {
1415 cso_set_shader_images(ctx, PIPE_SHADER_FRAGMENT, 0, 1,
1416 &ctx->fragment_image0_saved);
1417 }
1418
1419
/**
 * Bind stream-output targets, managing the context's references.
 *
 * Takes references on the new targets and drops references on any old
 * targets beyond the new count.  A no-op when the driver lacks streamout
 * (num_targets must then be 0) or when going from zero to zero targets.
 */
void
cso_set_stream_outputs(struct cso_context *ctx,
                       unsigned num_targets,
                       struct pipe_stream_output_target **targets,
                       const unsigned *offsets)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;

   if (!ctx->has_streamout) {
      assert(num_targets == 0);
      return;
   }

   if (ctx->nr_so_targets == 0 && num_targets == 0) {
      /* Nothing to do. */
      return;
   }

   /* reference new targets */
   for (i = 0; i < num_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
   }
   /* unref extra old targets, if any */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, num_targets, targets,
                                   offsets);
   ctx->nr_so_targets = num_targets;
}
1452
1453 static void
1454 cso_save_stream_outputs(struct cso_context *ctx)
1455 {
1456 uint i;
1457
1458 if (!ctx->has_streamout) {
1459 return;
1460 }
1461
1462 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1463
1464 for (i = 0; i < ctx->nr_so_targets; i++) {
1465 assert(!ctx->so_targets_saved[i]);
1466 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1467 }
1468 }
1469
/* Re-bind the stream-output targets saved by cso_save_stream_outputs(),
 * moving the saved references back into ctx->so_targets and dropping any
 * targets bound since the save.
 */
static void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* unref targets bound since the save that have no saved counterpart */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1505
1506 /* constant buffers */
1507
1508 void
1509 cso_set_constant_buffer(struct cso_context *cso,
1510 enum pipe_shader_type shader_stage,
1511 unsigned index, struct pipe_constant_buffer *cb)
1512 {
1513 struct pipe_context *pipe = cso->pipe;
1514
1515 pipe->set_constant_buffer(pipe, shader_stage, index, cb);
1516
1517 if (index == 0) {
1518 util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb);
1519 }
1520 }
1521
1522 void
1523 cso_set_constant_buffer_resource(struct cso_context *cso,
1524 enum pipe_shader_type shader_stage,
1525 unsigned index,
1526 struct pipe_resource *buffer)
1527 {
1528 if (buffer) {
1529 struct pipe_constant_buffer cb;
1530 cb.buffer = buffer;
1531 cb.buffer_offset = 0;
1532 cb.buffer_size = buffer->width0;
1533 cb.user_buffer = NULL;
1534 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1535 } else {
1536 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1537 }
1538 }
1539
1540 void
1541 cso_set_constant_user_buffer(struct cso_context *cso,
1542 enum pipe_shader_type shader_stage,
1543 unsigned index, void *ptr, unsigned size)
1544 {
1545 if (ptr) {
1546 struct pipe_constant_buffer cb;
1547 cb.buffer = NULL;
1548 cb.buffer_offset = 0;
1549 cb.buffer_size = size;
1550 cb.user_buffer = ptr;
1551 cso_set_constant_buffer(cso, shader_stage, index, &cb);
1552 } else {
1553 cso_set_constant_buffer(cso, shader_stage, index, NULL);
1554 }
1555 }
1556
1557 void
1558 cso_save_constant_buffer_slot0(struct cso_context *cso,
1559 enum pipe_shader_type shader_stage)
1560 {
1561 util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
1562 &cso->aux_constbuf_current[shader_stage]);
1563 }
1564
1565 void
1566 cso_restore_constant_buffer_slot0(struct cso_context *cso,
1567 enum pipe_shader_type shader_stage)
1568 {
1569 cso_set_constant_buffer(cso, shader_stage, 0,
1570 &cso->aux_constbuf_saved[shader_stage]);
1571 pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
1572 NULL);
1573 }
1574
1575
1576 /**
1577 * Save all the CSO state items specified by the state_mask bitmask
1578 * of CSO_BIT_x flags.
1579 */
void
cso_save_state(struct cso_context *cso, unsigned state_mask)
{
   /* Nested save/restore pairs are not supported. */
   assert(cso->saved_state == 0);

   /* Remembered so cso_restore_state() knows which items to restore. */
   cso->saved_state = state_mask;

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_save_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_save_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_save_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_save_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_save_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_save_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_save_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_save_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_save_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_save_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_save_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_save_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_save_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_save_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_save_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_save_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_save_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_save_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_save_viewport(cso);
   /* Queries are paused here and resumed by cso_restore_state(). */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, false);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_save_fragment_image0(cso);
}
1630
1631
1632 /**
1633 * Restore the state which was saved by cso_save_state().
1634 */
void
cso_restore_state(struct cso_context *cso)
{
   /* The mask recorded by cso_save_state() selects what to restore. */
   unsigned state_mask = cso->saved_state;

   /* Calling restore without a matching save is a caller bug. */
   assert(state_mask);

   if (state_mask & CSO_BIT_AUX_VERTEX_BUFFER_SLOT)
      cso_restore_vertex_buffer0(cso);
   if (state_mask & CSO_BIT_BLEND)
      cso_restore_blend(cso);
   if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
      cso_restore_depth_stencil_alpha(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
      cso_restore_fragment_samplers(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SAMPLER_VIEWS)
      cso_restore_fragment_sampler_views(cso);
   if (state_mask & CSO_BIT_FRAGMENT_SHADER)
      cso_restore_fragment_shader(cso);
   if (state_mask & CSO_BIT_FRAMEBUFFER)
      cso_restore_framebuffer(cso);
   if (state_mask & CSO_BIT_GEOMETRY_SHADER)
      cso_restore_geometry_shader(cso);
   if (state_mask & CSO_BIT_MIN_SAMPLES)
      cso_restore_min_samples(cso);
   if (state_mask & CSO_BIT_RASTERIZER)
      cso_restore_rasterizer(cso);
   if (state_mask & CSO_BIT_RENDER_CONDITION)
      cso_restore_render_condition(cso);
   if (state_mask & CSO_BIT_SAMPLE_MASK)
      cso_restore_sample_mask(cso);
   if (state_mask & CSO_BIT_STENCIL_REF)
      cso_restore_stencil_ref(cso);
   if (state_mask & CSO_BIT_STREAM_OUTPUTS)
      cso_restore_stream_outputs(cso);
   if (state_mask & CSO_BIT_TESSCTRL_SHADER)
      cso_restore_tessctrl_shader(cso);
   if (state_mask & CSO_BIT_TESSEVAL_SHADER)
      cso_restore_tesseval_shader(cso);
   if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
      cso_restore_vertex_elements(cso);
   if (state_mask & CSO_BIT_VERTEX_SHADER)
      cso_restore_vertex_shader(cso);
   if (state_mask & CSO_BIT_VIEWPORT)
      cso_restore_viewport(cso);
   /* Resume queries paused by cso_save_state(). */
   if (state_mask & CSO_BIT_PAUSE_QUERIES)
      cso->pipe->set_active_query_state(cso->pipe, true);
   if (state_mask & CSO_BIT_FRAGMENT_IMAGE0)
      cso_restore_fragment_image0(cso);

   /* Allow the next save. */
   cso->saved_state = 0;
}
1687
1688
1689
1690 /* drawing */
1691
1692 void
1693 cso_draw_vbo(struct cso_context *cso,
1694 const struct pipe_draw_info *info)
1695 {
1696 struct u_vbuf *vbuf = cso->vbuf;
1697
1698 /* We can't have both indirect drawing and SO-vertex-count drawing */
1699 assert(info->indirect == NULL || info->count_from_stream_output == NULL);
1700
1701 /* We can't have SO-vertex-count drawing with an index buffer */
1702 assert(info->count_from_stream_output == NULL || info->index_size == 0);
1703
1704 if (vbuf) {
1705 u_vbuf_draw_vbo(vbuf, info);
1706 } else {
1707 struct pipe_context *pipe = cso->pipe;
1708 pipe->draw_vbo(pipe, info);
1709 }
1710 }
1711
1712 void
1713 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1714 {
1715 struct pipe_draw_info info;
1716
1717 util_draw_init_info(&info);
1718
1719 info.mode = mode;
1720 info.start = start;
1721 info.count = count;
1722 info.min_index = start;
1723 info.max_index = start + count - 1;
1724
1725 cso_draw_vbo(cso, &info);
1726 }
1727
1728 void
1729 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1730 uint start, uint count,
1731 uint start_instance, uint instance_count)
1732 {
1733 struct pipe_draw_info info;
1734
1735 util_draw_init_info(&info);
1736
1737 info.mode = mode;
1738 info.start = start;
1739 info.count = count;
1740 info.min_index = start;
1741 info.max_index = start + count - 1;
1742 info.start_instance = start_instance;
1743 info.instance_count = instance_count;
1744
1745 cso_draw_vbo(cso, &info);
1746 }