st/mesa: sink TCS/TES/GS/CS translate code into st_translate_common_program
src/mesa/state_tracker/st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35 #include "main/imports.h"
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/prog_to_nir.h"
41 #include "program/programopt.h"
42
43 #include "compiler/nir/nir.h"
44
45 #include "pipe/p_context.h"
46 #include "pipe/p_defines.h"
47 #include "pipe/p_shader_tokens.h"
48 #include "draw/draw_context.h"
49 #include "tgsi/tgsi_dump.h"
50 #include "tgsi/tgsi_emulate.h"
51 #include "tgsi/tgsi_parse.h"
52 #include "tgsi/tgsi_ureg.h"
53
54 #include "st_debug.h"
55 #include "st_cb_bitmap.h"
56 #include "st_cb_drawpixels.h"
57 #include "st_context.h"
58 #include "st_tgsi_lower_depth_clamp.h"
59 #include "st_tgsi_lower_yuv.h"
60 #include "st_program.h"
61 #include "st_mesa_to_tgsi.h"
62 #include "st_atifs_to_tgsi.h"
63 #include "st_nir.h"
64 #include "st_shader_cache.h"
65 #include "cso_cache/cso_context.h"
66
67
68
69 static void
70 set_affected_state_flags(uint64_t *states,
71 struct gl_program *prog,
72 uint64_t new_constants,
73 uint64_t new_sampler_views,
74 uint64_t new_samplers,
75 uint64_t new_images,
76 uint64_t new_ubos,
77 uint64_t new_ssbos,
78 uint64_t new_atomics)
79 {
80 if (prog->Parameters->NumParameters)
81 *states |= new_constants;
82
83 if (prog->info.num_textures)
84 *states |= new_sampler_views | new_samplers;
85
86 if (prog->info.num_images)
87 *states |= new_images;
88
89 if (prog->info.num_ubos)
90 *states |= new_ubos;
91
92 if (prog->info.num_ssbos)
93 *states |= new_ssbos;
94
95 if (prog->info.num_abos)
96 *states |= new_atomics;
97 }
98
99 /**
100 * This determines which states will be updated when the shader is bound.
101 */
102 void
103 st_set_prog_affected_state_flags(struct gl_program *prog)
104 {
105 uint64_t *states;
106
107 switch (prog->info.stage) {
108 case MESA_SHADER_VERTEX:
109 states = &((struct st_vertex_program*)prog)->affected_states;
110
111 *states = ST_NEW_VS_STATE |
112 ST_NEW_RASTERIZER |
113 ST_NEW_VERTEX_ARRAYS;
114
115 set_affected_state_flags(states, prog,
116 ST_NEW_VS_CONSTANTS,
117 ST_NEW_VS_SAMPLER_VIEWS,
118 ST_NEW_VS_SAMPLERS,
119 ST_NEW_VS_IMAGES,
120 ST_NEW_VS_UBOS,
121 ST_NEW_VS_SSBOS,
122 ST_NEW_VS_ATOMICS);
123 break;
124
125 case MESA_SHADER_TESS_CTRL:
126 states = &(st_common_program(prog))->affected_states;
127
128 *states = ST_NEW_TCS_STATE;
129
130 set_affected_state_flags(states, prog,
131 ST_NEW_TCS_CONSTANTS,
132 ST_NEW_TCS_SAMPLER_VIEWS,
133 ST_NEW_TCS_SAMPLERS,
134 ST_NEW_TCS_IMAGES,
135 ST_NEW_TCS_UBOS,
136 ST_NEW_TCS_SSBOS,
137 ST_NEW_TCS_ATOMICS);
138 break;
139
140 case MESA_SHADER_TESS_EVAL:
141 states = &(st_common_program(prog))->affected_states;
142
143 *states = ST_NEW_TES_STATE |
144 ST_NEW_RASTERIZER;
145
146 set_affected_state_flags(states, prog,
147 ST_NEW_TES_CONSTANTS,
148 ST_NEW_TES_SAMPLER_VIEWS,
149 ST_NEW_TES_SAMPLERS,
150 ST_NEW_TES_IMAGES,
151 ST_NEW_TES_UBOS,
152 ST_NEW_TES_SSBOS,
153 ST_NEW_TES_ATOMICS);
154 break;
155
156 case MESA_SHADER_GEOMETRY:
157 states = &(st_common_program(prog))->affected_states;
158
159 *states = ST_NEW_GS_STATE |
160 ST_NEW_RASTERIZER;
161
162 set_affected_state_flags(states, prog,
163 ST_NEW_GS_CONSTANTS,
164 ST_NEW_GS_SAMPLER_VIEWS,
165 ST_NEW_GS_SAMPLERS,
166 ST_NEW_GS_IMAGES,
167 ST_NEW_GS_UBOS,
168 ST_NEW_GS_SSBOS,
169 ST_NEW_GS_ATOMICS);
170 break;
171
172 case MESA_SHADER_FRAGMENT:
173 states = &((struct st_fragment_program*)prog)->affected_states;
174
175 /* gl_FragCoord and glDrawPixels always use constants. */
176 *states = ST_NEW_FS_STATE |
177 ST_NEW_SAMPLE_SHADING |
178 ST_NEW_FS_CONSTANTS;
179
180 set_affected_state_flags(states, prog,
181 ST_NEW_FS_CONSTANTS,
182 ST_NEW_FS_SAMPLER_VIEWS,
183 ST_NEW_FS_SAMPLERS,
184 ST_NEW_FS_IMAGES,
185 ST_NEW_FS_UBOS,
186 ST_NEW_FS_SSBOS,
187 ST_NEW_FS_ATOMICS);
188 break;
189
190 case MESA_SHADER_COMPUTE:
191 states = &((struct st_common_program*)prog)->affected_states;
192
193 *states = ST_NEW_CS_STATE;
194
195 set_affected_state_flags(states, prog,
196 ST_NEW_CS_CONSTANTS,
197 ST_NEW_CS_SAMPLER_VIEWS,
198 ST_NEW_CS_SAMPLERS,
199 ST_NEW_CS_IMAGES,
200 ST_NEW_CS_UBOS,
201 ST_NEW_CS_SSBOS,
202 ST_NEW_CS_ATOMICS);
203 break;
204
205 default:
206 unreachable("unhandled shader stage");
207 }
208 }
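/* Illustrative sketch (not compiled here): roughly how the affected_states
 * mask computed above is consumed when a program is bound.  The st->dirty
 * field name and the helper name are assumptions for illustration; the real
 * consumers live in the state-atom/bind code, not in this file.
 *
 *    static void example_bind_vertex_program(struct st_context *st,
 *                                            struct st_vertex_program *stvp)
 *    {
 *       // Mark every piece of gallium state this program can touch as dirty
 *       // so the next draw revalidates it.
 *       st->dirty |= stvp->affected_states;
 *    }
 */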
209
210 static void
211 delete_ir(struct pipe_shader_state *ir)
212 {
213 if (ir->tokens) {
214 ureg_free_tokens(ir->tokens);
215 ir->tokens = NULL;
216 }
217
 218    /* Note: once pipe->create_*_state has been called on a pipe_shader_state's
 219     * ->ir.nir, the driver owns that NIR.  Such callers should NULL out the
 220     * nir field of the pipe_shader_state before it can reach this function,
 221     * to indicate that ownership has been transferred.
222 *
223 * GLSL IR and ARB programs will have set gl_program->nir to the same
224 * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
225 */
226 }
227
228 /**
229 * Delete a vertex program variant. Note the caller must unlink
230 * the variant from the linked list.
231 */
232 static void
233 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
234 {
235 if (vpv->driver_shader) {
236 if (st->has_shareable_shaders || vpv->key.st == st) {
237 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
238 } else {
239 st_save_zombie_shader(vpv->key.st, PIPE_SHADER_VERTEX,
240 vpv->driver_shader);
241 }
242 }
243
244 if (vpv->draw_shader)
245 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
246
247 delete_ir(&vpv->tgsi);
248
249 free( vpv );
250 }
251
252
253
254 /**
255 * Clean out any old compilations:
256 */
257 void
258 st_release_vp_variants( struct st_context *st,
259 struct st_vertex_program *stvp )
260 {
261 struct st_vp_variant *vpv;
262
263 for (vpv = stvp->variants; vpv; ) {
264 struct st_vp_variant *next = vpv->next;
265 delete_vp_variant(st, vpv);
266 vpv = next;
267 }
268
269 stvp->variants = NULL;
270
271 delete_ir(&stvp->tgsi);
272 }
273
274
275
276 /**
277 * Delete a fragment program variant. Note the caller must unlink
278 * the variant from the linked list.
279 */
280 static void
281 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
282 {
283 if (fpv->driver_shader) {
284 if (st->has_shareable_shaders || fpv->key.st == st) {
285 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
286 } else {
287 st_save_zombie_shader(fpv->key.st, PIPE_SHADER_FRAGMENT,
288 fpv->driver_shader);
289 }
290 }
291
292 free(fpv);
293 }
294
295
296 /**
297 * Free all variants of a fragment program.
298 */
299 void
300 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
301 {
302 struct st_fp_variant *fpv;
303
304 for (fpv = stfp->variants; fpv; ) {
305 struct st_fp_variant *next = fpv->next;
306 delete_fp_variant(st, fpv);
307 fpv = next;
308 }
309
310 stfp->variants = NULL;
311
312 delete_ir(&stfp->tgsi);
313 }
314
315
316 /**
317 * Delete a basic program variant. Note the caller must unlink
318 * the variant from the linked list.
319 */
320 static void
321 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
322 GLenum target)
323 {
324 if (v->driver_shader) {
325 if (st->has_shareable_shaders || v->key.st == st) {
326 /* The shader's context matches the calling context, or we
327 * don't care.
328 */
329 switch (target) {
330 case GL_TESS_CONTROL_PROGRAM_NV:
331 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
332 break;
333 case GL_TESS_EVALUATION_PROGRAM_NV:
334 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
335 break;
336 case GL_GEOMETRY_PROGRAM_NV:
337 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
338 break;
339 case GL_COMPUTE_PROGRAM_NV:
340 cso_delete_compute_shader(st->cso_context, v->driver_shader);
341 break;
342 default:
343 unreachable("bad shader type in delete_basic_variant");
344 }
345 } else {
346 /* We can't delete a shader with a context different from the one
347 * that created it. Add it to the creating context's zombie list.
348 */
349 enum pipe_shader_type type;
350 switch (target) {
351 case GL_TESS_CONTROL_PROGRAM_NV:
352 type = PIPE_SHADER_TESS_CTRL;
353 break;
354 case GL_TESS_EVALUATION_PROGRAM_NV:
355 type = PIPE_SHADER_TESS_EVAL;
356 break;
357 case GL_GEOMETRY_PROGRAM_NV:
358 type = PIPE_SHADER_GEOMETRY;
359 break;
360 default:
 361          unreachable("unexpected shader target in delete_basic_variant");
362 }
363 st_save_zombie_shader(v->key.st, type, v->driver_shader);
364 }
365 }
366
367 free(v);
368 }
369
370
371 /**
372 * Free all basic program variants.
373 */
374 void
375 st_release_basic_variants(struct st_context *st, struct st_common_program *p)
376 {
377 struct st_basic_variant *v;
378
379 for (v = p->variants; v; ) {
380 struct st_basic_variant *next = v->next;
381 delete_basic_variant(st, v, p->Base.Target);
382 v = next;
383 }
384
385 p->variants = NULL;
386 delete_ir(&p->tgsi);
387 }
388
389
390 /**
391 * Translate ARB (asm) program to NIR
392 */
393 static nir_shader *
394 st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
395 gl_shader_stage stage)
396 {
397 const struct gl_shader_compiler_options *options =
398 &st->ctx->Const.ShaderCompilerOptions[stage];
399
400 /* Translate to NIR */
401 nir_shader *nir = prog_to_nir(prog, options->NirOptions);
402 NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
403 nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
404
405 NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, st->pipe->screen);
406 NIR_PASS_V(nir, nir_lower_system_values);
407
408 /* Optimise NIR */
409 NIR_PASS_V(nir, nir_opt_constant_folding);
410 st_nir_opts(nir);
411 nir_validate_shader(nir, "after st/ptn NIR opts");
412
413 return nir;
414 }
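/* Note that the NIR produced here is not yet in its final form: the variant
 * creation paths below (see st_create_vp_variant / st_create_fp_variant) run
 * st_finalize_nir() and any key-dependent lowering before handing the shader
 * to pipe->create_*_state().
 */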
415
416 /**
417 * Translate a vertex program.
418 */
419 bool
420 st_translate_vertex_program(struct st_context *st,
421 struct st_vertex_program *stvp)
422 {
423 struct ureg_program *ureg;
424 enum pipe_error error;
425 unsigned num_outputs = 0;
426 unsigned attr;
427 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
428 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
429
430 stvp->num_inputs = 0;
431 memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
432
433 if (stvp->Base.arb.IsPositionInvariant)
434 _mesa_insert_mvp_code(st->ctx, &stvp->Base);
435
436 /*
437 * Determine number of inputs, the mappings between VERT_ATTRIB_x
438 * and TGSI generic input indexes, plus input attrib semantic info.
439 */
440 for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
441 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
442 stvp->input_to_index[attr] = stvp->num_inputs;
443 stvp->index_to_input[stvp->num_inputs] = attr;
444 stvp->num_inputs++;
445 if ((stvp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
446 /* add placeholder for second part of a double attribute */
447 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
448 stvp->num_inputs++;
449 }
450 }
451 }
 452    /* Bit of a hack: pre-set up the potentially unused edgeflag input. */
453 stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
454 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
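/* Worked example of the mapping built above: a vertex program whose
 * inputs_read contains only VERT_ATTRIB_POS and VERT_ATTRIB_COLOR0 ends up
 * with
 *
 *    input_to_index[VERT_ATTRIB_POS]    = 0;
 *    input_to_index[VERT_ATTRIB_COLOR0] = 1;
 *    index_to_input[0] = VERT_ATTRIB_POS;
 *    index_to_input[1] = VERT_ATTRIB_COLOR0;
 *    num_inputs = 2;
 *
 * and the edgeflag presetup just above then reserves slot 2.  A dual-slot
 * (64-bit) attribute would additionally insert an ST_DOUBLE_ATTRIB_PLACEHOLDER
 * entry after its primary slot.
 */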
455
456 /* Compute mapping of vertex program outputs to slots.
457 */
458 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
459 if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
460 stvp->result_to_output[attr] = ~0;
461 }
462 else {
463 unsigned slot = num_outputs++;
464
465 stvp->result_to_output[attr] = slot;
466
467 unsigned semantic_name, semantic_index;
468 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
469 &semantic_name, &semantic_index);
470 output_semantic_name[slot] = semantic_name;
471 output_semantic_index[slot] = semantic_index;
472 }
473 }
 474    /* Similar hack to the above: pre-set up the potentially unused edgeflag output. */
475 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
476 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
477 output_semantic_index[num_outputs] = 0;
478
479 /* ARB_vp: */
480 if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
481 _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
482
483 /* This determines which states will be updated when the assembly
484 * shader is bound.
485 */
486 stvp->affected_states = ST_NEW_VS_STATE |
487 ST_NEW_RASTERIZER |
488 ST_NEW_VERTEX_ARRAYS;
489
490 if (stvp->Base.Parameters->NumParameters)
491 stvp->affected_states |= ST_NEW_VS_CONSTANTS;
492
493 /* No samplers are allowed in ARB_vp. */
494 }
495
496 if (stvp->shader_program) {
497 st_translate_stream_output_info(stvp->Base.sh.LinkedTransformFeedback,
498 stvp->result_to_output,
499 &stvp->tgsi.stream_output);
500
501 st_store_ir_in_disk_cache(st, &stvp->Base, true);
502 return true;
503 }
504
505 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
506 if (ureg == NULL)
507 return false;
508
509 if (stvp->Base.info.clip_distance_array_size)
510 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
511 stvp->Base.info.clip_distance_array_size);
512 if (stvp->Base.info.cull_distance_array_size)
513 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
514 stvp->Base.info.cull_distance_array_size);
515
516 if (ST_DEBUG & DEBUG_MESA) {
517 _mesa_print_program(&stvp->Base);
518 _mesa_print_program_parameters(st->ctx, &stvp->Base);
519 debug_printf("\n");
520 }
521
522 if (stvp->glsl_to_tgsi) {
523 error = st_translate_program(st->ctx,
524 PIPE_SHADER_VERTEX,
525 ureg,
526 stvp->glsl_to_tgsi,
527 &stvp->Base,
528 /* inputs */
529 stvp->num_inputs,
530 stvp->input_to_index,
531 NULL, /* inputSlotToAttr */
532 NULL, /* input semantic name */
533 NULL, /* input semantic index */
534 NULL, /* interp mode */
535 /* outputs */
536 num_outputs,
537 stvp->result_to_output,
538 output_semantic_name,
539 output_semantic_index);
540
541 st_translate_stream_output_info(stvp->Base.sh.LinkedTransformFeedback,
542 stvp->result_to_output,
543 &stvp->tgsi.stream_output);
544
545 free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
546 } else
547 error = st_translate_mesa_program(st->ctx,
548 PIPE_SHADER_VERTEX,
549 ureg,
550 &stvp->Base,
551 /* inputs */
552 stvp->num_inputs,
553 stvp->input_to_index,
554 NULL, /* input semantic name */
555 NULL, /* input semantic index */
556 NULL,
557 /* outputs */
558 num_outputs,
559 stvp->result_to_output,
560 output_semantic_name,
561 output_semantic_index);
562
563 if (error) {
564 debug_printf("%s: failed to translate Mesa program:\n", __func__);
565 _mesa_print_program(&stvp->Base);
566 debug_assert(0);
567 return false;
568 }
569
570 stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
571 ureg_destroy(ureg);
572
573 if (stvp->glsl_to_tgsi) {
574 stvp->glsl_to_tgsi = NULL;
575 st_store_ir_in_disk_cache(st, &stvp->Base, false);
576 }
577
578 bool use_nir = PIPE_SHADER_IR_NIR ==
579 st->pipe->screen->get_shader_param(st->pipe->screen, PIPE_SHADER_VERTEX,
580 PIPE_SHADER_CAP_PREFERRED_IR);
581
582 if (use_nir) {
583 nir_shader *nir =
584 st_translate_prog_to_nir(st, &stvp->Base, MESA_SHADER_VERTEX);
585
586 if (stvp->tgsi.ir.nir)
587 ralloc_free(stvp->tgsi.ir.nir);
588 stvp->tgsi.type = PIPE_SHADER_IR_NIR;
589 stvp->tgsi.ir.nir = nir;
590 stvp->Base.nir = nir;
591 return true;
592 }
593
594 return stvp->tgsi.tokens != NULL;
595 }
596
597 static const gl_state_index16 depth_range_state[STATE_LENGTH] =
598 { STATE_DEPTH_RANGE };
599
600 static struct st_vp_variant *
601 st_create_vp_variant(struct st_context *st,
602 struct st_vertex_program *stvp,
603 const struct st_vp_variant_key *key)
604 {
605 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
606 struct pipe_context *pipe = st->pipe;
607 struct gl_program_parameter_list *params = stvp->Base.Parameters;
608
609 vpv->key = *key;
610 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
611 vpv->num_inputs = stvp->num_inputs;
612
613 /* When generating a NIR program, we usually don't have TGSI tokens.
614 * However, we do create them for ARB_vertex_program / fixed-function VS
615 * programs which we may need to use with the draw module for legacy
616 * feedback/select emulation. If they exist, copy them.
617 */
618 if (stvp->tgsi.tokens)
619 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
620
621 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
622 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
623 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
624 if (key->clamp_color)
625 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
626 if (key->passthrough_edgeflags) {
627 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
628 vpv->num_inputs++;
629 }
630
631 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
632 vpv->tgsi.ir.nir);
633
634 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
635 /* driver takes ownership of IR: */
636 vpv->tgsi.ir.nir = NULL;
637 return vpv;
638 }
639
640 /* Emulate features. */
641 if (key->clamp_color || key->passthrough_edgeflags) {
642 const struct tgsi_token *tokens;
643 unsigned flags =
644 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
645 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
646
647 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
648
649 if (tokens) {
650 tgsi_free_tokens(vpv->tgsi.tokens);
651 vpv->tgsi.tokens = tokens;
652
653 if (key->passthrough_edgeflags)
654 vpv->num_inputs++;
655 } else
656 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
657 }
658
659 if (key->lower_depth_clamp) {
660 unsigned depth_range_const =
661 _mesa_add_state_reference(params, depth_range_state);
662
663 const struct tgsi_token *tokens;
664 tokens = st_tgsi_lower_depth_clamp(vpv->tgsi.tokens, depth_range_const,
665 key->clip_negative_one_to_one);
666 if (tokens != vpv->tgsi.tokens)
667 tgsi_free_tokens(vpv->tgsi.tokens);
668 vpv->tgsi.tokens = tokens;
669 }
670
671 if (ST_DEBUG & DEBUG_TGSI) {
672 tgsi_dump(vpv->tgsi.tokens, 0);
673 debug_printf("\n");
674 }
675
676 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
677 return vpv;
678 }
679
680
681 /**
682 * Find/create a vertex program variant.
683 */
684 struct st_vp_variant *
685 st_get_vp_variant(struct st_context *st,
686 struct st_vertex_program *stvp,
687 const struct st_vp_variant_key *key)
688 {
689 struct st_vp_variant *vpv;
690
691 /* Search for existing variant */
692 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
693 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
694 break;
695 }
696 }
697
698 if (!vpv) {
699 /* create now */
700 vpv = st_create_vp_variant(st, stvp, key);
701 if (vpv) {
702 for (unsigned index = 0; index < vpv->num_inputs; ++index) {
703 unsigned attr = stvp->index_to_input[index];
704 if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
705 continue;
706 vpv->vert_attrib_mask |= 1u << attr;
707 }
708
709 /* insert into list */
710 vpv->next = stvp->variants;
711 stvp->variants = vpv;
712 }
713 }
714
715 return vpv;
716 }
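/* Illustrative usage sketch, mirroring st_precompile_shader_variant() at the
 * bottom of this file.  Setting key fields other than st (e.g. clamp_color,
 * passthrough_edgeflags) is shown only as an example of how draw-time callers
 * request feature emulation; which fields they actually set depends on the
 * current GL state.
 *
 *    struct st_vp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    key.clamp_color = 1;   // example: clamp vertex colors in the shader
 *    struct st_vp_variant *vpv = st_get_vp_variant(st, stvp, &key);
 *    // vpv->driver_shader is what eventually gets bound on the pipe context.
 */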
717
718
719 /**
720 * Translate a Mesa fragment shader into a TGSI shader.
721 */
722 bool
723 st_translate_fragment_program(struct st_context *st,
724 struct st_fragment_program *stfp)
725 {
726 /* We have already compiled to NIR so just return */
727 if (stfp->shader_program) {
728 st_store_ir_in_disk_cache(st, &stfp->Base, true);
729 return true;
730 }
731
732 ubyte outputMapping[2 * FRAG_RESULT_MAX];
733 ubyte inputMapping[VARYING_SLOT_MAX];
734 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
735 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
736 GLuint attr;
737 GLbitfield64 inputsRead;
738 struct ureg_program *ureg;
739
740 GLboolean write_all = GL_FALSE;
741
742 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
743 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
744 uint fs_num_inputs = 0;
745
746 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
747 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
748 uint fs_num_outputs = 0;
749
750 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
751
752 /* Non-GLSL programs: */
753 if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
754 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
755 if (st->ctx->Const.GLSLFragCoordIsSysVal)
756 _mesa_program_fragment_position_to_sysval(&stfp->Base);
757
758 /* This determines which states will be updated when the assembly
759 * shader is bound.
760 *
761 * fragment.position and glDrawPixels always use constants.
762 */
763 stfp->affected_states = ST_NEW_FS_STATE |
764 ST_NEW_SAMPLE_SHADING |
765 ST_NEW_FS_CONSTANTS;
766
767 if (stfp->ati_fs) {
768 /* Just set them for ATI_fs unconditionally. */
769 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
770 ST_NEW_FS_SAMPLERS;
771 } else {
772 /* ARB_fp */
773 if (stfp->Base.SamplersUsed)
774 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
775 ST_NEW_FS_SAMPLERS;
776 }
777 }
778
779
780 bool use_nir = PIPE_SHADER_IR_NIR ==
781 st->pipe->screen->get_shader_param(st->pipe->screen,
782 PIPE_SHADER_FRAGMENT,
783 PIPE_SHADER_CAP_PREFERRED_IR);
784
785 if (use_nir && !stfp->ati_fs) {
786 nir_shader *nir =
787 st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
788
789 if (stfp->tgsi.ir.nir)
790 ralloc_free(stfp->tgsi.ir.nir);
791 stfp->tgsi.type = PIPE_SHADER_IR_NIR;
792 stfp->tgsi.ir.nir = nir;
793 stfp->Base.nir = nir;
794 return true;
795 }
796
797 /*
798 * Convert Mesa program inputs to TGSI input register semantics.
799 */
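/* A note on interpMode: fixed-function/ARB inputs get an explicit TGSI
 * interpolation mode below, while glsl_to_tgsi inputs get
 * TGSI_INTERPOLATE_COUNT, which the glsl_to_tgsi translation treats as
 * "no override; use the interpolation qualifier declared in the shader".
 */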
800 inputsRead = stfp->Base.info.inputs_read;
801 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
802 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
803 const GLuint slot = fs_num_inputs++;
804
805 inputMapping[attr] = slot;
806 inputSlotToAttr[slot] = attr;
807
808 switch (attr) {
809 case VARYING_SLOT_POS:
810 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
811 input_semantic_index[slot] = 0;
812 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
813 break;
814 case VARYING_SLOT_COL0:
815 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
816 input_semantic_index[slot] = 0;
817 interpMode[slot] = stfp->glsl_to_tgsi ?
818 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
819 break;
820 case VARYING_SLOT_COL1:
821 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
822 input_semantic_index[slot] = 1;
823 interpMode[slot] = stfp->glsl_to_tgsi ?
824 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
825 break;
826 case VARYING_SLOT_FOGC:
827 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
828 input_semantic_index[slot] = 0;
829 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
830 break;
831 case VARYING_SLOT_FACE:
832 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
833 input_semantic_index[slot] = 0;
834 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
835 break;
836 case VARYING_SLOT_PRIMITIVE_ID:
837 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
838 input_semantic_index[slot] = 0;
839 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
840 break;
841 case VARYING_SLOT_LAYER:
842 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
843 input_semantic_index[slot] = 0;
844 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
845 break;
846 case VARYING_SLOT_VIEWPORT:
847 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
848 input_semantic_index[slot] = 0;
849 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
850 break;
851 case VARYING_SLOT_CLIP_DIST0:
852 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
853 input_semantic_index[slot] = 0;
854 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
855 break;
856 case VARYING_SLOT_CLIP_DIST1:
857 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
858 input_semantic_index[slot] = 1;
859 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
860 break;
861 case VARYING_SLOT_CULL_DIST0:
862 case VARYING_SLOT_CULL_DIST1:
863 /* these should have been lowered by GLSL */
864 assert(0);
865 break;
866 /* In most cases, there is nothing special about these
867 * inputs, so adopt a convention to use the generic
868 * semantic name and the mesa VARYING_SLOT_ number as the
869 * index.
870 *
871 * All that is required is that the vertex shader labels
872 * its own outputs similarly, and that the vertex shader
873 * generates at least every output required by the
874 * fragment shader plus fixed-function hardware (such as
875 * BFC).
876 *
877 * However, some drivers may need us to identify the PNTC and TEXi
878 * varyings if, for example, their capability to replace them with
879 * sprite coordinates is limited.
880 */
881 case VARYING_SLOT_PNTC:
882 if (st->needs_texcoord_semantic) {
883 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
884 input_semantic_index[slot] = 0;
885 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
886 break;
887 }
888 /* fall through */
889 case VARYING_SLOT_TEX0:
890 case VARYING_SLOT_TEX1:
891 case VARYING_SLOT_TEX2:
892 case VARYING_SLOT_TEX3:
893 case VARYING_SLOT_TEX4:
894 case VARYING_SLOT_TEX5:
895 case VARYING_SLOT_TEX6:
896 case VARYING_SLOT_TEX7:
897 if (st->needs_texcoord_semantic) {
898 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
899 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
900 interpMode[slot] = stfp->glsl_to_tgsi ?
901 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
902 break;
903 }
904 /* fall through */
905 case VARYING_SLOT_VAR0:
906 default:
907 /* Semantic indices should be zero-based because drivers may choose
908 * to assign a fixed slot determined by that index.
909 * This is useful because ARB_separate_shader_objects uses location
910 * qualifiers for linkage, and if the semantic index corresponds to
 911        * these locations, linkage passes in the driver become unnecessary.
912 *
913 * If needs_texcoord_semantic is true, no semantic indices will be
914 * consumed for the TEXi varyings, and we can base the locations of
915 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
916 */
917 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
918 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
919 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
920 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
921 if (attr == VARYING_SLOT_PNTC)
922 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
923 else {
924 interpMode[slot] = stfp->glsl_to_tgsi ?
925 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
926 }
927 break;
928 }
929 }
930 else {
931 inputMapping[attr] = -1;
932 }
933 }
934
935 /*
936 * Semantics and mapping for outputs
937 */
938 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
939
940 /* if z is written, emit that first */
941 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
942 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
943 fs_output_semantic_index[fs_num_outputs] = 0;
944 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
945 fs_num_outputs++;
946 outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
947 }
948
949 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
950 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
951 fs_output_semantic_index[fs_num_outputs] = 0;
952 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
953 fs_num_outputs++;
954 outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
955 }
956
957 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
958 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
959 fs_output_semantic_index[fs_num_outputs] = 0;
960 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
961 fs_num_outputs++;
962 outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
963 }
964
965 /* handle remaining outputs (color) */
966 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
967 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
968 stfp->Base.SecondaryOutputsWritten;
969 const unsigned loc = attr % FRAG_RESULT_MAX;
970
971 if (written & BITFIELD64_BIT(loc)) {
972 switch (loc) {
973 case FRAG_RESULT_DEPTH:
974 case FRAG_RESULT_STENCIL:
975 case FRAG_RESULT_SAMPLE_MASK:
976 /* handled above */
977 assert(0);
978 break;
979 case FRAG_RESULT_COLOR:
980 write_all = GL_TRUE; /* fallthrough */
981 default: {
982 int index;
983 assert(loc == FRAG_RESULT_COLOR ||
984 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
985
986 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
987
988 if (attr >= FRAG_RESULT_MAX) {
989 /* Secondary color for dual source blending. */
990 assert(index == 0);
991 index++;
992 }
993
994 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
995 fs_output_semantic_index[fs_num_outputs] = index;
996 outputMapping[attr] = fs_num_outputs;
997 break;
998 }
999 }
1000
1001 fs_num_outputs++;
1002 }
1003 }
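/* Worked example of the output mapping above: a fragment shader writing
 * gl_FragDepth plus two MRT color outputs (FRAG_RESULT_DATA0/DATA1) yields
 *
 *    slot 0: TGSI_SEMANTIC_POSITION       (depth, emitted first)
 *    slot 1: TGSI_SEMANTIC_COLOR index 0  (FRAG_RESULT_DATA0)
 *    slot 2: TGSI_SEMANTIC_COLOR index 1  (FRAG_RESULT_DATA1)
 *
 * A dual-source blend output (a SecondaryOutputsWritten bit, i.e.
 * attr >= FRAG_RESULT_MAX above) instead gets TGSI_SEMANTIC_COLOR with
 * semantic index 1 regardless of which slot it lands in.
 */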
1004
1005 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
1006 if (ureg == NULL)
1007 return false;
1008
1009 if (ST_DEBUG & DEBUG_MESA) {
1010 _mesa_print_program(&stfp->Base);
1011 _mesa_print_program_parameters(st->ctx, &stfp->Base);
1012 debug_printf("\n");
1013 }
1014 if (write_all == GL_TRUE)
1015 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
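/* FS_COLOR0_WRITES_ALL_CBUFS tells the driver that the single color output
 * written above (gl_FragColor / ARB result.color) must be replicated to every
 * bound color buffer, matching GL's broadcast semantics for non-MRT shaders.
 */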
1016
1017 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
1018 switch (stfp->Base.info.fs.depth_layout) {
1019 case FRAG_DEPTH_LAYOUT_ANY:
1020 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1021 TGSI_FS_DEPTH_LAYOUT_ANY);
1022 break;
1023 case FRAG_DEPTH_LAYOUT_GREATER:
1024 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1025 TGSI_FS_DEPTH_LAYOUT_GREATER);
1026 break;
1027 case FRAG_DEPTH_LAYOUT_LESS:
1028 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1029 TGSI_FS_DEPTH_LAYOUT_LESS);
1030 break;
1031 case FRAG_DEPTH_LAYOUT_UNCHANGED:
1032 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1033 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
1034 break;
1035 default:
1036 assert(0);
1037 }
1038 }
1039
1040 if (stfp->glsl_to_tgsi) {
1041 st_translate_program(st->ctx,
1042 PIPE_SHADER_FRAGMENT,
1043 ureg,
1044 stfp->glsl_to_tgsi,
1045 &stfp->Base,
1046 /* inputs */
1047 fs_num_inputs,
1048 inputMapping,
1049 inputSlotToAttr,
1050 input_semantic_name,
1051 input_semantic_index,
1052 interpMode,
1053 /* outputs */
1054 fs_num_outputs,
1055 outputMapping,
1056 fs_output_semantic_name,
1057 fs_output_semantic_index);
1058
1059 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
1060 } else if (stfp->ati_fs)
1061 st_translate_atifs_program(ureg,
1062 stfp->ati_fs,
1063 &stfp->Base,
1064 /* inputs */
1065 fs_num_inputs,
1066 inputMapping,
1067 input_semantic_name,
1068 input_semantic_index,
1069 interpMode,
1070 /* outputs */
1071 fs_num_outputs,
1072 outputMapping,
1073 fs_output_semantic_name,
1074 fs_output_semantic_index);
1075 else
1076 st_translate_mesa_program(st->ctx,
1077 PIPE_SHADER_FRAGMENT,
1078 ureg,
1079 &stfp->Base,
1080 /* inputs */
1081 fs_num_inputs,
1082 inputMapping,
1083 input_semantic_name,
1084 input_semantic_index,
1085 interpMode,
1086 /* outputs */
1087 fs_num_outputs,
1088 outputMapping,
1089 fs_output_semantic_name,
1090 fs_output_semantic_index);
1091
1092 stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
1093 ureg_destroy(ureg);
1094
1095 if (stfp->glsl_to_tgsi) {
1096 stfp->glsl_to_tgsi = NULL;
1097 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1098 }
1099
1100 return stfp->tgsi.tokens != NULL;
1101 }
1102
1103 static struct st_fp_variant *
1104 st_create_fp_variant(struct st_context *st,
1105 struct st_fragment_program *stfp,
1106 const struct st_fp_variant_key *key)
1107 {
1108 struct pipe_context *pipe = st->pipe;
1109 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1110 struct pipe_shader_state tgsi = {0};
1111 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1112 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1113 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1114 static const gl_state_index16 scale_state[STATE_LENGTH] =
1115 { STATE_INTERNAL, STATE_PT_SCALE };
1116 static const gl_state_index16 bias_state[STATE_LENGTH] =
1117 { STATE_INTERNAL, STATE_PT_BIAS };
1118
1119 if (!variant)
1120 return NULL;
1121
1122 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1123 tgsi.type = PIPE_SHADER_IR_NIR;
1124 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1125
1126 if (key->clamp_color)
1127 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1128
1129 if (key->persample_shading) {
1130 nir_shader *shader = tgsi.ir.nir;
1131 nir_foreach_variable(var, &shader->inputs)
1132 var->data.sample = true;
1133 }
1134
1135 assert(!(key->bitmap && key->drawpixels));
1136
1137 /* glBitmap */
1138 if (key->bitmap) {
1139 nir_lower_bitmap_options options = {0};
1140
1141 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1142 options.sampler = variant->bitmap_sampler;
1143 options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
1144
1145 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1146 }
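/* The ffs(~SamplersUsed) - 1 idiom above picks the lowest sampler slot the
 * program does not already use.  For example, with SamplersUsed == 0x5
 * (samplers 0 and 2 in use), ~0x5 has bit 1 as its lowest set bit, so ffs()
 * returns 2 and the bitmap texture lands in sampler slot 1.
 */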
1147
1148 /* glDrawPixels (color only) */
1149 if (key->drawpixels) {
1150 nir_lower_drawpixels_options options = {{0}};
1151 unsigned samplers_used = stfp->Base.SamplersUsed;
1152
1153 /* Find the first unused slot. */
1154 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1155 options.drawpix_sampler = variant->drawpix_sampler;
1156 samplers_used |= (1 << variant->drawpix_sampler);
1157
1158 options.pixel_maps = key->pixelMaps;
1159 if (key->pixelMaps) {
1160 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1161 options.pixelmap_sampler = variant->pixelmap_sampler;
1162 }
1163
1164 options.scale_and_bias = key->scaleAndBias;
1165 if (key->scaleAndBias) {
1166 _mesa_add_state_reference(params, scale_state);
1167 memcpy(options.scale_state_tokens, scale_state,
1168 sizeof(options.scale_state_tokens));
1169 _mesa_add_state_reference(params, bias_state);
1170 memcpy(options.bias_state_tokens, bias_state,
1171 sizeof(options.bias_state_tokens));
1172 }
1173
1174 _mesa_add_state_reference(params, texcoord_state);
1175 memcpy(options.texcoord_state_tokens, texcoord_state,
1176 sizeof(options.texcoord_state_tokens));
1177
1178 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1179 }
1180
1181 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1182 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
1183 key->external.lower_ayuv || key->external.lower_xyuv)) {
1184 nir_lower_tex_options options = {0};
1185 options.lower_y_uv_external = key->external.lower_nv12;
1186 options.lower_y_u_v_external = key->external.lower_iyuv;
1187 options.lower_xy_uxvx_external = key->external.lower_xy_uxvx;
1188 options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
1189 options.lower_ayuv_external = key->external.lower_ayuv;
1190 options.lower_xyuv_external = key->external.lower_xyuv;
1191 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1192 }
1193
1194 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1195
1196 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1197 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1198 /* This pass needs to happen *after* nir_lower_sampler */
1199 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1200 ~stfp->Base.SamplersUsed,
1201 key->external.lower_nv12 || key->external.lower_xy_uxvx ||
1202 key->external.lower_yx_xuxv,
1203 key->external.lower_iyuv);
1204 }
1205
1206 /* Some of the lowering above may have introduced new varyings */
1207 nir_shader_gather_info(tgsi.ir.nir,
1208 nir_shader_get_entrypoint(tgsi.ir.nir));
1209
1210 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1211 variant->key = *key;
1212
1213 return variant;
1214 }
1215
1216 tgsi.tokens = stfp->tgsi.tokens;
1217
1218 assert(!(key->bitmap && key->drawpixels));
1219
1220 /* Fix texture targets and add fog for ATI_fs */
1221 if (stfp->ati_fs) {
1222 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1223
1224 if (tokens)
1225 tgsi.tokens = tokens;
1226 else
1227 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1228 }
1229
1230 /* Emulate features. */
1231 if (key->clamp_color || key->persample_shading) {
1232 const struct tgsi_token *tokens;
1233 unsigned flags =
1234 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1235 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1236
1237 tokens = tgsi_emulate(tgsi.tokens, flags);
1238
1239 if (tokens) {
1240 if (tgsi.tokens != stfp->tgsi.tokens)
1241 tgsi_free_tokens(tgsi.tokens);
1242 tgsi.tokens = tokens;
1243 } else
1244 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1245 }
1246
1247 /* glBitmap */
1248 if (key->bitmap) {
1249 const struct tgsi_token *tokens;
1250
1251 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1252
1253 tokens = st_get_bitmap_shader(tgsi.tokens,
1254 st->internal_target,
1255 variant->bitmap_sampler,
1256 st->needs_texcoord_semantic,
1257 st->bitmap.tex_format ==
1258 PIPE_FORMAT_R8_UNORM);
1259
1260 if (tokens) {
1261 if (tgsi.tokens != stfp->tgsi.tokens)
1262 tgsi_free_tokens(tgsi.tokens);
1263 tgsi.tokens = tokens;
1264 } else
1265 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1266 }
1267
1268 /* glDrawPixels (color only) */
1269 if (key->drawpixels) {
1270 const struct tgsi_token *tokens;
1271 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1272
1273 /* Find the first unused slot. */
1274 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1275
1276 if (key->pixelMaps) {
1277 unsigned samplers_used = stfp->Base.SamplersUsed |
1278 (1 << variant->drawpix_sampler);
1279
1280 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1281 }
1282
1283 if (key->scaleAndBias) {
1284 scale_const = _mesa_add_state_reference(params, scale_state);
1285 bias_const = _mesa_add_state_reference(params, bias_state);
1286 }
1287
1288 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1289
1290 tokens = st_get_drawpix_shader(tgsi.tokens,
1291 st->needs_texcoord_semantic,
1292 key->scaleAndBias, scale_const,
1293 bias_const, key->pixelMaps,
1294 variant->drawpix_sampler,
1295 variant->pixelmap_sampler,
1296 texcoord_const, st->internal_target);
1297
1298 if (tokens) {
1299 if (tgsi.tokens != stfp->tgsi.tokens)
1300 tgsi_free_tokens(tgsi.tokens);
1301 tgsi.tokens = tokens;
1302 } else
1303 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1304 }
1305
1306 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1307 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1308 const struct tgsi_token *tokens;
1309
1310       /* samplers inserted would conflict, but this should be impossible: */
1311 assert(!(key->bitmap || key->drawpixels));
1312
1313 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1314 ~stfp->Base.SamplersUsed,
1315 key->external.lower_nv12 ||
1316 key->external.lower_xy_uxvx ||
1317 key->external.lower_yx_xuxv,
1318 key->external.lower_iyuv);
1319 if (tokens) {
1320 if (tgsi.tokens != stfp->tgsi.tokens)
1321 tgsi_free_tokens(tgsi.tokens);
1322 tgsi.tokens = tokens;
1323 } else {
1324 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1325 }
1326 }
1327
1328 if (key->lower_depth_clamp) {
1329 unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
1330
1331 const struct tgsi_token *tokens;
1332 tokens = st_tgsi_lower_depth_clamp_fs(tgsi.tokens, depth_range_const);
1333 if (tgsi.tokens != stfp->tgsi.tokens)
1334 tgsi_free_tokens(tgsi.tokens);
1335 tgsi.tokens = tokens;
1336 }
1337
1338 if (ST_DEBUG & DEBUG_TGSI) {
1339 tgsi_dump(tgsi.tokens, 0);
1340 debug_printf("\n");
1341 }
1342
1343 /* fill in variant */
1344 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1345 variant->key = *key;
1346
1347 if (tgsi.tokens != stfp->tgsi.tokens)
1348 tgsi_free_tokens(tgsi.tokens);
1349 return variant;
1350 }
1351
1352 /**
1353  * Find/create a fragment program variant, translating the program if needed.
1354 */
1355 struct st_fp_variant *
1356 st_get_fp_variant(struct st_context *st,
1357 struct st_fragment_program *stfp,
1358 const struct st_fp_variant_key *key)
1359 {
1360 struct st_fp_variant *fpv;
1361
1362 /* Search for existing variant */
1363 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1364 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1365 break;
1366 }
1367 }
1368
1369 if (!fpv) {
1370 /* create new */
1371 fpv = st_create_fp_variant(st, stfp, key);
1372 if (fpv) {
1373 if (key->bitmap || key->drawpixels) {
1374 /* Regular variants should always come before the
1375           * bitmap & drawpixels variants (unless there
1376           * are no regular variants), so that
1377 * st_update_fp can take a fast path when
1378 * shader_has_one_variant is set.
1379 */
1380 if (!stfp->variants) {
1381 stfp->variants = fpv;
1382 } else {
1383 /* insert into list after the first one */
1384 fpv->next = stfp->variants->next;
1385 stfp->variants->next = fpv;
1386 }
1387 } else {
1388 /* insert into list */
1389 fpv->next = stfp->variants;
1390 stfp->variants = fpv;
1391 }
1392 }
1393 }
1394
1395 return fpv;
1396 }
1397
1398 /**
1399 * Update stream-output info for GS/TCS/TES. Normally this is done in
1400 * st_translate_program_common() but that is not called for glsl_to_nir
1401 * case.
1402 */
1403 static void
1404 st_translate_program_stream_output(struct gl_program *prog,
1405 struct pipe_stream_output_info *stream_output)
1406 {
1407 if (!prog->sh.LinkedTransformFeedback)
1408 return;
1409
1410 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1411 GLuint attr;
1412 uint num_outputs = 0;
1413
1414 memset(outputMapping, 0, sizeof(outputMapping));
1415
1416 /*
1417 * Determine number of outputs, the (default) output register
1418 * mapping and the semantic information for each output.
1419 */
1420 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1421 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1422 GLuint slot = num_outputs++;
1423
1424 outputMapping[attr] = slot;
1425 }
1426 }
1427
1428 st_translate_stream_output_info(prog->sh.LinkedTransformFeedback,
1429 outputMapping,
1430 stream_output);
1431 }
1432
1433 /**
1434  * Translate a program.  This is common code for the tessellation,
1435  * geometry and compute stages.
1436 */
1437 bool
1438 st_translate_common_program(struct st_context *st,
1439 struct st_common_program *stcp)
1440 {
1441 /* We have already compiled to NIR so just return */
1442 if (stcp->shader_program) {
1443 /* No variants */
1444 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1445 stcp->tgsi.ir.nir);
1446 if (stcp->Base.info.stage == MESA_SHADER_TESS_EVAL ||
1447 stcp->Base.info.stage == MESA_SHADER_GEOMETRY) {
1448 st_translate_program_stream_output(&stcp->Base,
1449 &stcp->tgsi.stream_output);
1450 }
1451 st_store_ir_in_disk_cache(st, &stcp->Base, true);
1452 return true;
1453 }
1454
1455 struct gl_program *prog = &stcp->Base;
1456 enum pipe_shader_type stage =
1457 pipe_shader_type_from_mesa(stcp->Base.info.stage);
1458 struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
1459
1460 if (ureg == NULL)
1461 return false;
1462
1463 switch (stage) {
1464 case PIPE_SHADER_TESS_CTRL:
1465 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1466 stcp->Base.info.tess.tcs_vertices_out);
1467 break;
1468
1469 case PIPE_SHADER_TESS_EVAL:
1470 if (stcp->Base.info.tess.primitive_mode == GL_ISOLINES)
1471 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1472 else
1473 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1474 stcp->Base.info.tess.primitive_mode);
1475
1476 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1477 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1478 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1479 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1480 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1481
1482 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1483 (stcp->Base.info.tess.spacing + 1) % 3);
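      /* The STATIC_ASSERTs above guarantee that (spacing + 1) % 3 maps each of
       * TESS_SPACING_EQUAL / FRACTIONAL_ODD / FRACTIONAL_EVEN onto the matching
       * PIPE_TESS_SPACING_* value, so the single expression converts all three.
       */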
1484
1485 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1486 !stcp->Base.info.tess.ccw);
1487 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1488 stcp->Base.info.tess.point_mode);
1489 break;
1490
1491 case PIPE_SHADER_GEOMETRY:
1492 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1493 stcp->Base.info.gs.input_primitive);
1494 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1495 stcp->Base.info.gs.output_primitive);
1496 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1497 stcp->Base.info.gs.vertices_out);
1498 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1499 stcp->Base.info.gs.invocations);
1500 break;
1501
1502 default:
1503 break;
1504 }
1505
1506 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1507 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1508 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1509 GLuint attr;
1510
1511 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1512 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1513 uint num_inputs = 0;
1514
1515 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1516 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1517 uint num_outputs = 0;
1518
1519 GLint i;
1520
1521 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1522 memset(inputMapping, 0, sizeof(inputMapping));
1523 memset(outputMapping, 0, sizeof(outputMapping));
1524 memset(&stcp->tgsi, 0, sizeof(stcp->tgsi));
1525
1526 if (prog->info.clip_distance_array_size)
1527 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1528 prog->info.clip_distance_array_size);
1529 if (prog->info.cull_distance_array_size)
1530 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1531 prog->info.cull_distance_array_size);
1532
1533 /*
1534 * Convert Mesa program inputs to TGSI input register semantics.
1535 */
1536 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1537 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1538 continue;
1539
1540 unsigned slot = num_inputs++;
1541
1542 inputMapping[attr] = slot;
1543 inputSlotToAttr[slot] = attr;
1544
1545 unsigned semantic_name, semantic_index;
1546 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1547 &semantic_name, &semantic_index);
1548 input_semantic_name[slot] = semantic_name;
1549 input_semantic_index[slot] = semantic_index;
1550 }
1551
1552 /* Also add patch inputs. */
1553 for (attr = 0; attr < 32; attr++) {
1554 if (prog->info.patch_inputs_read & (1u << attr)) {
1555 GLuint slot = num_inputs++;
1556 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1557
1558 inputMapping[patch_attr] = slot;
1559 inputSlotToAttr[slot] = patch_attr;
1560 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1561 input_semantic_index[slot] = attr;
1562 }
1563 }
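   /* Example: a shader reading per-patch slot 3 (VARYING_SLOT_PATCH0 + 3) gets
    * the next free TGSI input slot with TGSI_SEMANTIC_PATCH and semantic
    * index 3; note that the semantic index is the patch slot number, not the
    * TGSI input slot it was assigned.
    */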
1564
1565 /* initialize output semantics to defaults */
1566 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1567 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1568 output_semantic_index[i] = 0;
1569 }
1570
1571 /*
1572 * Determine number of outputs, the (default) output register
1573 * mapping and the semantic information for each output.
1574 */
1575 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1576 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1577 GLuint slot = num_outputs++;
1578
1579 outputMapping[attr] = slot;
1580
1581 unsigned semantic_name, semantic_index;
1582 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1583 &semantic_name, &semantic_index);
1584 output_semantic_name[slot] = semantic_name;
1585 output_semantic_index[slot] = semantic_index;
1586 }
1587 }
1588
1589 /* Also add patch outputs. */
1590 for (attr = 0; attr < 32; attr++) {
1591 if (prog->info.patch_outputs_written & (1u << attr)) {
1592 GLuint slot = num_outputs++;
1593 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1594
1595 outputMapping[patch_attr] = slot;
1596 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1597 output_semantic_index[slot] = attr;
1598 }
1599 }
1600
1601 st_translate_program(st->ctx,
1602 stage,
1603 ureg,
1604 stcp->glsl_to_tgsi,
1605 prog,
1606 /* inputs */
1607 num_inputs,
1608 inputMapping,
1609 inputSlotToAttr,
1610 input_semantic_name,
1611 input_semantic_index,
1612 NULL,
1613 /* outputs */
1614 num_outputs,
1615 outputMapping,
1616 output_semantic_name,
1617 output_semantic_index);
1618
1619 stcp->tgsi.tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1620
1621 ureg_destroy(ureg);
1622
1623 st_translate_stream_output_info(prog->sh.LinkedTransformFeedback,
1624 outputMapping,
1625 &stcp->tgsi.stream_output);
1626
1627 st_store_ir_in_disk_cache(st, prog, false);
1628
1629 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1630 _mesa_print_program(prog);
1631 debug_printf("\n");
1632 }
1633
1634 if (ST_DEBUG & DEBUG_TGSI) {
1635 tgsi_dump(stcp->tgsi.tokens, 0);
1636 debug_printf("\n");
1637 }
1638
1639 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1640 stcp->glsl_to_tgsi = NULL;
1641 return true;
1642 }
1643
1644
1645 /**
1646 * Get/create a basic program variant.
1647 */
1648 struct st_basic_variant *
1649 st_get_basic_variant(struct st_context *st,
1650 unsigned pipe_shader,
1651 struct st_common_program *prog,
1652 const struct st_basic_variant_key *key)
1653 {
1654 struct pipe_context *pipe = st->pipe;
1655 struct st_basic_variant *v;
1656 struct pipe_shader_state tgsi = {0};
1657
1658 /* Search for existing variant */
1659 for (v = prog->variants; v; v = v->next) {
1660 if (memcmp(&v->key, key, sizeof(*key)) == 0) {
1661 break;
1662 }
1663 }
1664
1665 if (!v) {
1666 /* create new */
1667 v = CALLOC_STRUCT(st_basic_variant);
1668 if (v) {
1669
1670 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1671 tgsi.type = PIPE_SHADER_IR_NIR;
1672 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1673
1674 if (key->clamp_color)
1675 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1676
1677 tgsi.stream_output = prog->tgsi.stream_output;
1678 } else {
1679 if (key->lower_depth_clamp) {
1680 struct gl_program_parameter_list *params = prog->Base.Parameters;
1681
1682 unsigned depth_range_const =
1683 _mesa_add_state_reference(params, depth_range_state);
1684
1685 const struct tgsi_token *tokens;
1686 tokens =
1687 st_tgsi_lower_depth_clamp(prog->tgsi.tokens,
1688 depth_range_const,
1689 key->clip_negative_one_to_one);
1690
1691 if (tokens != prog->tgsi.tokens)
1692 tgsi_free_tokens(prog->tgsi.tokens);
1693
1694 prog->tgsi.tokens = tokens;
1695 prog->num_tgsi_tokens = tgsi_num_tokens(tokens);
1696 }
1697 tgsi = prog->tgsi;
1698 }
1699 /* fill in new variant */
1700 switch (pipe_shader) {
1701 case PIPE_SHADER_TESS_CTRL:
1702 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1703 break;
1704 case PIPE_SHADER_TESS_EVAL:
1705 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1706 break;
1707 case PIPE_SHADER_GEOMETRY:
1708 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1709 break;
1710 default:
1711 assert(!"unhandled shader type");
1712 free(v);
1713 return NULL;
1714 }
1715
1716 v->key = *key;
1717
1718 /* insert into list */
1719 v->next = prog->variants;
1720 prog->variants = v;
1721 }
1722 }
1723
1724 return v;
1725 }
1726
1727
1728 /**
1729 * Get/create compute program variant.
1730 */
1731 struct st_basic_variant *
1732 st_get_cp_variant(struct st_context *st,
1733 struct pipe_shader_state *tgsi,
1734 unsigned shared_size,
1735 struct st_basic_variant **variants)
1736 {
1737 struct pipe_context *pipe = st->pipe;
1738 struct st_basic_variant *v;
1739 struct st_basic_variant_key key;
1740
1741 /* use memset, not an initializer to be sure all memory is zeroed */
1742 memset(&key, 0, sizeof(key));
1743
1744 key.st = st->has_shareable_shaders ? NULL : st;
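   /* When shaders are shareable between contexts, key.st is left NULL so that
    * a single variant can serve every context; otherwise the owning context is
    * recorded so that teardown (see the delete_*_variant helpers earlier in
    * this file) knows whether it may destroy the driver shader directly or
    * must queue it on the owning context's zombie list.
    */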
1745
1746 /* Search for existing variant */
1747 for (v = *variants; v; v = v->next) {
1748 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1749 break;
1750 }
1751 }
1752
1753 if (!v) {
1754 /* create new */
1755 v = CALLOC_STRUCT(st_basic_variant);
1756 if (v) {
1757 /* fill in new variant */
1758 struct pipe_compute_state cs = {0};
1759
1760 cs.ir_type = tgsi->type;
1761 cs.req_local_mem = shared_size;
1762
1763 if (tgsi->type == PIPE_SHADER_IR_NIR)
1764 cs.prog = nir_shader_clone(NULL, tgsi->ir.nir);
1765 else
1766 cs.prog = tgsi->tokens;
1767
1768 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1769 v->key = key;
1770
1771 /* insert into list */
1772 v->next = *variants;
1773 *variants = v;
1774 }
1775 }
1776
1777 return v;
1778 }
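/* Typical call, as used by st_precompile_shader_variant() below:
 *
 *    st_get_cp_variant(st, &p->tgsi, prog->info.cs.shared_size, &p->variants);
 *
 * where p is the st_common_program wrapping a compute gl_program.
 */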
1779
1780
1781 /**
1782 * Vert/Geom/Frag programs have per-context variants. Free all the
1783 * variants attached to the given program which match the given context.
1784 */
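/* Each loop below uses the pointer-to-pointer unlink idiom: prevPtr always
 * points at the link that references the current variant, so a matching
 * variant can be spliced out of the singly linked list without special-casing
 * the list head.
 */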
1785 static void
1786 destroy_program_variants(struct st_context *st, struct gl_program *target)
1787 {
1788 if (!target || target == &_mesa_DummyProgram)
1789 return;
1790
1791 switch (target->Target) {
1792 case GL_VERTEX_PROGRAM_ARB:
1793 {
1794 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1795 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1796
1797 for (vpv = stvp->variants; vpv; ) {
1798 struct st_vp_variant *next = vpv->next;
1799 if (vpv->key.st == st) {
1800 /* unlink from list */
1801 *prevPtr = next;
1802 /* destroy this variant */
1803 delete_vp_variant(st, vpv);
1804 }
1805 else {
1806 prevPtr = &vpv->next;
1807 }
1808 vpv = next;
1809 }
1810 }
1811 break;
1812 case GL_FRAGMENT_PROGRAM_ARB:
1813 {
1814 struct st_fragment_program *stfp =
1815 (struct st_fragment_program *) target;
1816 struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
1817
1818 for (fpv = stfp->variants; fpv; ) {
1819 struct st_fp_variant *next = fpv->next;
1820 if (fpv->key.st == st) {
1821 /* unlink from list */
1822 *prevPtr = next;
1823 /* destroy this variant */
1824 delete_fp_variant(st, fpv);
1825 }
1826 else {
1827 prevPtr = &fpv->next;
1828 }
1829 fpv = next;
1830 }
1831 }
1832 break;
1833 case GL_GEOMETRY_PROGRAM_NV:
1834 case GL_TESS_CONTROL_PROGRAM_NV:
1835 case GL_TESS_EVALUATION_PROGRAM_NV:
1836 case GL_COMPUTE_PROGRAM_NV:
1837 {
1838 struct st_common_program *p = st_common_program(target);
1839 struct st_basic_variant *v, **prevPtr = &p->variants;
1840
1841 for (v = p->variants; v; ) {
1842 struct st_basic_variant *next = v->next;
1843 if (v->key.st == st) {
1844 /* unlink from list */
1845 *prevPtr = next;
1846 /* destroy this variant */
1847 delete_basic_variant(st, v, target->Target);
1848 }
1849 else {
1850 prevPtr = &v->next;
1851 }
1852 v = next;
1853 }
1854 }
1855 break;
1856 default:
1857       _mesa_problem(NULL, "Unexpected program target 0x%x in "
1858                     "destroy_program_variants()", target->Target);
1859 }
1860 }
1861
1862
1863 /**
1864 * Callback for _mesa_HashWalk. Free all the shader's program variants
1865 * which match the given context.
1866 */
1867 static void
1868 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1869 {
1870 struct st_context *st = (struct st_context *) userData;
1871 struct gl_shader *shader = (struct gl_shader *) data;
1872
1873 switch (shader->Type) {
1874 case GL_SHADER_PROGRAM_MESA:
1875 {
1876 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1877 GLuint i;
1878
1879 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1880 if (shProg->_LinkedShaders[i])
1881 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1882 }
1883 }
1884 break;
1885 case GL_VERTEX_SHADER:
1886 case GL_FRAGMENT_SHADER:
1887 case GL_GEOMETRY_SHADER:
1888 case GL_TESS_CONTROL_SHADER:
1889 case GL_TESS_EVALUATION_SHADER:
1890 case GL_COMPUTE_SHADER:
1891 break;
1892 default:
1893 assert(0);
1894 }
1895 }
1896
1897
1898 /**
1899 * Callback for _mesa_HashWalk. Free all the program variants which match
1900 * the given context.
1901 */
1902 static void
1903 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1904 {
1905 struct st_context *st = (struct st_context *) userData;
1906 struct gl_program *program = (struct gl_program *) data;
1907 destroy_program_variants(st, program);
1908 }
1909
1910
1911 /**
1912 * Walk over all shaders and programs to delete any variants which
1913 * belong to the given context.
1914 * This is called during context tear-down.
1915 */
1916 void
1917 st_destroy_program_variants(struct st_context *st)
1918 {
1919 /* If shaders can be shared with other contexts, the last context will
1920 * call DeleteProgram on all shaders, releasing everything.
1921 */
1922 if (st->has_shareable_shaders)
1923 return;
1924
1925 /* ARB vert/frag program */
1926 _mesa_HashWalk(st->ctx->Shared->Programs,
1927 destroy_program_variants_cb, st);
1928
1929 /* GLSL vert/frag/geom shaders */
1930 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1931 destroy_shader_program_variants_cb, st);
1932 }
1933
1934
1935 /**
1936 * For debugging, print/dump the current vertex program.
1937 */
1938 void
1939 st_print_current_vertex_program(void)
1940 {
1941 GET_CURRENT_CONTEXT(ctx);
1942
1943 if (ctx->VertexProgram._Current) {
1944 struct st_vertex_program *stvp =
1945 (struct st_vertex_program *) ctx->VertexProgram._Current;
1946 struct st_vp_variant *stv;
1947
1948 debug_printf("Vertex program %u\n", stvp->Base.Id);
1949
1950 for (stv = stvp->variants; stv; stv = stv->next) {
1951 debug_printf("variant %p\n", stv);
1952 tgsi_dump(stv->tgsi.tokens, 0);
1953 }
1954 }
1955 }
1956
1957
1958 /**
1959 * Compile one shader variant.
1960 */
1961 void
1962 st_precompile_shader_variant(struct st_context *st,
1963 struct gl_program *prog)
1964 {
1965 switch (prog->Target) {
1966 case GL_VERTEX_PROGRAM_ARB: {
1967 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1968 struct st_vp_variant_key key;
1969
1970 memset(&key, 0, sizeof(key));
1971
1972 key.st = st->has_shareable_shaders ? NULL : st;
1973 st_get_vp_variant(st, p, &key);
1974 break;
1975 }
1976
1977 case GL_TESS_CONTROL_PROGRAM_NV: {
1978 struct st_common_program *p = st_common_program(prog);
1979 struct st_basic_variant_key key;
1980
1981 memset(&key, 0, sizeof(key));
1982
1983 key.st = st->has_shareable_shaders ? NULL : st;
1984 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p, &key);
1985 break;
1986 }
1987
1988 case GL_TESS_EVALUATION_PROGRAM_NV: {
1989 struct st_common_program *p = st_common_program(prog);
1990 struct st_basic_variant_key key;
1991
1992 memset(&key, 0, sizeof(key));
1993
1994 key.st = st->has_shareable_shaders ? NULL : st;
1995 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p, &key);
1996 break;
1997 }
1998
1999 case GL_GEOMETRY_PROGRAM_NV: {
2000 struct st_common_program *p = st_common_program(prog);
2001 struct st_basic_variant_key key;
2002
2003 memset(&key, 0, sizeof(key));
2004
2005 key.st = st->has_shareable_shaders ? NULL : st;
2006 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p, &key);
2007 break;
2008 }
2009
2010 case GL_FRAGMENT_PROGRAM_ARB: {
2011 struct st_fragment_program *p = (struct st_fragment_program *)prog;
2012 struct st_fp_variant_key key;
2013
2014 memset(&key, 0, sizeof(key));
2015
2016 key.st = st->has_shareable_shaders ? NULL : st;
2017 st_get_fp_variant(st, p, &key);
2018 break;
2019 }
2020
2021 case GL_COMPUTE_PROGRAM_NV: {
2022 struct st_common_program *p = (struct st_common_program *)prog;
2023 st_get_cp_variant(st, &p->tgsi, prog->info.cs.shared_size, &p->variants);
2024 break;
2025 }
2026
2027 default:
2028 assert(0);
2029 }
2030 }