[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35 #include "main/imports.h"
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/programopt.h"
41
42 #include "compiler/nir/nir.h"
43
44 #include "pipe/p_context.h"
45 #include "pipe/p_defines.h"
46 #include "pipe/p_shader_tokens.h"
47 #include "draw/draw_context.h"
48 #include "tgsi/tgsi_dump.h"
49 #include "tgsi/tgsi_emulate.h"
50 #include "tgsi/tgsi_parse.h"
51 #include "tgsi/tgsi_ureg.h"
52
53 #include "st_debug.h"
54 #include "st_cb_bitmap.h"
55 #include "st_cb_drawpixels.h"
56 #include "st_context.h"
57 #include "st_tgsi_lower_yuv.h"
58 #include "st_program.h"
59 #include "st_mesa_to_tgsi.h"
60 #include "st_atifs_to_tgsi.h"
61 #include "st_nir.h"
62 #include "st_shader_cache.h"
63 #include "cso_cache/cso_context.h"
64
65
66
67 static void
68 set_affected_state_flags(uint64_t *states,
69 struct gl_program *prog,
70 uint64_t new_constants,
71 uint64_t new_sampler_views,
72 uint64_t new_samplers,
73 uint64_t new_images,
74 uint64_t new_ubos,
75 uint64_t new_ssbos,
76 uint64_t new_atomics)
77 {
78 if (prog->Parameters->NumParameters)
79 *states |= new_constants;
80
81 if (prog->info.num_textures)
82 *states |= new_sampler_views | new_samplers;
83
84 if (prog->info.num_images)
85 *states |= new_images;
86
87 if (prog->info.num_ubos)
88 *states |= new_ubos;
89
90 if (prog->info.num_ssbos)
91 *states |= new_ssbos;
92
93 if (prog->info.num_abos)
94 *states |= new_atomics;
95 }
96
97 /**
98 * This determines which states will be updated when the shader is bound.
99 */
100 void
101 st_set_prog_affected_state_flags(struct gl_program *prog)
102 {
103 uint64_t *states;
104
105 switch (prog->info.stage) {
106 case MESA_SHADER_VERTEX:
107 states = &((struct st_vertex_program*)prog)->affected_states;
108
109 *states = ST_NEW_VS_STATE |
110 ST_NEW_RASTERIZER |
111 ST_NEW_VERTEX_ARRAYS;
112
113 set_affected_state_flags(states, prog,
114 ST_NEW_VS_CONSTANTS,
115 ST_NEW_VS_SAMPLER_VIEWS,
116 ST_NEW_VS_SAMPLERS,
117 ST_NEW_VS_IMAGES,
118 ST_NEW_VS_UBOS,
119 ST_NEW_VS_SSBOS,
120 ST_NEW_VS_ATOMICS);
121 break;
122
123 case MESA_SHADER_TESS_CTRL:
124 states = &(st_common_program(prog))->affected_states;
125
126 *states = ST_NEW_TCS_STATE;
127
128 set_affected_state_flags(states, prog,
129 ST_NEW_TCS_CONSTANTS,
130 ST_NEW_TCS_SAMPLER_VIEWS,
131 ST_NEW_TCS_SAMPLERS,
132 ST_NEW_TCS_IMAGES,
133 ST_NEW_TCS_UBOS,
134 ST_NEW_TCS_SSBOS,
135 ST_NEW_TCS_ATOMICS);
136 break;
137
138 case MESA_SHADER_TESS_EVAL:
139 states = &(st_common_program(prog))->affected_states;
140
141 *states = ST_NEW_TES_STATE |
142 ST_NEW_RASTERIZER;
143
144 set_affected_state_flags(states, prog,
145 ST_NEW_TES_CONSTANTS,
146 ST_NEW_TES_SAMPLER_VIEWS,
147 ST_NEW_TES_SAMPLERS,
148 ST_NEW_TES_IMAGES,
149 ST_NEW_TES_UBOS,
150 ST_NEW_TES_SSBOS,
151 ST_NEW_TES_ATOMICS);
152 break;
153
154 case MESA_SHADER_GEOMETRY:
155 states = &(st_common_program(prog))->affected_states;
156
157 *states = ST_NEW_GS_STATE |
158 ST_NEW_RASTERIZER;
159
160 set_affected_state_flags(states, prog,
161 ST_NEW_GS_CONSTANTS,
162 ST_NEW_GS_SAMPLER_VIEWS,
163 ST_NEW_GS_SAMPLERS,
164 ST_NEW_GS_IMAGES,
165 ST_NEW_GS_UBOS,
166 ST_NEW_GS_SSBOS,
167 ST_NEW_GS_ATOMICS);
168 break;
169
170 case MESA_SHADER_FRAGMENT:
171 states = &((struct st_fragment_program*)prog)->affected_states;
172
173 /* gl_FragCoord and glDrawPixels always use constants. */
174 *states = ST_NEW_FS_STATE |
175 ST_NEW_SAMPLE_SHADING |
176 ST_NEW_FS_CONSTANTS;
177
178 set_affected_state_flags(states, prog,
179 ST_NEW_FS_CONSTANTS,
180 ST_NEW_FS_SAMPLER_VIEWS,
181 ST_NEW_FS_SAMPLERS,
182 ST_NEW_FS_IMAGES,
183 ST_NEW_FS_UBOS,
184 ST_NEW_FS_SSBOS,
185 ST_NEW_FS_ATOMICS);
186 break;
187
188 case MESA_SHADER_COMPUTE:
189 states = &((struct st_compute_program*)prog)->affected_states;
190
191 *states = ST_NEW_CS_STATE;
192
193 set_affected_state_flags(states, prog,
194 ST_NEW_CS_CONSTANTS,
195 ST_NEW_CS_SAMPLER_VIEWS,
196 ST_NEW_CS_SAMPLERS,
197 ST_NEW_CS_IMAGES,
198 ST_NEW_CS_UBOS,
199 ST_NEW_CS_SSBOS,
200 ST_NEW_CS_ATOMICS);
201 break;
202
203 default:
204 unreachable("unhandled shader stage");
205 }
206 }
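/*
 * Minimal sketch of how the affected_states mask is consumed (the actual
 * call sites live in the state-update/bind code elsewhere in the state
 * tracker, so take this as illustrative only): when a program is bound,
 * its mask is OR'ed into the context's dirty bits so that only the state
 * atoms the shader actually uses are revalidated, roughly:
 *
 *    st->dirty |= ((struct st_vertex_program *) prog)->affected_states;
 */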
207
208 /**
209 * Delete a vertex program variant. Note the caller must unlink
210 * the variant from the linked list.
211 */
212 static void
213 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
214 {
215 if (vpv->driver_shader)
216 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
217
218 if (vpv->draw_shader)
219 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
220
  221    if ((vpv->tgsi.type == PIPE_SHADER_IR_TGSI) && vpv->tgsi.tokens)
222 ureg_free_tokens(vpv->tgsi.tokens);
223
224 free( vpv );
225 }
226
227
228
229 /**
230 * Clean out any old compilations:
231 */
232 void
233 st_release_vp_variants( struct st_context *st,
234 struct st_vertex_program *stvp )
235 {
236 struct st_vp_variant *vpv;
237
238 for (vpv = stvp->variants; vpv; ) {
239 struct st_vp_variant *next = vpv->next;
240 delete_vp_variant(st, vpv);
241 vpv = next;
242 }
243
244 stvp->variants = NULL;
245
246 if ((stvp->tgsi.type == PIPE_SHADER_IR_TGSI) && stvp->tgsi.tokens) {
247 tgsi_free_tokens(stvp->tgsi.tokens);
248 stvp->tgsi.tokens = NULL;
249 }
250 }
251
252
253
254 /**
255 * Delete a fragment program variant. Note the caller must unlink
256 * the variant from the linked list.
257 */
258 static void
259 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
260 {
261 if (fpv->driver_shader)
262 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
263 free(fpv);
264 }
265
266
267 /**
268 * Free all variants of a fragment program.
269 */
270 void
271 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
272 {
273 struct st_fp_variant *fpv;
274
275 for (fpv = stfp->variants; fpv; ) {
276 struct st_fp_variant *next = fpv->next;
277 delete_fp_variant(st, fpv);
278 fpv = next;
279 }
280
281 stfp->variants = NULL;
282
283 if ((stfp->tgsi.type == PIPE_SHADER_IR_TGSI) && stfp->tgsi.tokens) {
284 ureg_free_tokens(stfp->tgsi.tokens);
285 stfp->tgsi.tokens = NULL;
286 }
287 }
288
289
290 /**
291 * Delete a basic program variant. Note the caller must unlink
292 * the variant from the linked list.
293 */
294 static void
295 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
296 GLenum target)
297 {
298 if (v->driver_shader) {
299 switch (target) {
300 case GL_TESS_CONTROL_PROGRAM_NV:
301 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
302 break;
303 case GL_TESS_EVALUATION_PROGRAM_NV:
304 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
305 break;
306 case GL_GEOMETRY_PROGRAM_NV:
307 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
308 break;
309 case GL_COMPUTE_PROGRAM_NV:
310 cso_delete_compute_shader(st->cso_context, v->driver_shader);
311 break;
312 default:
313 assert(!"this shouldn't occur");
314 }
315 }
316
317 free(v);
318 }
319
320
321 /**
322 * Free all basic program variants.
323 */
324 void
325 st_release_basic_variants(struct st_context *st, GLenum target,
326 struct st_basic_variant **variants,
327 struct pipe_shader_state *tgsi)
328 {
329 struct st_basic_variant *v;
330
331 for (v = *variants; v; ) {
332 struct st_basic_variant *next = v->next;
333 delete_basic_variant(st, v, target);
334 v = next;
335 }
336
337 *variants = NULL;
338
339 if (tgsi->tokens) {
340 ureg_free_tokens(tgsi->tokens);
341 tgsi->tokens = NULL;
342 }
343 }
344
345
346 /**
347 * Free all variants of a compute program.
348 */
349 void
350 st_release_cp_variants(struct st_context *st, struct st_compute_program *stcp)
351 {
352 struct st_basic_variant **variants = &stcp->variants;
353 struct st_basic_variant *v;
354
355 for (v = *variants; v; ) {
356 struct st_basic_variant *next = v->next;
357 delete_basic_variant(st, v, stcp->Base.Target);
358 v = next;
359 }
360
361 *variants = NULL;
362
363 if (stcp->tgsi.prog) {
364 switch (stcp->tgsi.ir_type) {
365 case PIPE_SHADER_IR_TGSI:
366 ureg_free_tokens(stcp->tgsi.prog);
367 stcp->tgsi.prog = NULL;
368 break;
369 case PIPE_SHADER_IR_NIR:
370 /* pipe driver took ownership of prog */
371 break;
372 case PIPE_SHADER_IR_NATIVE:
373 /* ??? */
374 stcp->tgsi.prog = NULL;
375 break;
376 }
377 }
378 }
379
380 /**
381 * Translate a vertex program.
382 */
383 bool
384 st_translate_vertex_program(struct st_context *st,
385 struct st_vertex_program *stvp)
386 {
387 struct ureg_program *ureg;
388 enum pipe_error error;
389 unsigned num_outputs = 0;
390 unsigned attr;
391 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
392 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
393
394 stvp->num_inputs = 0;
395 memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
396
397 if (stvp->Base.arb.IsPositionInvariant)
398 _mesa_insert_mvp_code(st->ctx, &stvp->Base);
399
400 /*
401 * Determine number of inputs, the mappings between VERT_ATTRIB_x
402 * and TGSI generic input indexes, plus input attrib semantic info.
403 */
404 for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
405 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
406 stvp->input_to_index[attr] = stvp->num_inputs;
407 stvp->index_to_input[stvp->num_inputs] = attr;
408 stvp->num_inputs++;
409 if ((stvp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
410 /* add placeholder for second part of a double attribute */
411 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
412 stvp->num_inputs++;
413 }
414 }
415 }
  416    /* Bit of a hack: set up the potentially unused edgeflag input ahead of time. */
417 stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
418 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
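   /*
    * Worked example of the mapping built above, assuming a hypothetical
    * shader that reads only VERT_ATTRIB_POS and a double-precision
    * VERT_ATTRIB_GENERIC0 (e.g. a dvec4, which occupies two slots):
    *
    *    input_to_index[VERT_ATTRIB_POS]      = 0
    *    input_to_index[VERT_ATTRIB_GENERIC0] = 1
    *    index_to_input[0] = VERT_ATTRIB_POS
    *    index_to_input[1] = VERT_ATTRIB_GENERIC0
    *    index_to_input[2] = ST_DOUBLE_ATTRIB_PLACEHOLDER
    *    num_inputs = 3
    *
    * The edgeflag entries above then land at index 3 without incrementing
    * num_inputs.
    */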
419
420 /* Compute mapping of vertex program outputs to slots.
421 */
422 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
423 if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
424 stvp->result_to_output[attr] = ~0;
425 }
426 else {
427 unsigned slot = num_outputs++;
428
429 stvp->result_to_output[attr] = slot;
430
431 unsigned semantic_name, semantic_index;
432 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
433 &semantic_name, &semantic_index);
434 output_semantic_name[slot] = semantic_name;
435 output_semantic_index[slot] = semantic_index;
436 }
437 }
  438    /* Similar hack to the above: set up the potentially unused edgeflag output ahead of time. */
439 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
440 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
441 output_semantic_index[num_outputs] = 0;
442
443 /* ARB_vp: */
444 if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
445 _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
446
447 /* This determines which states will be updated when the assembly
448 * shader is bound.
449 */
450 stvp->affected_states = ST_NEW_VS_STATE |
451 ST_NEW_RASTERIZER |
452 ST_NEW_VERTEX_ARRAYS;
453
454 if (stvp->Base.Parameters->NumParameters)
455 stvp->affected_states |= ST_NEW_VS_CONSTANTS;
456
457 /* No samplers are allowed in ARB_vp. */
458 }
459
460 if (stvp->shader_program) {
461 st_translate_stream_output_info(stvp->Base.sh.LinkedTransformFeedback,
462 stvp->result_to_output,
463 &stvp->tgsi.stream_output);
464
465 st_store_ir_in_disk_cache(st, &stvp->Base, true);
466 return true;
467 }
468
469 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
470 if (ureg == NULL)
471 return false;
472
473 if (stvp->Base.info.clip_distance_array_size)
474 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
475 stvp->Base.info.clip_distance_array_size);
476 if (stvp->Base.info.cull_distance_array_size)
477 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
478 stvp->Base.info.cull_distance_array_size);
479
480 if (ST_DEBUG & DEBUG_MESA) {
481 _mesa_print_program(&stvp->Base);
482 _mesa_print_program_parameters(st->ctx, &stvp->Base);
483 debug_printf("\n");
484 }
485
486 if (stvp->glsl_to_tgsi) {
487 error = st_translate_program(st->ctx,
488 PIPE_SHADER_VERTEX,
489 ureg,
490 stvp->glsl_to_tgsi,
491 &stvp->Base,
492 /* inputs */
493 stvp->num_inputs,
494 stvp->input_to_index,
495 NULL, /* inputSlotToAttr */
496 NULL, /* input semantic name */
497 NULL, /* input semantic index */
498 NULL, /* interp mode */
499 /* outputs */
500 num_outputs,
501 stvp->result_to_output,
502 output_semantic_name,
503 output_semantic_index);
504
505 st_translate_stream_output_info(stvp->Base.sh.LinkedTransformFeedback,
506 stvp->result_to_output,
507 &stvp->tgsi.stream_output);
508
509 free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
510 } else
511 error = st_translate_mesa_program(st->ctx,
512 PIPE_SHADER_VERTEX,
513 ureg,
514 &stvp->Base,
515 /* inputs */
516 stvp->num_inputs,
517 stvp->input_to_index,
518 NULL, /* input semantic name */
519 NULL, /* input semantic index */
520 NULL,
521 /* outputs */
522 num_outputs,
523 stvp->result_to_output,
524 output_semantic_name,
525 output_semantic_index);
526
527 if (error) {
528 debug_printf("%s: failed to translate Mesa program:\n", __func__);
529 _mesa_print_program(&stvp->Base);
530 debug_assert(0);
531 return false;
532 }
533
534 stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
535 ureg_destroy(ureg);
536
537 if (stvp->glsl_to_tgsi) {
538 stvp->glsl_to_tgsi = NULL;
539 st_store_ir_in_disk_cache(st, &stvp->Base, false);
540 }
541
542 return stvp->tgsi.tokens != NULL;
543 }
544
545 static struct st_vp_variant *
546 st_create_vp_variant(struct st_context *st,
547 struct st_vertex_program *stvp,
548 const struct st_vp_variant_key *key)
549 {
550 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
551 struct pipe_context *pipe = st->pipe;
552
553 vpv->key = *key;
554 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
555 vpv->num_inputs = stvp->num_inputs;
556
557 /* When generating a NIR program, we usually don't have TGSI tokens.
558 * However, we do create them for ARB_vertex_program / fixed-function VS
559 * programs which we may need to use with the draw module for legacy
560 * feedback/select emulation. If they exist, copy them.
561 */
562 if (stvp->tgsi.tokens)
563 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
564
565 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
566 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
567 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
568 if (key->clamp_color)
569 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
570 if (key->passthrough_edgeflags) {
571 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
572 vpv->num_inputs++;
573 }
574
575 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
576 vpv->tgsi.ir.nir);
577
578 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
579 /* driver takes ownership of IR: */
580 vpv->tgsi.ir.nir = NULL;
581 return vpv;
582 }
583
584 /* Emulate features. */
585 if (key->clamp_color || key->passthrough_edgeflags) {
586 const struct tgsi_token *tokens;
587 unsigned flags =
588 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
589 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
590
591 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
592
593 if (tokens) {
594 tgsi_free_tokens(vpv->tgsi.tokens);
595 vpv->tgsi.tokens = tokens;
596
597 if (key->passthrough_edgeflags)
598 vpv->num_inputs++;
599 } else
600 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
601 }
602
603 if (ST_DEBUG & DEBUG_TGSI) {
604 tgsi_dump(vpv->tgsi.tokens, 0);
605 debug_printf("\n");
606 }
607
608 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
609 return vpv;
610 }
611
612
613 /**
614 * Find/create a vertex program variant.
615 */
616 struct st_vp_variant *
617 st_get_vp_variant(struct st_context *st,
618 struct st_vertex_program *stvp,
619 const struct st_vp_variant_key *key)
620 {
621 struct st_vp_variant *vpv;
622
623 /* Search for existing variant */
624 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
625 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
626 break;
627 }
628 }
629
630 if (!vpv) {
631 /* create now */
632 vpv = st_create_vp_variant(st, stvp, key);
633 if (vpv) {
634 for (unsigned index = 0; index < vpv->num_inputs; ++index) {
635 unsigned attr = stvp->index_to_input[index];
636 if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
637 continue;
638 vpv->vert_attrib_mask |= 1u << attr;
639 }
640
641 /* insert into list */
642 vpv->next = stvp->variants;
643 stvp->variants = vpv;
644 }
645 }
646
647 return vpv;
648 }
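/*
 * Minimal usage sketch (compare st_precompile_shader_variant() at the end
 * of this file): a caller fills in an st_vp_variant_key, asks for the
 * matching variant and binds its driver shader.  The state-update code
 * additionally sets key.clamp_color and key.passthrough_edgeflags from the
 * current GL/rasterizer state before doing the lookup.
 *
 *    struct st_vp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *
 *    struct st_vp_variant *vpv = st_get_vp_variant(st, stvp, &key);
 *    if (vpv)
 *       cso_set_vertex_shader_handle(st->cso_context, vpv->driver_shader);
 */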
649
650
651 /**
652 * Translate a Mesa fragment shader into a TGSI shader.
653 */
654 bool
655 st_translate_fragment_program(struct st_context *st,
656 struct st_fragment_program *stfp)
657 {
658 /* We have already compiled to NIR so just return */
659 if (stfp->shader_program) {
660 st_store_ir_in_disk_cache(st, &stfp->Base, true);
661 return true;
662 }
663
664 ubyte outputMapping[2 * FRAG_RESULT_MAX];
665 ubyte inputMapping[VARYING_SLOT_MAX];
666 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
667 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
668 GLuint attr;
669 GLbitfield64 inputsRead;
670 struct ureg_program *ureg;
671
672 GLboolean write_all = GL_FALSE;
673
674 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
675 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
676 uint fs_num_inputs = 0;
677
678 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
679 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
680 uint fs_num_outputs = 0;
681
682 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
683
684 /* Non-GLSL programs: */
685 if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
686 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
687 if (st->ctx->Const.GLSLFragCoordIsSysVal)
688 _mesa_program_fragment_position_to_sysval(&stfp->Base);
689
690 /* This determines which states will be updated when the assembly
691 * shader is bound.
692 *
693 * fragment.position and glDrawPixels always use constants.
694 */
695 stfp->affected_states = ST_NEW_FS_STATE |
696 ST_NEW_SAMPLE_SHADING |
697 ST_NEW_FS_CONSTANTS;
698
699 if (stfp->ati_fs) {
700 /* Just set them for ATI_fs unconditionally. */
701 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
702 ST_NEW_FS_SAMPLERS;
703 } else {
704 /* ARB_fp */
705 if (stfp->Base.SamplersUsed)
706 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
707 ST_NEW_FS_SAMPLERS;
708 }
709 }
710
711 /*
712 * Convert Mesa program inputs to TGSI input register semantics.
713 */
714 inputsRead = stfp->Base.info.inputs_read;
715 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
716 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
717 const GLuint slot = fs_num_inputs++;
718
719 inputMapping[attr] = slot;
720 inputSlotToAttr[slot] = attr;
721
722 switch (attr) {
723 case VARYING_SLOT_POS:
724 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
725 input_semantic_index[slot] = 0;
726 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
727 break;
728 case VARYING_SLOT_COL0:
729 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
730 input_semantic_index[slot] = 0;
731 interpMode[slot] = stfp->glsl_to_tgsi ?
732 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
733 break;
734 case VARYING_SLOT_COL1:
735 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
736 input_semantic_index[slot] = 1;
737 interpMode[slot] = stfp->glsl_to_tgsi ?
738 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
739 break;
740 case VARYING_SLOT_FOGC:
741 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
742 input_semantic_index[slot] = 0;
743 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
744 break;
745 case VARYING_SLOT_FACE:
746 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
747 input_semantic_index[slot] = 0;
748 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
749 break;
750 case VARYING_SLOT_PRIMITIVE_ID:
751 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
752 input_semantic_index[slot] = 0;
753 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
754 break;
755 case VARYING_SLOT_LAYER:
756 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
757 input_semantic_index[slot] = 0;
758 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
759 break;
760 case VARYING_SLOT_VIEWPORT:
761 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
762 input_semantic_index[slot] = 0;
763 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
764 break;
765 case VARYING_SLOT_CLIP_DIST0:
766 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
767 input_semantic_index[slot] = 0;
768 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
769 break;
770 case VARYING_SLOT_CLIP_DIST1:
771 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
772 input_semantic_index[slot] = 1;
773 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
774 break;
775 case VARYING_SLOT_CULL_DIST0:
776 case VARYING_SLOT_CULL_DIST1:
777 /* these should have been lowered by GLSL */
778 assert(0);
779 break;
780 /* In most cases, there is nothing special about these
781 * inputs, so adopt a convention to use the generic
782 * semantic name and the mesa VARYING_SLOT_ number as the
783 * index.
784 *
785 * All that is required is that the vertex shader labels
786 * its own outputs similarly, and that the vertex shader
787 * generates at least every output required by the
788 * fragment shader plus fixed-function hardware (such as
789 * BFC).
790 *
791 * However, some drivers may need us to identify the PNTC and TEXi
792 * varyings if, for example, their capability to replace them with
793 * sprite coordinates is limited.
794 */
795 case VARYING_SLOT_PNTC:
796 if (st->needs_texcoord_semantic) {
797 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
798 input_semantic_index[slot] = 0;
799 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
800 break;
801 }
802 /* fall through */
803 case VARYING_SLOT_TEX0:
804 case VARYING_SLOT_TEX1:
805 case VARYING_SLOT_TEX2:
806 case VARYING_SLOT_TEX3:
807 case VARYING_SLOT_TEX4:
808 case VARYING_SLOT_TEX5:
809 case VARYING_SLOT_TEX6:
810 case VARYING_SLOT_TEX7:
811 if (st->needs_texcoord_semantic) {
812 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
813 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
814 interpMode[slot] = stfp->glsl_to_tgsi ?
815 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
816 break;
817 }
818 /* fall through */
819 case VARYING_SLOT_VAR0:
820 default:
821 /* Semantic indices should be zero-based because drivers may choose
822 * to assign a fixed slot determined by that index.
823 * This is useful because ARB_separate_shader_objects uses location
824 * qualifiers for linkage, and if the semantic index corresponds to
  825        * these locations, linkage passes in the driver become unnecessary.
826 *
827 * If needs_texcoord_semantic is true, no semantic indices will be
828 * consumed for the TEXi varyings, and we can base the locations of
829 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
830 */
831 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
832 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
833 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
834 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
835 if (attr == VARYING_SLOT_PNTC)
836 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
837 else {
838 interpMode[slot] = stfp->glsl_to_tgsi ?
839 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
840 }
841 break;
842 }
843 }
844 else {
845 inputMapping[attr] = -1;
846 }
847 }
848
849 /*
850 * Semantics and mapping for outputs
851 */
852 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
853
854 /* if z is written, emit that first */
855 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
856 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
857 fs_output_semantic_index[fs_num_outputs] = 0;
858 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
859 fs_num_outputs++;
  860       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_DEPTH);
861 }
862
863 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
864 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
865 fs_output_semantic_index[fs_num_outputs] = 0;
866 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
867 fs_num_outputs++;
  868       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_STENCIL);
869 }
870
871 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
872 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
873 fs_output_semantic_index[fs_num_outputs] = 0;
874 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
875 fs_num_outputs++;
  876       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
877 }
878
879 /* handle remaining outputs (color) */
880 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
881 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
882 stfp->Base.SecondaryOutputsWritten;
883 const unsigned loc = attr % FRAG_RESULT_MAX;
884
885 if (written & BITFIELD64_BIT(loc)) {
886 switch (loc) {
887 case FRAG_RESULT_DEPTH:
888 case FRAG_RESULT_STENCIL:
889 case FRAG_RESULT_SAMPLE_MASK:
890 /* handled above */
891 assert(0);
892 break;
893 case FRAG_RESULT_COLOR:
894 write_all = GL_TRUE; /* fallthrough */
895 default: {
896 int index;
897 assert(loc == FRAG_RESULT_COLOR ||
898 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
899
900 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
901
902 if (attr >= FRAG_RESULT_MAX) {
903 /* Secondary color for dual source blending. */
904 assert(index == 0);
905 index++;
906 }
907
908 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
909 fs_output_semantic_index[fs_num_outputs] = index;
910 outputMapping[attr] = fs_num_outputs;
911 break;
912 }
913 }
914
915 fs_num_outputs++;
916 }
917 }
918
919 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
920 if (ureg == NULL)
921 return false;
922
923 if (ST_DEBUG & DEBUG_MESA) {
924 _mesa_print_program(&stfp->Base);
925 _mesa_print_program_parameters(st->ctx, &stfp->Base);
926 debug_printf("\n");
927 }
928 if (write_all == GL_TRUE)
929 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
930
931 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
932 switch (stfp->Base.info.fs.depth_layout) {
933 case FRAG_DEPTH_LAYOUT_ANY:
934 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
935 TGSI_FS_DEPTH_LAYOUT_ANY);
936 break;
937 case FRAG_DEPTH_LAYOUT_GREATER:
938 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
939 TGSI_FS_DEPTH_LAYOUT_GREATER);
940 break;
941 case FRAG_DEPTH_LAYOUT_LESS:
942 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
943 TGSI_FS_DEPTH_LAYOUT_LESS);
944 break;
945 case FRAG_DEPTH_LAYOUT_UNCHANGED:
946 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
947 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
948 break;
949 default:
950 assert(0);
951 }
952 }
953
954 if (stfp->glsl_to_tgsi) {
955 st_translate_program(st->ctx,
956 PIPE_SHADER_FRAGMENT,
957 ureg,
958 stfp->glsl_to_tgsi,
959 &stfp->Base,
960 /* inputs */
961 fs_num_inputs,
962 inputMapping,
963 inputSlotToAttr,
964 input_semantic_name,
965 input_semantic_index,
966 interpMode,
967 /* outputs */
968 fs_num_outputs,
969 outputMapping,
970 fs_output_semantic_name,
971 fs_output_semantic_index);
972
973 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
974 } else if (stfp->ati_fs)
975 st_translate_atifs_program(ureg,
976 stfp->ati_fs,
977 &stfp->Base,
978 /* inputs */
979 fs_num_inputs,
980 inputMapping,
981 input_semantic_name,
982 input_semantic_index,
983 interpMode,
984 /* outputs */
985 fs_num_outputs,
986 outputMapping,
987 fs_output_semantic_name,
988 fs_output_semantic_index);
989 else
990 st_translate_mesa_program(st->ctx,
991 PIPE_SHADER_FRAGMENT,
992 ureg,
993 &stfp->Base,
994 /* inputs */
995 fs_num_inputs,
996 inputMapping,
997 input_semantic_name,
998 input_semantic_index,
999 interpMode,
1000 /* outputs */
1001 fs_num_outputs,
1002 outputMapping,
1003 fs_output_semantic_name,
1004 fs_output_semantic_index);
1005
1006 stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
1007 ureg_destroy(ureg);
1008
1009 if (stfp->glsl_to_tgsi) {
1010 stfp->glsl_to_tgsi = NULL;
1011 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1012 }
1013
1014 return stfp->tgsi.tokens != NULL;
1015 }
1016
1017 static struct st_fp_variant *
1018 st_create_fp_variant(struct st_context *st,
1019 struct st_fragment_program *stfp,
1020 const struct st_fp_variant_key *key)
1021 {
1022 struct pipe_context *pipe = st->pipe;
1023 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1024 struct pipe_shader_state tgsi = {0};
1025 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1026 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1027 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1028 static const gl_state_index16 scale_state[STATE_LENGTH] =
1029 { STATE_INTERNAL, STATE_PT_SCALE };
1030 static const gl_state_index16 bias_state[STATE_LENGTH] =
1031 { STATE_INTERNAL, STATE_PT_BIAS };
1032
1033 if (!variant)
1034 return NULL;
1035
1036 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1037 tgsi.type = PIPE_SHADER_IR_NIR;
1038 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1039
1040 if (key->clamp_color)
1041 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1042
1043 if (key->persample_shading) {
1044 nir_shader *shader = tgsi.ir.nir;
1045 nir_foreach_variable(var, &shader->inputs)
1046 var->data.sample = true;
1047 }
1048
1049 assert(!(key->bitmap && key->drawpixels));
1050
1051 /* glBitmap */
1052 if (key->bitmap) {
1053 nir_lower_bitmap_options options = {0};
1054
1055 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1056 options.sampler = variant->bitmap_sampler;
1057 options.swizzle_xxxx = (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM);
1058
1059 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1060 }
1061
1062 /* glDrawPixels (color only) */
1063 if (key->drawpixels) {
1064 nir_lower_drawpixels_options options = {{0}};
1065 unsigned samplers_used = stfp->Base.SamplersUsed;
1066
1067 /* Find the first unused slot. */
1068 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1069 options.drawpix_sampler = variant->drawpix_sampler;
1070 samplers_used |= (1 << variant->drawpix_sampler);
1071
1072 options.pixel_maps = key->pixelMaps;
1073 if (key->pixelMaps) {
1074 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1075 options.pixelmap_sampler = variant->pixelmap_sampler;
1076 }
1077
1078 options.scale_and_bias = key->scaleAndBias;
1079 if (key->scaleAndBias) {
1080 _mesa_add_state_reference(params, scale_state);
1081 memcpy(options.scale_state_tokens, scale_state,
1082 sizeof(options.scale_state_tokens));
1083 _mesa_add_state_reference(params, bias_state);
1084 memcpy(options.bias_state_tokens, bias_state,
1085 sizeof(options.bias_state_tokens));
1086 }
1087
1088 _mesa_add_state_reference(params, texcoord_state);
1089 memcpy(options.texcoord_state_tokens, texcoord_state,
1090 sizeof(options.texcoord_state_tokens));
1091
1092 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1093 }
1094
1095 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1096 nir_lower_tex_options options = {0};
1097 options.lower_y_uv_external = key->external.lower_nv12;
1098 options.lower_y_u_v_external = key->external.lower_iyuv;
1099 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1100 }
1101
1102 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1103
1104 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1105 /* This pass needs to happen *after* nir_lower_sampler */
1106 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1107 ~stfp->Base.SamplersUsed,
1108 key->external.lower_nv12,
1109 key->external.lower_iyuv);
1110 }
1111
1112 /* Some of the lowering above may have introduced new varyings */
1113 nir_shader_gather_info(tgsi.ir.nir,
1114 nir_shader_get_entrypoint(tgsi.ir.nir));
1115
1116 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1117 variant->key = *key;
1118
1119 return variant;
1120 }
1121
1122 tgsi.tokens = stfp->tgsi.tokens;
1123
1124 assert(!(key->bitmap && key->drawpixels));
1125
1126 /* Fix texture targets and add fog for ATI_fs */
1127 if (stfp->ati_fs) {
1128 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1129
1130 if (tokens)
1131 tgsi.tokens = tokens;
1132 else
1133 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1134 }
1135
1136 /* Emulate features. */
1137 if (key->clamp_color || key->persample_shading) {
1138 const struct tgsi_token *tokens;
1139 unsigned flags =
1140 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1141 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1142
1143 tokens = tgsi_emulate(tgsi.tokens, flags);
1144
1145 if (tokens) {
1146 if (tgsi.tokens != stfp->tgsi.tokens)
1147 tgsi_free_tokens(tgsi.tokens);
1148 tgsi.tokens = tokens;
1149 } else
1150 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1151 }
1152
1153 /* glBitmap */
1154 if (key->bitmap) {
1155 const struct tgsi_token *tokens;
1156
1157 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1158
1159 tokens = st_get_bitmap_shader(tgsi.tokens,
1160 st->internal_target,
1161 variant->bitmap_sampler,
1162 st->needs_texcoord_semantic,
1163 st->bitmap.tex_format ==
1164 PIPE_FORMAT_L8_UNORM);
1165
1166 if (tokens) {
1167 if (tgsi.tokens != stfp->tgsi.tokens)
1168 tgsi_free_tokens(tgsi.tokens);
1169 tgsi.tokens = tokens;
1170 } else
1171 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1172 }
1173
1174 /* glDrawPixels (color only) */
1175 if (key->drawpixels) {
1176 const struct tgsi_token *tokens;
1177 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1178
1179 /* Find the first unused slot. */
1180 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1181
1182 if (key->pixelMaps) {
1183 unsigned samplers_used = stfp->Base.SamplersUsed |
1184 (1 << variant->drawpix_sampler);
1185
1186 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1187 }
1188
1189 if (key->scaleAndBias) {
1190 scale_const = _mesa_add_state_reference(params, scale_state);
1191 bias_const = _mesa_add_state_reference(params, bias_state);
1192 }
1193
1194 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1195
1196 tokens = st_get_drawpix_shader(tgsi.tokens,
1197 st->needs_texcoord_semantic,
1198 key->scaleAndBias, scale_const,
1199 bias_const, key->pixelMaps,
1200 variant->drawpix_sampler,
1201 variant->pixelmap_sampler,
1202 texcoord_const, st->internal_target);
1203
1204 if (tokens) {
1205 if (tgsi.tokens != stfp->tgsi.tokens)
1206 tgsi_free_tokens(tgsi.tokens);
1207 tgsi.tokens = tokens;
1208 } else
1209 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1210 }
1211
1212 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1213 const struct tgsi_token *tokens;
1214
 1215       /* samplers inserted would conflict, but this should be impossible: */
1216 assert(!(key->bitmap || key->drawpixels));
1217
1218 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1219 ~stfp->Base.SamplersUsed,
1220 key->external.lower_nv12,
1221 key->external.lower_iyuv);
1222 if (tokens) {
1223 if (tgsi.tokens != stfp->tgsi.tokens)
1224 tgsi_free_tokens(tgsi.tokens);
1225 tgsi.tokens = tokens;
1226 } else {
1227 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1228 }
1229 }
1230
1231 if (ST_DEBUG & DEBUG_TGSI) {
1232 tgsi_dump(tgsi.tokens, 0);
1233 debug_printf("\n");
1234 }
1235
1236 /* fill in variant */
1237 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1238 variant->key = *key;
1239
1240 if (tgsi.tokens != stfp->tgsi.tokens)
1241 tgsi_free_tokens(tgsi.tokens);
1242 return variant;
1243 }
1244
1245 /**
1246 * Translate fragment program if needed.
1247 */
1248 struct st_fp_variant *
1249 st_get_fp_variant(struct st_context *st,
1250 struct st_fragment_program *stfp,
1251 const struct st_fp_variant_key *key)
1252 {
1253 struct st_fp_variant *fpv;
1254
1255 /* Search for existing variant */
1256 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1257 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1258 break;
1259 }
1260 }
1261
1262 if (!fpv) {
1263 /* create new */
1264 fpv = st_create_fp_variant(st, stfp, key);
1265 if (fpv) {
1266 if (key->bitmap || key->drawpixels) {
 1267          /* Regular variants should always come before the
 1268           * bitmap & drawpixels variants (unless there are
 1269           * no regular variants), so that st_update_fp can
 1270           * take a fast path when shader_has_one_variant
 1271           * is set.
 1272           */
1273 if (!stfp->variants) {
1274 stfp->variants = fpv;
1275 } else {
1276 /* insert into list after the first one */
1277 fpv->next = stfp->variants->next;
1278 stfp->variants->next = fpv;
1279 }
1280 } else {
1281 /* insert into list */
1282 fpv->next = stfp->variants;
1283 stfp->variants = fpv;
1284 }
1285 }
1286 }
1287
1288 return fpv;
1289 }
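/*
 * Minimal sketch of a caller requesting a specialized variant, e.g. the
 * glBitmap path (the real key setup along these lines lives in
 * st_cb_bitmap.c and the fragment-shader update atom; only fields handled
 * by st_create_fp_variant() are shown):
 *
 *    struct st_fp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    key.bitmap = GL_TRUE;
 *
 *    struct st_fp_variant *fpv = st_get_fp_variant(st, stfp, &key);
 *    if (fpv)
 *       cso_set_fragment_shader_handle(st->cso_context, fpv->driver_shader);
 */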
1290
1291
1292 /**
 1293  * Translate a program.  This is common code for geometry, tessellation
 1294  * and compute shaders.
1295 */
1296 static void
1297 st_translate_program_common(struct st_context *st,
1298 struct gl_program *prog,
1299 struct glsl_to_tgsi_visitor *glsl_to_tgsi,
1300 struct ureg_program *ureg,
1301 unsigned tgsi_processor,
1302 struct pipe_shader_state *out_state)
1303 {
1304 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1305 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1306 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1307 GLuint attr;
1308
1309 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1310 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1311 uint num_inputs = 0;
1312
1313 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1314 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1315 uint num_outputs = 0;
1316
1317 GLint i;
1318
1319 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1320 memset(inputMapping, 0, sizeof(inputMapping));
1321 memset(outputMapping, 0, sizeof(outputMapping));
1322 memset(out_state, 0, sizeof(*out_state));
1323
1324 if (prog->info.clip_distance_array_size)
1325 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1326 prog->info.clip_distance_array_size);
1327 if (prog->info.cull_distance_array_size)
1328 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1329 prog->info.cull_distance_array_size);
1330
1331 /*
1332 * Convert Mesa program inputs to TGSI input register semantics.
1333 */
1334 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1335 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1336 continue;
1337
1338 unsigned slot = num_inputs++;
1339
1340 inputMapping[attr] = slot;
1341 inputSlotToAttr[slot] = attr;
1342
1343 unsigned semantic_name, semantic_index;
1344 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1345 &semantic_name, &semantic_index);
1346 input_semantic_name[slot] = semantic_name;
1347 input_semantic_index[slot] = semantic_index;
1348 }
1349
1350 /* Also add patch inputs. */
1351 for (attr = 0; attr < 32; attr++) {
1352 if (prog->info.patch_inputs_read & (1u << attr)) {
1353 GLuint slot = num_inputs++;
1354 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1355
1356 inputMapping[patch_attr] = slot;
1357 inputSlotToAttr[slot] = patch_attr;
1358 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1359 input_semantic_index[slot] = attr;
1360 }
1361 }
1362
1363 /* initialize output semantics to defaults */
1364 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1365 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1366 output_semantic_index[i] = 0;
1367 }
1368
1369 /*
1370 * Determine number of outputs, the (default) output register
1371 * mapping and the semantic information for each output.
1372 */
1373 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1374 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1375 GLuint slot = num_outputs++;
1376
1377 outputMapping[attr] = slot;
1378
1379 unsigned semantic_name, semantic_index;
1380 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1381 &semantic_name, &semantic_index);
1382 output_semantic_name[slot] = semantic_name;
1383 output_semantic_index[slot] = semantic_index;
1384 }
1385 }
1386
1387 /* Also add patch outputs. */
1388 for (attr = 0; attr < 32; attr++) {
1389 if (prog->info.patch_outputs_written & (1u << attr)) {
1390 GLuint slot = num_outputs++;
1391 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1392
1393 outputMapping[patch_attr] = slot;
1394 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1395 output_semantic_index[slot] = attr;
1396 }
1397 }
1398
1399 st_translate_program(st->ctx,
1400 tgsi_processor,
1401 ureg,
1402 glsl_to_tgsi,
1403 prog,
1404 /* inputs */
1405 num_inputs,
1406 inputMapping,
1407 inputSlotToAttr,
1408 input_semantic_name,
1409 input_semantic_index,
1410 NULL,
1411 /* outputs */
1412 num_outputs,
1413 outputMapping,
1414 output_semantic_name,
1415 output_semantic_index);
1416
1417 if (tgsi_processor == PIPE_SHADER_COMPUTE) {
1418 struct st_compute_program *stcp = (struct st_compute_program *) prog;
1419 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1420 stcp->tgsi.prog = out_state->tokens;
1421 } else {
1422 struct st_common_program *stcp = (struct st_common_program *) prog;
1423 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1424 }
1425 ureg_destroy(ureg);
1426
1427 st_translate_stream_output_info(prog->sh.LinkedTransformFeedback,
1428 outputMapping,
1429 &out_state->stream_output);
1430
1431 st_store_ir_in_disk_cache(st, prog, false);
1432
1433 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1434 _mesa_print_program(prog);
1435 debug_printf("\n");
1436 }
1437
1438 if (ST_DEBUG & DEBUG_TGSI) {
1439 tgsi_dump(out_state->tokens, 0);
1440 debug_printf("\n");
1441 }
1442 }
1443
1444 /**
1445 * Update stream-output info for GS/TCS/TES. Normally this is done in
 1446  * st_translate_program_common(), but that is not called in the glsl_to_nir
 1447  * case.
1448 */
1449 static void
1450 st_translate_program_stream_output(struct gl_program *prog,
1451 struct pipe_stream_output_info *stream_output)
1452 {
1453 if (!prog->sh.LinkedTransformFeedback)
1454 return;
1455
1456 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1457 GLuint attr;
1458 uint num_outputs = 0;
1459
1460 memset(outputMapping, 0, sizeof(outputMapping));
1461
1462 /*
1463 * Determine number of outputs, the (default) output register
1464 * mapping and the semantic information for each output.
1465 */
1466 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1467 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1468 GLuint slot = num_outputs++;
1469
1470 outputMapping[attr] = slot;
1471 }
1472 }
1473
1474 st_translate_stream_output_info(prog->sh.LinkedTransformFeedback,
1475 outputMapping,
1476 stream_output);
1477 }
1478
1479 /**
1480 * Translate a geometry program to create a new variant.
1481 */
1482 bool
1483 st_translate_geometry_program(struct st_context *st,
1484 struct st_common_program *stgp)
1485 {
1486 struct ureg_program *ureg;
1487
1488 /* We have already compiled to NIR so just return */
1489 if (stgp->shader_program) {
1490 /* No variants */
1491 st_finalize_nir(st, &stgp->Base, stgp->shader_program,
1492 stgp->tgsi.ir.nir);
1493 st_translate_program_stream_output(&stgp->Base, &stgp->tgsi.stream_output);
1494 st_store_ir_in_disk_cache(st, &stgp->Base, true);
1495 return true;
1496 }
1497
1498 ureg = ureg_create_with_screen(PIPE_SHADER_GEOMETRY, st->pipe->screen);
1499 if (ureg == NULL)
1500 return false;
1501
1502 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1503 stgp->Base.info.gs.input_primitive);
1504 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1505 stgp->Base.info.gs.output_primitive);
1506 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1507 stgp->Base.info.gs.vertices_out);
1508 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1509 stgp->Base.info.gs.invocations);
1510
1511 st_translate_program_common(st, &stgp->Base, stgp->glsl_to_tgsi, ureg,
1512 PIPE_SHADER_GEOMETRY, &stgp->tgsi);
1513
1514 free_glsl_to_tgsi_visitor(stgp->glsl_to_tgsi);
1515 stgp->glsl_to_tgsi = NULL;
1516 return true;
1517 }
1518
1519
1520 /**
1521 * Get/create a basic program variant.
1522 */
1523 struct st_basic_variant *
1524 st_get_basic_variant(struct st_context *st,
1525 unsigned pipe_shader,
1526 struct st_common_program *prog)
1527 {
1528 struct pipe_context *pipe = st->pipe;
1529 struct st_basic_variant *v;
1530 struct st_basic_variant_key key;
1531 struct pipe_shader_state tgsi = {0};
1532 memset(&key, 0, sizeof(key));
1533 key.st = st->has_shareable_shaders ? NULL : st;
1534
1535 /* Search for existing variant */
1536 for (v = prog->variants; v; v = v->next) {
1537 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1538 break;
1539 }
1540 }
1541
1542 if (!v) {
1543 /* create new */
1544 v = CALLOC_STRUCT(st_basic_variant);
1545 if (v) {
1546
1547 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1548 tgsi.type = PIPE_SHADER_IR_NIR;
1549 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1550 tgsi.stream_output = prog->tgsi.stream_output;
1551 } else
1552 tgsi = prog->tgsi;
1553 /* fill in new variant */
1554 switch (pipe_shader) {
1555 case PIPE_SHADER_TESS_CTRL:
1556 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1557 break;
1558 case PIPE_SHADER_TESS_EVAL:
1559 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1560 break;
1561 case PIPE_SHADER_GEOMETRY:
1562 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1563 break;
1564 default:
1565 assert(!"unhandled shader type");
1566 free(v);
1567 return NULL;
1568 }
1569
1570 v->key = key;
1571
1572 /* insert into list */
1573 v->next = prog->variants;
1574 prog->variants = v;
1575 }
1576 }
1577
1578 return v;
1579 }
1580
1581
1582 /**
1583 * Translate a tessellation control program to create a new variant.
1584 */
1585 bool
1586 st_translate_tessctrl_program(struct st_context *st,
1587 struct st_common_program *sttcp)
1588 {
1589 struct ureg_program *ureg;
1590
1591 /* We have already compiled to NIR so just return */
1592 if (sttcp->shader_program) {
1593 /* No variants */
1594 st_finalize_nir(st, &sttcp->Base, sttcp->shader_program,
1595 sttcp->tgsi.ir.nir);
1596 st_store_ir_in_disk_cache(st, &sttcp->Base, true);
1597 return true;
1598 }
1599
1600 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_CTRL, st->pipe->screen);
1601 if (ureg == NULL)
1602 return false;
1603
1604 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1605 sttcp->Base.info.tess.tcs_vertices_out);
1606
1607 st_translate_program_common(st, &sttcp->Base, sttcp->glsl_to_tgsi, ureg,
1608 PIPE_SHADER_TESS_CTRL, &sttcp->tgsi);
1609
1610 free_glsl_to_tgsi_visitor(sttcp->glsl_to_tgsi);
1611 sttcp->glsl_to_tgsi = NULL;
1612 return true;
1613 }
1614
1615
1616 /**
1617 * Translate a tessellation evaluation program to create a new variant.
1618 */
1619 bool
1620 st_translate_tesseval_program(struct st_context *st,
1621 struct st_common_program *sttep)
1622 {
1623 struct ureg_program *ureg;
1624
1625 /* We have already compiled to NIR so just return */
1626 if (sttep->shader_program) {
1627 /* No variants */
1628 st_finalize_nir(st, &sttep->Base, sttep->shader_program,
1629 sttep->tgsi.ir.nir);
1630 st_translate_program_stream_output(&sttep->Base, &sttep->tgsi.stream_output);
1631 st_store_ir_in_disk_cache(st, &sttep->Base, true);
1632 return true;
1633 }
1634
1635 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_EVAL, st->pipe->screen);
1636 if (ureg == NULL)
1637 return false;
1638
1639 if (sttep->Base.info.tess.primitive_mode == GL_ISOLINES)
1640 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1641 else
1642 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1643 sttep->Base.info.tess.primitive_mode);
1644
1645 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1646 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1647 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1648 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1649 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1650
1651 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1652 (sttep->Base.info.tess.spacing + 1) % 3);
1653
1654 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1655 !sttep->Base.info.tess.ccw);
1656 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1657 sttep->Base.info.tess.point_mode);
1658
1659 st_translate_program_common(st, &sttep->Base, sttep->glsl_to_tgsi,
1660 ureg, PIPE_SHADER_TESS_EVAL, &sttep->tgsi);
1661
1662 free_glsl_to_tgsi_visitor(sttep->glsl_to_tgsi);
1663 sttep->glsl_to_tgsi = NULL;
1664 return true;
1665 }
1666
1667
1668 /**
1669 * Translate a compute program to create a new variant.
1670 */
1671 bool
1672 st_translate_compute_program(struct st_context *st,
1673 struct st_compute_program *stcp)
1674 {
1675 struct ureg_program *ureg;
1676 struct pipe_shader_state prog;
1677
1678 stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
1679
1680 if (stcp->shader_program) {
1681 /* no compute variants: */
1682 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1683 (struct nir_shader *) stcp->tgsi.prog);
1684 st_store_ir_in_disk_cache(st, &stcp->Base, true);
1685 return true;
1686 }
1687
1688 ureg = ureg_create_with_screen(PIPE_SHADER_COMPUTE, st->pipe->screen);
1689 if (ureg == NULL)
1690 return false;
1691
1692 st_translate_program_common(st, &stcp->Base, stcp->glsl_to_tgsi, ureg,
1693 PIPE_SHADER_COMPUTE, &prog);
1694
1695 stcp->tgsi.ir_type = PIPE_SHADER_IR_TGSI;
1696 stcp->tgsi.req_private_mem = 0;
1697 stcp->tgsi.req_input_mem = 0;
1698
1699 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1700 stcp->glsl_to_tgsi = NULL;
1701 return true;
1702 }
1703
1704
1705 /**
1706 * Get/create compute program variant.
1707 */
1708 struct st_basic_variant *
1709 st_get_cp_variant(struct st_context *st,
1710 struct pipe_compute_state *tgsi,
1711 struct st_basic_variant **variants)
1712 {
1713 struct pipe_context *pipe = st->pipe;
1714 struct st_basic_variant *v;
1715 struct st_basic_variant_key key;
1716
1717 memset(&key, 0, sizeof(key));
1718 key.st = st->has_shareable_shaders ? NULL : st;
1719
1720 /* Search for existing variant */
1721 for (v = *variants; v; v = v->next) {
1722 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1723 break;
1724 }
1725 }
1726
1727 if (!v) {
1728 /* create new */
1729 v = CALLOC_STRUCT(st_basic_variant);
1730 if (v) {
1731 /* fill in new variant */
1732 struct pipe_compute_state cs = *tgsi;
1733 if (tgsi->ir_type == PIPE_SHADER_IR_NIR)
1734 cs.prog = nir_shader_clone(NULL, tgsi->prog);
1735 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1736 v->key = key;
1737
1738 /* insert into list */
1739 v->next = *variants;
1740 *variants = v;
1741 }
1742 }
1743
1744 return v;
1745 }
1746
1747
1748 /**
 1749  * Programs of all shader stages have per-context variants.  Free all the
1750 * variants attached to the given program which match the given context.
1751 */
1752 static void
1753 destroy_program_variants(struct st_context *st, struct gl_program *target)
1754 {
1755 if (!target || target == &_mesa_DummyProgram)
1756 return;
1757
1758 switch (target->Target) {
1759 case GL_VERTEX_PROGRAM_ARB:
1760 {
1761 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1762 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1763
1764 for (vpv = stvp->variants; vpv; ) {
1765 struct st_vp_variant *next = vpv->next;
1766 if (vpv->key.st == st) {
1767 /* unlink from list */
1768 *prevPtr = next;
1769 /* destroy this variant */
1770 delete_vp_variant(st, vpv);
1771 }
1772 else {
1773 prevPtr = &vpv->next;
1774 }
1775 vpv = next;
1776 }
1777 }
1778 break;
1779 case GL_FRAGMENT_PROGRAM_ARB:
1780 {
1781 struct st_fragment_program *stfp =
1782 (struct st_fragment_program *) target;
1783 struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
1784
1785 for (fpv = stfp->variants; fpv; ) {
1786 struct st_fp_variant *next = fpv->next;
1787 if (fpv->key.st == st) {
1788 /* unlink from list */
1789 *prevPtr = next;
1790 /* destroy this variant */
1791 delete_fp_variant(st, fpv);
1792 }
1793 else {
1794 prevPtr = &fpv->next;
1795 }
1796 fpv = next;
1797 }
1798 }
1799 break;
1800 case GL_GEOMETRY_PROGRAM_NV:
1801 case GL_TESS_CONTROL_PROGRAM_NV:
1802 case GL_TESS_EVALUATION_PROGRAM_NV:
1803 case GL_COMPUTE_PROGRAM_NV:
1804 {
1805 struct st_common_program *p = st_common_program(target);
1806 struct st_compute_program *cp = (struct st_compute_program*)target;
1807 struct st_basic_variant **variants =
1808 target->Target == GL_COMPUTE_PROGRAM_NV ? &cp->variants :
1809 &p->variants;
1810 struct st_basic_variant *v, **prevPtr = variants;
1811
1812 for (v = *variants; v; ) {
1813 struct st_basic_variant *next = v->next;
1814 if (v->key.st == st) {
1815 /* unlink from list */
1816 *prevPtr = next;
1817 /* destroy this variant */
1818 delete_basic_variant(st, v, target->Target);
1819 }
1820 else {
1821 prevPtr = &v->next;
1822 }
1823 v = next;
1824 }
1825 }
1826 break;
1827 default:
1828 _mesa_problem(NULL, "Unexpected program target 0x%x in "
1829 "destroy_program_variants_cb()", target->Target);
1830 }
1831 }
1832
1833
1834 /**
1835 * Callback for _mesa_HashWalk. Free all the shader's program variants
1836 * which match the given context.
1837 */
1838 static void
1839 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1840 {
1841 struct st_context *st = (struct st_context *) userData;
1842 struct gl_shader *shader = (struct gl_shader *) data;
1843
1844 switch (shader->Type) {
1845 case GL_SHADER_PROGRAM_MESA:
1846 {
1847 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1848 GLuint i;
1849
1850 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1851 if (shProg->_LinkedShaders[i])
1852 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1853 }
1854 }
1855 break;
1856 case GL_VERTEX_SHADER:
1857 case GL_FRAGMENT_SHADER:
1858 case GL_GEOMETRY_SHADER:
1859 case GL_TESS_CONTROL_SHADER:
1860 case GL_TESS_EVALUATION_SHADER:
1861 case GL_COMPUTE_SHADER:
1862 break;
1863 default:
1864 assert(0);
1865 }
1866 }
1867
1868
1869 /**
1870 * Callback for _mesa_HashWalk. Free all the program variants which match
1871 * the given context.
1872 */
1873 static void
1874 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1875 {
1876 struct st_context *st = (struct st_context *) userData;
1877 struct gl_program *program = (struct gl_program *) data;
1878 destroy_program_variants(st, program);
1879 }
1880
1881
1882 /**
1883 * Walk over all shaders and programs to delete any variants which
1884 * belong to the given context.
1885 * This is called during context tear-down.
1886 */
1887 void
1888 st_destroy_program_variants(struct st_context *st)
1889 {
1890 /* If shaders can be shared with other contexts, the last context will
1891 * call DeleteProgram on all shaders, releasing everything.
1892 */
1893 if (st->has_shareable_shaders)
1894 return;
1895
1896 /* ARB vert/frag program */
1897 _mesa_HashWalk(st->ctx->Shared->Programs,
1898 destroy_program_variants_cb, st);
1899
1900 /* GLSL vert/frag/geom shaders */
1901 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1902 destroy_shader_program_variants_cb, st);
1903 }
1904
1905
1906 /**
1907 * For debugging, print/dump the current vertex program.
1908 */
1909 void
1910 st_print_current_vertex_program(void)
1911 {
1912 GET_CURRENT_CONTEXT(ctx);
1913
1914 if (ctx->VertexProgram._Current) {
1915 struct st_vertex_program *stvp =
1916 (struct st_vertex_program *) ctx->VertexProgram._Current;
1917 struct st_vp_variant *stv;
1918
1919 debug_printf("Vertex program %u\n", stvp->Base.Id);
1920
1921 for (stv = stvp->variants; stv; stv = stv->next) {
1922 debug_printf("variant %p\n", stv);
1923 tgsi_dump(stv->tgsi.tokens, 0);
1924 }
1925 }
1926 }
1927
1928
1929 /**
1930 * Compile one shader variant.
1931 */
1932 void
1933 st_precompile_shader_variant(struct st_context *st,
1934 struct gl_program *prog)
1935 {
1936 switch (prog->Target) {
1937 case GL_VERTEX_PROGRAM_ARB: {
1938 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1939 struct st_vp_variant_key key;
1940
1941 memset(&key, 0, sizeof(key));
1942 key.st = st->has_shareable_shaders ? NULL : st;
1943 st_get_vp_variant(st, p, &key);
1944 break;
1945 }
1946
1947 case GL_TESS_CONTROL_PROGRAM_NV: {
1948 struct st_common_program *p = st_common_program(prog);
1949 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p);
1950 break;
1951 }
1952
1953 case GL_TESS_EVALUATION_PROGRAM_NV: {
1954 struct st_common_program *p = st_common_program(prog);
1955 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p);
1956 break;
1957 }
1958
1959 case GL_GEOMETRY_PROGRAM_NV: {
1960 struct st_common_program *p = st_common_program(prog);
1961 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p);
1962 break;
1963 }
1964
1965 case GL_FRAGMENT_PROGRAM_ARB: {
1966 struct st_fragment_program *p = (struct st_fragment_program *)prog;
1967 struct st_fp_variant_key key;
1968
1969 memset(&key, 0, sizeof(key));
1970 key.st = st->has_shareable_shaders ? NULL : st;
1971 st_get_fp_variant(st, p, &key);
1972 break;
1973 }
1974
1975 case GL_COMPUTE_PROGRAM_NV: {
1976 struct st_compute_program *p = (struct st_compute_program *)prog;
1977 st_get_cp_variant(st, &p->tgsi, &p->variants);
1978 break;
1979 }
1980
1981 default:
1982 assert(0);
1983 }
1984 }