[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/imports.h"
35 #include "main/hash.h"
36 #include "main/mtypes.h"
37 #include "program/prog_parameter.h"
38 #include "program/prog_print.h"
39 #include "program/programopt.h"
40
41 #include "compiler/nir/nir.h"
42
43 #include "pipe/p_context.h"
44 #include "pipe/p_defines.h"
45 #include "pipe/p_shader_tokens.h"
46 #include "draw/draw_context.h"
47 #include "tgsi/tgsi_dump.h"
48 #include "tgsi/tgsi_emulate.h"
49 #include "tgsi/tgsi_parse.h"
50 #include "tgsi/tgsi_ureg.h"
51
52 #include "st_debug.h"
53 #include "st_cb_bitmap.h"
54 #include "st_cb_drawpixels.h"
55 #include "st_context.h"
56 #include "st_tgsi_lower_yuv.h"
57 #include "st_program.h"
58 #include "st_mesa_to_tgsi.h"
59 #include "st_atifs_to_tgsi.h"
60 #include "st_nir.h"
61 #include "st_shader_cache.h"
62 #include "cso_cache/cso_context.h"
63
64
65
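/**
 * Helper for st_set_prog_affected_state_flags(): OR the given per-resource
 * dirty-state bits into *states for each resource type the program
 * actually uses (constants, samplers/views, images, UBOs, SSBOs, atomics).
 */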
66 static void
67 set_affected_state_flags(uint64_t *states,
68 struct gl_program *prog,
69 uint64_t new_constants,
70 uint64_t new_sampler_views,
71 uint64_t new_samplers,
72 uint64_t new_images,
73 uint64_t new_ubos,
74 uint64_t new_ssbos,
75 uint64_t new_atomics)
76 {
77 if (prog->Parameters->NumParameters)
78 *states |= new_constants;
79
80 if (prog->info.num_textures)
81 *states |= new_sampler_views | new_samplers;
82
83 if (prog->info.num_images)
84 *states |= new_images;
85
86 if (prog->info.num_ubos)
87 *states |= new_ubos;
88
89 if (prog->info.num_ssbos)
90 *states |= new_ssbos;
91
92 if (prog->info.num_abos)
93 *states |= new_atomics;
94 }
95
96 /**
97 * This determines which states will be updated when the shader is bound.
98 */
99 void
100 st_set_prog_affected_state_flags(struct gl_program *prog)
101 {
102 uint64_t *states;
103
104 switch (prog->info.stage) {
105 case MESA_SHADER_VERTEX:
106 states = &((struct st_vertex_program*)prog)->affected_states;
107
108 *states = ST_NEW_VS_STATE |
109 ST_NEW_RASTERIZER |
110 ST_NEW_VERTEX_ARRAYS;
111
112 set_affected_state_flags(states, prog,
113 ST_NEW_VS_CONSTANTS,
114 ST_NEW_VS_SAMPLER_VIEWS,
115 ST_NEW_VS_SAMPLERS,
116 ST_NEW_VS_IMAGES,
117 ST_NEW_VS_UBOS,
118 ST_NEW_VS_SSBOS,
119 ST_NEW_VS_ATOMICS);
120 break;
121
122 case MESA_SHADER_TESS_CTRL:
123 states = &(st_common_program(prog))->affected_states;
124
125 *states = ST_NEW_TCS_STATE;
126
127 set_affected_state_flags(states, prog,
128 ST_NEW_TCS_CONSTANTS,
129 ST_NEW_TCS_SAMPLER_VIEWS,
130 ST_NEW_TCS_SAMPLERS,
131 ST_NEW_TCS_IMAGES,
132 ST_NEW_TCS_UBOS,
133 ST_NEW_TCS_SSBOS,
134 ST_NEW_TCS_ATOMICS);
135 break;
136
137 case MESA_SHADER_TESS_EVAL:
138 states = &(st_common_program(prog))->affected_states;
139
140 *states = ST_NEW_TES_STATE |
141 ST_NEW_RASTERIZER;
142
143 set_affected_state_flags(states, prog,
144 ST_NEW_TES_CONSTANTS,
145 ST_NEW_TES_SAMPLER_VIEWS,
146 ST_NEW_TES_SAMPLERS,
147 ST_NEW_TES_IMAGES,
148 ST_NEW_TES_UBOS,
149 ST_NEW_TES_SSBOS,
150 ST_NEW_TES_ATOMICS);
151 break;
152
153 case MESA_SHADER_GEOMETRY:
154 states = &(st_common_program(prog))->affected_states;
155
156 *states = ST_NEW_GS_STATE |
157 ST_NEW_RASTERIZER;
158
159 set_affected_state_flags(states, prog,
160 ST_NEW_GS_CONSTANTS,
161 ST_NEW_GS_SAMPLER_VIEWS,
162 ST_NEW_GS_SAMPLERS,
163 ST_NEW_GS_IMAGES,
164 ST_NEW_GS_UBOS,
165 ST_NEW_GS_SSBOS,
166 ST_NEW_GS_ATOMICS);
167 break;
168
169 case MESA_SHADER_FRAGMENT:
170 states = &((struct st_fragment_program*)prog)->affected_states;
171
172 /* gl_FragCoord and glDrawPixels always use constants. */
173 *states = ST_NEW_FS_STATE |
174 ST_NEW_SAMPLE_SHADING |
175 ST_NEW_FS_CONSTANTS;
176
177 set_affected_state_flags(states, prog,
178 ST_NEW_FS_CONSTANTS,
179 ST_NEW_FS_SAMPLER_VIEWS,
180 ST_NEW_FS_SAMPLERS,
181 ST_NEW_FS_IMAGES,
182 ST_NEW_FS_UBOS,
183 ST_NEW_FS_SSBOS,
184 ST_NEW_FS_ATOMICS);
185 break;
186
187 case MESA_SHADER_COMPUTE:
188 states = &((struct st_compute_program*)prog)->affected_states;
189
190 *states = ST_NEW_CS_STATE;
191
192 set_affected_state_flags(states, prog,
193 ST_NEW_CS_CONSTANTS,
194 ST_NEW_CS_SAMPLER_VIEWS,
195 ST_NEW_CS_SAMPLERS,
196 ST_NEW_CS_IMAGES,
197 ST_NEW_CS_UBOS,
198 ST_NEW_CS_SSBOS,
199 ST_NEW_CS_ATOMICS);
200 break;
201
202 default:
203 unreachable("unhandled shader stage");
204 }
205 }
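/*
 * Illustrative sketch (assumed caller behavior; the consumers live outside
 * this file): the affected_states mask computed above is meant to be OR'ed
 * into the context's dirty bits whenever the program is bound or one of its
 * resources changes, roughly:
 *
 *    st->dirty |= stfp->affected_states;
 *
 * so that only the state atoms this shader actually depends on get
 * revalidated.
 */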
206
207 /**
208 * Delete a vertex program variant. Note the caller must unlink
209 * the variant from the linked list.
210 */
211 static void
212 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
213 {
214 if (vpv->driver_shader)
215 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
216
217 if (vpv->draw_shader)
218 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
219
220 if ((vpv->tgsi.type == PIPE_SHADER_IR_TGSI) && vpv->tgsi.tokens)
221 ureg_free_tokens(vpv->tgsi.tokens);
222
223 free( vpv );
224 }
225
226
227
228 /**
229 * Free all variants of a vertex program.
230 */
231 void
232 st_release_vp_variants( struct st_context *st,
233 struct st_vertex_program *stvp )
234 {
235 struct st_vp_variant *vpv;
236
237 for (vpv = stvp->variants; vpv; ) {
238 struct st_vp_variant *next = vpv->next;
239 delete_vp_variant(st, vpv);
240 vpv = next;
241 }
242
243 stvp->variants = NULL;
244
245 if ((stvp->tgsi.type == PIPE_SHADER_IR_TGSI) && stvp->tgsi.tokens) {
246 tgsi_free_tokens(stvp->tgsi.tokens);
247 stvp->tgsi.tokens = NULL;
248 }
249 }
250
251
252
253 /**
254 * Delete a fragment program variant. Note the caller must unlink
255 * the variant from the linked list.
256 */
257 static void
258 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
259 {
260 if (fpv->driver_shader)
261 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
262 free(fpv);
263 }
264
265
266 /**
267 * Free all variants of a fragment program.
268 */
269 void
270 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
271 {
272 struct st_fp_variant *fpv;
273
274 for (fpv = stfp->variants; fpv; ) {
275 struct st_fp_variant *next = fpv->next;
276 delete_fp_variant(st, fpv);
277 fpv = next;
278 }
279
280 stfp->variants = NULL;
281
282 if ((stfp->tgsi.type == PIPE_SHADER_IR_TGSI) && stfp->tgsi.tokens) {
283 ureg_free_tokens(stfp->tgsi.tokens);
284 stfp->tgsi.tokens = NULL;
285 }
286 }
287
288
289 /**
290 * Delete a basic program variant. Note the caller must unlink
291 * the variant from the linked list.
292 */
293 static void
294 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
295 GLenum target)
296 {
297 if (v->driver_shader) {
298 switch (target) {
299 case GL_TESS_CONTROL_PROGRAM_NV:
300 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
301 break;
302 case GL_TESS_EVALUATION_PROGRAM_NV:
303 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
304 break;
305 case GL_GEOMETRY_PROGRAM_NV:
306 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
307 break;
308 case GL_COMPUTE_PROGRAM_NV:
309 cso_delete_compute_shader(st->cso_context, v->driver_shader);
310 break;
311 default:
312 assert(!"this shouldn't occur");
313 }
314 }
315
316 free(v);
317 }
318
319
320 /**
321 * Free all basic program variants.
322 */
323 void
324 st_release_basic_variants(struct st_context *st, GLenum target,
325 struct st_basic_variant **variants,
326 struct pipe_shader_state *tgsi)
327 {
328 struct st_basic_variant *v;
329
330 for (v = *variants; v; ) {
331 struct st_basic_variant *next = v->next;
332 delete_basic_variant(st, v, target);
333 v = next;
334 }
335
336 *variants = NULL;
337
338 if (tgsi->tokens) {
339 ureg_free_tokens(tgsi->tokens);
340 tgsi->tokens = NULL;
341 }
342 }
343
344
345 /**
346 * Free all variants of a compute program.
347 */
348 void
349 st_release_cp_variants(struct st_context *st, struct st_compute_program *stcp)
350 {
351 struct st_basic_variant **variants = &stcp->variants;
352 struct st_basic_variant *v;
353
354 for (v = *variants; v; ) {
355 struct st_basic_variant *next = v->next;
356 delete_basic_variant(st, v, stcp->Base.Target);
357 v = next;
358 }
359
360 *variants = NULL;
361
362 if (stcp->tgsi.prog) {
363 switch (stcp->tgsi.ir_type) {
364 case PIPE_SHADER_IR_TGSI:
365 ureg_free_tokens(stcp->tgsi.prog);
366 stcp->tgsi.prog = NULL;
367 break;
368 case PIPE_SHADER_IR_NIR:
369 /* pipe driver took ownership of prog */
370 break;
371 case PIPE_SHADER_IR_LLVM:
372 case PIPE_SHADER_IR_NATIVE:
373 /* ??? */
374 stcp->tgsi.prog = NULL;
375 break;
376 }
377 }
378 }
379
380 /**
381 * Translate a vertex program.
382 */
383 bool
384 st_translate_vertex_program(struct st_context *st,
385 struct st_vertex_program *stvp)
386 {
387 struct ureg_program *ureg;
388 enum pipe_error error;
389 unsigned num_outputs = 0;
390 unsigned attr;
391 ubyte input_to_index[VERT_ATTRIB_MAX] = {0};
392 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
393 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
394
395 stvp->num_inputs = 0;
396
397 if (stvp->Base.arb.IsPositionInvariant)
398 _mesa_insert_mvp_code(st->ctx, &stvp->Base);
399
400 /*
401 * Determine number of inputs, the mappings between VERT_ATTRIB_x
402 * and TGSI generic input indexes, plus input attrib semantic info.
403 */
404 for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
405 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
406 input_to_index[attr] = stvp->num_inputs;
407 stvp->index_to_input[stvp->num_inputs] = attr;
408 stvp->num_inputs++;
409 if ((stvp->Base.info.double_inputs_read &
410 BITFIELD64_BIT(attr)) != 0) {
411 /* add placeholder for second part of a double attribute */
412 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
413 stvp->num_inputs++;
414 }
415 }
416 }
417 /* bit of a hack: pre-set up the potentially unused edgeflag input */
418 input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
419 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
420
421 /* Compute mapping of vertex program outputs to slots.
422 */
423 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
424 if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
425 stvp->result_to_output[attr] = ~0;
426 }
427 else {
428 unsigned slot = num_outputs++;
429
430 stvp->result_to_output[attr] = slot;
431
432 unsigned semantic_name, semantic_index;
433 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
434 &semantic_name, &semantic_index);
435 output_semantic_name[slot] = semantic_name;
436 output_semantic_index[slot] = semantic_index;
437 }
438 }
439 /* similar hack to the above: pre-set up the potentially unused edgeflag output */
440 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
441 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
442 output_semantic_index[num_outputs] = 0;
443
444 /* ARB_vp: */
445 if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
446 _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
447
448 /* This determines which states will be updated when the assembly
449 * shader is bound.
450 */
451 stvp->affected_states = ST_NEW_VS_STATE |
452 ST_NEW_RASTERIZER |
453 ST_NEW_VERTEX_ARRAYS;
454
455 if (stvp->Base.Parameters->NumParameters)
456 stvp->affected_states |= ST_NEW_VS_CONSTANTS;
457
458 /* No samplers are allowed in ARB_vp. */
459 }
460
461 if (stvp->shader_program) {
462 struct gl_program *prog = stvp->shader_program->last_vert_prog;
463 if (prog) {
464 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
465 stvp->result_to_output,
466 &stvp->tgsi.stream_output);
467 }
468
469 return true;
470 }
471
472 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
473 if (ureg == NULL)
474 return false;
475
476 if (stvp->Base.info.clip_distance_array_size)
477 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
478 stvp->Base.info.clip_distance_array_size);
479 if (stvp->Base.info.cull_distance_array_size)
480 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
481 stvp->Base.info.cull_distance_array_size);
482
483 if (ST_DEBUG & DEBUG_MESA) {
484 _mesa_print_program(&stvp->Base);
485 _mesa_print_program_parameters(st->ctx, &stvp->Base);
486 debug_printf("\n");
487 }
488
489 if (stvp->glsl_to_tgsi) {
490 error = st_translate_program(st->ctx,
491 PIPE_SHADER_VERTEX,
492 ureg,
493 stvp->glsl_to_tgsi,
494 &stvp->Base,
495 /* inputs */
496 stvp->num_inputs,
497 input_to_index,
498 NULL, /* inputSlotToAttr */
499 NULL, /* input semantic name */
500 NULL, /* input semantic index */
501 NULL, /* interp mode */
502 /* outputs */
503 num_outputs,
504 stvp->result_to_output,
505 output_semantic_name,
506 output_semantic_index);
507
508 st_translate_stream_output_info(stvp->glsl_to_tgsi,
509 stvp->result_to_output,
510 &stvp->tgsi.stream_output);
511
512 free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
513 } else
514 error = st_translate_mesa_program(st->ctx,
515 PIPE_SHADER_VERTEX,
516 ureg,
517 &stvp->Base,
518 /* inputs */
519 stvp->num_inputs,
520 input_to_index,
521 NULL, /* input semantic name */
522 NULL, /* input semantic index */
523 NULL,
524 /* outputs */
525 num_outputs,
526 stvp->result_to_output,
527 output_semantic_name,
528 output_semantic_index);
529
530 if (error) {
531 debug_printf("%s: failed to translate Mesa program:\n", __func__);
532 _mesa_print_program(&stvp->Base);
533 debug_assert(0);
534 return false;
535 }
536
537 stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
538 ureg_destroy(ureg);
539
540 if (stvp->glsl_to_tgsi) {
541 stvp->glsl_to_tgsi = NULL;
542 st_store_tgsi_in_disk_cache(st, &stvp->Base);
543 }
544
545 return stvp->tgsi.tokens != NULL;
546 }
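/*
 * Worked example of the input mapping built in st_translate_vertex_program()
 * above (illustrative, using hypothetical inputs): for a vertex shader
 * reading VERT_ATTRIB_POS plus a two-slot double-precision generic
 * attribute (VERT_ATTRIB_GENERIC0), the loop produces
 *
 *    input_to_index[VERT_ATTRIB_POS]      = 0
 *    input_to_index[VERT_ATTRIB_GENERIC0] = 1
 *    index_to_input[0] = VERT_ATTRIB_POS
 *    index_to_input[1] = VERT_ATTRIB_GENERIC0
 *    index_to_input[2] = ST_DOUBLE_ATTRIB_PLACEHOLDER  (second half of the
 *                                                       double attribute)
 *    stvp->num_inputs  = 3
 *
 * and the potentially unused edgeflag entry is then pre-set at index 3
 * without incrementing num_inputs.
 */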
547
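/**
 * Create a new vertex program variant for the given key, applying
 * key-dependent lowering (color clamping, edgeflag passthrough) to a copy
 * of the program's NIR or TGSI before creating the driver shader.
 */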
548 static struct st_vp_variant *
549 st_create_vp_variant(struct st_context *st,
550 struct st_vertex_program *stvp,
551 const struct st_vp_variant_key *key)
552 {
553 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
554 struct pipe_context *pipe = st->pipe;
555
556 vpv->key = *key;
557 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
558 vpv->num_inputs = stvp->num_inputs;
559
560 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
561 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
562 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
563 if (key->clamp_color)
564 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
565 if (key->passthrough_edgeflags) {
566 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
567 vpv->num_inputs++;
568 }
569
570 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
571 vpv->tgsi.ir.nir);
572
573 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
574 /* driver takes ownership of IR: */
575 vpv->tgsi.ir.nir = NULL;
576 return vpv;
577 }
578
579 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
580
581 /* Emulate features. */
582 if (key->clamp_color || key->passthrough_edgeflags) {
583 const struct tgsi_token *tokens;
584 unsigned flags =
585 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
586 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
587
588 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
589
590 if (tokens) {
591 tgsi_free_tokens(vpv->tgsi.tokens);
592 vpv->tgsi.tokens = tokens;
593
594 if (key->passthrough_edgeflags)
595 vpv->num_inputs++;
596 } else
597 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
598 }
599
600 if (ST_DEBUG & DEBUG_TGSI) {
601 tgsi_dump(vpv->tgsi.tokens, 0);
602 debug_printf("\n");
603 }
604
605 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
606 return vpv;
607 }
608
609
610 /**
611 * Find/create a vertex program variant.
612 */
613 struct st_vp_variant *
614 st_get_vp_variant(struct st_context *st,
615 struct st_vertex_program *stvp,
616 const struct st_vp_variant_key *key)
617 {
618 struct st_vp_variant *vpv;
619
620 /* Search for existing variant */
621 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
622 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
623 break;
624 }
625 }
626
627 if (!vpv) {
628 /* create now */
629 vpv = st_create_vp_variant(st, stvp, key);
630 if (vpv) {
631 /* insert into list */
632 vpv->next = stvp->variants;
633 stvp->variants = vpv;
634 }
635 }
636
637 return vpv;
638 }
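/*
 * Hypothetical usage sketch (the caller shown here is an assumption for
 * illustration; the key fields are the ones referenced elsewhere in this
 * file):
 *
 *    struct st_vp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    key.clamp_color = ...;              // derived from current GL state
 *    vpv = st_get_vp_variant(st, stvp, &key);
 *    cso_set_vertex_shader_handle(st->cso_context, vpv->driver_shader);
 */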
639
640
641 /**
642 * Translate a Mesa fragment shader into a TGSI shader.
643 */
644 bool
645 st_translate_fragment_program(struct st_context *st,
646 struct st_fragment_program *stfp)
647 {
648 ubyte outputMapping[2 * FRAG_RESULT_MAX];
649 ubyte inputMapping[VARYING_SLOT_MAX];
650 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
651 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
652 GLuint attr;
653 GLbitfield64 inputsRead;
654 struct ureg_program *ureg;
655
656 GLboolean write_all = GL_FALSE;
657
658 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
659 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
660 uint fs_num_inputs = 0;
661
662 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
663 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
664 uint fs_num_outputs = 0;
665
666 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
667
668 /* Non-GLSL programs: */
669 if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
670 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
671 if (st->ctx->Const.GLSLFragCoordIsSysVal)
672 _mesa_program_fragment_position_to_sysval(&stfp->Base);
673
674 /* This determines which states will be updated when the assembly
675 * shader is bound.
676 *
677 * fragment.position and glDrawPixels always use constants.
678 */
679 stfp->affected_states = ST_NEW_FS_STATE |
680 ST_NEW_SAMPLE_SHADING |
681 ST_NEW_FS_CONSTANTS;
682
683 if (stfp->ati_fs) {
684 /* Just set them for ATI_fs unconditionally. */
685 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
686 ST_NEW_FS_SAMPLERS;
687 } else {
688 /* ARB_fp */
689 if (stfp->Base.SamplersUsed)
690 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
691 ST_NEW_FS_SAMPLERS;
692 }
693 }
694
695 /*
696 * Convert Mesa program inputs to TGSI input register semantics.
697 */
698 inputsRead = stfp->Base.info.inputs_read;
699 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
700 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
701 const GLuint slot = fs_num_inputs++;
702
703 inputMapping[attr] = slot;
704 inputSlotToAttr[slot] = attr;
705
706 switch (attr) {
707 case VARYING_SLOT_POS:
708 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
709 input_semantic_index[slot] = 0;
710 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
711 break;
712 case VARYING_SLOT_COL0:
713 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
714 input_semantic_index[slot] = 0;
715 interpMode[slot] = stfp->glsl_to_tgsi ?
716 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
717 break;
718 case VARYING_SLOT_COL1:
719 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
720 input_semantic_index[slot] = 1;
721 interpMode[slot] = stfp->glsl_to_tgsi ?
722 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
723 break;
724 case VARYING_SLOT_FOGC:
725 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
726 input_semantic_index[slot] = 0;
727 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
728 break;
729 case VARYING_SLOT_FACE:
730 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
731 input_semantic_index[slot] = 0;
732 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
733 break;
734 case VARYING_SLOT_PRIMITIVE_ID:
735 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
736 input_semantic_index[slot] = 0;
737 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
738 break;
739 case VARYING_SLOT_LAYER:
740 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
741 input_semantic_index[slot] = 0;
742 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
743 break;
744 case VARYING_SLOT_VIEWPORT:
745 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
746 input_semantic_index[slot] = 0;
747 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
748 break;
749 case VARYING_SLOT_CLIP_DIST0:
750 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
751 input_semantic_index[slot] = 0;
752 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
753 break;
754 case VARYING_SLOT_CLIP_DIST1:
755 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
756 input_semantic_index[slot] = 1;
757 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
758 break;
759 case VARYING_SLOT_CULL_DIST0:
760 case VARYING_SLOT_CULL_DIST1:
761 /* these should have been lowered by GLSL */
762 assert(0);
763 break;
764 /* In most cases, there is nothing special about these
765 * inputs, so adopt a convention to use the generic
766 * semantic name and the mesa VARYING_SLOT_ number as the
767 * index.
768 *
769 * All that is required is that the vertex shader labels
770 * its own outputs similarly, and that the vertex shader
771 * generates at least every output required by the
772 * fragment shader plus fixed-function hardware (such as
773 * BFC).
774 *
775 * However, some drivers may need us to identify the PNTC and TEXi
776 * varyings if, for example, their capability to replace them with
777 * sprite coordinates is limited.
778 */
779 case VARYING_SLOT_PNTC:
780 if (st->needs_texcoord_semantic) {
781 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
782 input_semantic_index[slot] = 0;
783 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
784 break;
785 }
786 /* fall through */
787 case VARYING_SLOT_TEX0:
788 case VARYING_SLOT_TEX1:
789 case VARYING_SLOT_TEX2:
790 case VARYING_SLOT_TEX3:
791 case VARYING_SLOT_TEX4:
792 case VARYING_SLOT_TEX5:
793 case VARYING_SLOT_TEX6:
794 case VARYING_SLOT_TEX7:
795 if (st->needs_texcoord_semantic) {
796 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
797 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
798 interpMode[slot] = stfp->glsl_to_tgsi ?
799 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
800 break;
801 }
802 /* fall through */
803 case VARYING_SLOT_VAR0:
804 default:
805 /* Semantic indices should be zero-based because drivers may choose
806 * to assign a fixed slot determined by that index.
807 * This is useful because ARB_separate_shader_objects uses location
808 * qualifiers for linkage, and if the semantic index corresponds to
809 * these locations, linkage passes in the driver become unnecessary.
810 *
811 * If needs_texcoord_semantic is true, no semantic indices will be
812 * consumed for the TEXi varyings, and we can base the locations of
813 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
814 */
815 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
816 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
817 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
818 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
819 if (attr == VARYING_SLOT_PNTC)
820 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
821 else {
822 interpMode[slot] = stfp->glsl_to_tgsi ?
823 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
824 }
825 break;
826 }
827 }
828 else {
829 inputMapping[attr] = -1;
830 }
831 }
832
833 /*
834 * Semantics and mapping for outputs
835 */
836 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
837
838 /* if z is written, emit that first */
839 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
840 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
841 fs_output_semantic_index[fs_num_outputs] = 0;
842 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
843 fs_num_outputs++;
844 outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
845 }
846
847 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
848 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
849 fs_output_semantic_index[fs_num_outputs] = 0;
850 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
851 fs_num_outputs++;
852 outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
853 }
854
855 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
856 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
857 fs_output_semantic_index[fs_num_outputs] = 0;
858 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
859 fs_num_outputs++;
860 outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
861 }
862
863 /* handle remaining outputs (color) */
864 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
865 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
866 stfp->Base.SecondaryOutputsWritten;
867 const unsigned loc = attr % FRAG_RESULT_MAX;
868
869 if (written & BITFIELD64_BIT(loc)) {
870 switch (loc) {
871 case FRAG_RESULT_DEPTH:
872 case FRAG_RESULT_STENCIL:
873 case FRAG_RESULT_SAMPLE_MASK:
874 /* handled above */
875 assert(0);
876 break;
877 case FRAG_RESULT_COLOR:
878 write_all = GL_TRUE; /* fallthrough */
879 default: {
880 int index;
881 assert(loc == FRAG_RESULT_COLOR ||
882 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
883
884 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
885
886 if (attr >= FRAG_RESULT_MAX) {
887 /* Secondary color for dual source blending. */
888 assert(index == 0);
889 index++;
890 }
891
892 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
893 fs_output_semantic_index[fs_num_outputs] = index;
894 outputMapping[attr] = fs_num_outputs;
895 break;
896 }
897 }
898
899 fs_num_outputs++;
900 }
901 }
902
903 /* We have already compiled to NIR so just return */
904 if (stfp->shader_program)
905 return true;
906
907 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
908 if (ureg == NULL)
909 return false;
910
911 if (ST_DEBUG & DEBUG_MESA) {
912 _mesa_print_program(&stfp->Base);
913 _mesa_print_program_parameters(st->ctx, &stfp->Base);
914 debug_printf("\n");
915 }
916 if (write_all == GL_TRUE)
917 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
918
919 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
920 switch (stfp->Base.info.fs.depth_layout) {
921 case FRAG_DEPTH_LAYOUT_ANY:
922 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
923 TGSI_FS_DEPTH_LAYOUT_ANY);
924 break;
925 case FRAG_DEPTH_LAYOUT_GREATER:
926 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
927 TGSI_FS_DEPTH_LAYOUT_GREATER);
928 break;
929 case FRAG_DEPTH_LAYOUT_LESS:
930 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
931 TGSI_FS_DEPTH_LAYOUT_LESS);
932 break;
933 case FRAG_DEPTH_LAYOUT_UNCHANGED:
934 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
935 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
936 break;
937 default:
938 assert(0);
939 }
940 }
941
942 if (stfp->glsl_to_tgsi) {
943 st_translate_program(st->ctx,
944 PIPE_SHADER_FRAGMENT,
945 ureg,
946 stfp->glsl_to_tgsi,
947 &stfp->Base,
948 /* inputs */
949 fs_num_inputs,
950 inputMapping,
951 inputSlotToAttr,
952 input_semantic_name,
953 input_semantic_index,
954 interpMode,
955 /* outputs */
956 fs_num_outputs,
957 outputMapping,
958 fs_output_semantic_name,
959 fs_output_semantic_index);
960
961 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
962 } else if (stfp->ati_fs)
963 st_translate_atifs_program(ureg,
964 stfp->ati_fs,
965 &stfp->Base,
966 /* inputs */
967 fs_num_inputs,
968 inputMapping,
969 input_semantic_name,
970 input_semantic_index,
971 interpMode,
972 /* outputs */
973 fs_num_outputs,
974 outputMapping,
975 fs_output_semantic_name,
976 fs_output_semantic_index);
977 else
978 st_translate_mesa_program(st->ctx,
979 PIPE_SHADER_FRAGMENT,
980 ureg,
981 &stfp->Base,
982 /* inputs */
983 fs_num_inputs,
984 inputMapping,
985 input_semantic_name,
986 input_semantic_index,
987 interpMode,
988 /* outputs */
989 fs_num_outputs,
990 outputMapping,
991 fs_output_semantic_name,
992 fs_output_semantic_index);
993
994 stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
995 ureg_destroy(ureg);
996
997 if (stfp->glsl_to_tgsi) {
998 stfp->glsl_to_tgsi = NULL;
999 st_store_tgsi_in_disk_cache(st, &stfp->Base);
1000 }
1001
1002 return stfp->tgsi.tokens != NULL;
1003 }
1004
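/**
 * Create a new fragment program variant for the given key, applying the
 * requested lowerings (color clamping, per-sample shading, glBitmap,
 * glDrawPixels, external/YUV textures) to a copy of the program's NIR or
 * TGSI before creating the driver shader.
 */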
1005 static struct st_fp_variant *
1006 st_create_fp_variant(struct st_context *st,
1007 struct st_fragment_program *stfp,
1008 const struct st_fp_variant_key *key)
1009 {
1010 struct pipe_context *pipe = st->pipe;
1011 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1012 struct pipe_shader_state tgsi = {0};
1013 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1014 static const gl_state_index texcoord_state[STATE_LENGTH] =
1015 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1016 static const gl_state_index scale_state[STATE_LENGTH] =
1017 { STATE_INTERNAL, STATE_PT_SCALE };
1018 static const gl_state_index bias_state[STATE_LENGTH] =
1019 { STATE_INTERNAL, STATE_PT_BIAS };
1020
1021 if (!variant)
1022 return NULL;
1023
1024 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1025 tgsi.type = PIPE_SHADER_IR_NIR;
1026 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1027
1028 if (key->clamp_color)
1029 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1030
1031 if (key->persample_shading) {
1032 nir_shader *shader = tgsi.ir.nir;
1033 nir_foreach_variable(var, &shader->inputs)
1034 var->data.sample = true;
1035 }
1036
1037 assert(!(key->bitmap && key->drawpixels));
1038
1039 /* glBitmap */
1040 if (key->bitmap) {
1041 nir_lower_bitmap_options options = {0};
1042
1043 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1044 options.sampler = variant->bitmap_sampler;
1045 options.swizzle_xxxx = (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM);
1046
1047 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1048 }
1049
1050 /* glDrawPixels (color only) */
1051 if (key->drawpixels) {
1052 nir_lower_drawpixels_options options = {{0}};
1053 unsigned samplers_used = stfp->Base.SamplersUsed;
1054
1055 /* Find the first unused slot. */
1056 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1057 options.drawpix_sampler = variant->drawpix_sampler;
1058 samplers_used |= (1 << variant->drawpix_sampler);
1059
1060 options.pixel_maps = key->pixelMaps;
1061 if (key->pixelMaps) {
1062 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1063 options.pixelmap_sampler = variant->pixelmap_sampler;
1064 }
1065
1066 options.scale_and_bias = key->scaleAndBias;
1067 if (key->scaleAndBias) {
1068 _mesa_add_state_reference(params, scale_state);
1069 memcpy(options.scale_state_tokens, scale_state,
1070 sizeof(options.scale_state_tokens));
1071 _mesa_add_state_reference(params, bias_state);
1072 memcpy(options.bias_state_tokens, bias_state,
1073 sizeof(options.bias_state_tokens));
1074 }
1075
1076 _mesa_add_state_reference(params, texcoord_state);
1077 memcpy(options.texcoord_state_tokens, texcoord_state,
1078 sizeof(options.texcoord_state_tokens));
1079
1080 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1081 }
1082
1083 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1084 nir_lower_tex_options options = {0};
1085 options.lower_y_uv_external = key->external.lower_nv12;
1086 options.lower_y_u_v_external = key->external.lower_iyuv;
1087 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1088 }
1089
1090 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1091
1092 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1093 /* This pass needs to happen *after* nir_lower_sampler */
1094 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1095 ~stfp->Base.SamplersUsed,
1096 key->external.lower_nv12,
1097 key->external.lower_iyuv);
1098 }
1099
1100 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1101 variant->key = *key;
1102
1103 return variant;
1104 }
1105
1106 tgsi.tokens = stfp->tgsi.tokens;
1107
1108 assert(!(key->bitmap && key->drawpixels));
1109
1110 /* Fix texture targets and add fog for ATI_fs */
1111 if (stfp->ati_fs) {
1112 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1113
1114 if (tokens)
1115 tgsi.tokens = tokens;
1116 else
1117 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1118 }
1119
1120 /* Emulate features. */
1121 if (key->clamp_color || key->persample_shading) {
1122 const struct tgsi_token *tokens;
1123 unsigned flags =
1124 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1125 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1126
1127 tokens = tgsi_emulate(tgsi.tokens, flags);
1128
1129 if (tokens) {
1130 if (tgsi.tokens != stfp->tgsi.tokens)
1131 tgsi_free_tokens(tgsi.tokens);
1132 tgsi.tokens = tokens;
1133 } else
1134 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1135 }
1136
1137 /* glBitmap */
1138 if (key->bitmap) {
1139 const struct tgsi_token *tokens;
1140
1141 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1142
1143 tokens = st_get_bitmap_shader(tgsi.tokens,
1144 st->internal_target,
1145 variant->bitmap_sampler,
1146 st->needs_texcoord_semantic,
1147 st->bitmap.tex_format ==
1148 PIPE_FORMAT_L8_UNORM);
1149
1150 if (tokens) {
1151 if (tgsi.tokens != stfp->tgsi.tokens)
1152 tgsi_free_tokens(tgsi.tokens);
1153 tgsi.tokens = tokens;
1154 } else
1155 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1156 }
1157
1158 /* glDrawPixels (color only) */
1159 if (key->drawpixels) {
1160 const struct tgsi_token *tokens;
1161 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1162
1163 /* Find the first unused slot. */
1164 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1165
1166 if (key->pixelMaps) {
1167 unsigned samplers_used = stfp->Base.SamplersUsed |
1168 (1 << variant->drawpix_sampler);
1169
1170 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1171 }
1172
1173 if (key->scaleAndBias) {
1174 scale_const = _mesa_add_state_reference(params, scale_state);
1175 bias_const = _mesa_add_state_reference(params, bias_state);
1176 }
1177
1178 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1179
1180 tokens = st_get_drawpix_shader(tgsi.tokens,
1181 st->needs_texcoord_semantic,
1182 key->scaleAndBias, scale_const,
1183 bias_const, key->pixelMaps,
1184 variant->drawpix_sampler,
1185 variant->pixelmap_sampler,
1186 texcoord_const, st->internal_target);
1187
1188 if (tokens) {
1189 if (tgsi.tokens != stfp->tgsi.tokens)
1190 tgsi_free_tokens(tgsi.tokens);
1191 tgsi.tokens = tokens;
1192 } else
1193 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1194 }
1195
1196 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1197 const struct tgsi_token *tokens;
1198
1199 /* samplers inserted would conflict, but this should be impossible: */
1200 assert(!(key->bitmap || key->drawpixels));
1201
1202 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1203 ~stfp->Base.SamplersUsed,
1204 key->external.lower_nv12,
1205 key->external.lower_iyuv);
1206 if (tokens) {
1207 if (tgsi.tokens != stfp->tgsi.tokens)
1208 tgsi_free_tokens(tgsi.tokens);
1209 tgsi.tokens = tokens;
1210 } else {
1211 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1212 }
1213 }
1214
1215 if (ST_DEBUG & DEBUG_TGSI) {
1216 tgsi_dump(tgsi.tokens, 0);
1217 debug_printf("\n");
1218 }
1219
1220 /* fill in variant */
1221 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1222 variant->key = *key;
1223
1224 if (tgsi.tokens != stfp->tgsi.tokens)
1225 tgsi_free_tokens(tgsi.tokens);
1226 return variant;
1227 }
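/*
 * Note on the sampler-slot search used in both paths above:
 * ffs(~SamplersUsed) - 1 yields the lowest sampler unit the program does
 * not already use.  For example, with SamplersUsed == 0x7 (units 0-2 in
 * use), ~SamplersUsed has its lowest set bit at position 3, ffs() returns
 * 4, and the bitmap/drawpix sampler lands in unit 3.
 */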
1228
1229 /**
1230 * Translate fragment program if needed.
1231 */
1232 struct st_fp_variant *
1233 st_get_fp_variant(struct st_context *st,
1234 struct st_fragment_program *stfp,
1235 const struct st_fp_variant_key *key)
1236 {
1237 struct st_fp_variant *fpv;
1238
1239 /* Search for existing variant */
1240 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1241 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1242 break;
1243 }
1244 }
1245
1246 if (!fpv) {
1247 /* create new */
1248 fpv = st_create_fp_variant(st, stfp, key);
1249 if (fpv) {
1250 if (key->bitmap || key->drawpixels) {
1251 /* Regular variants should always come before the
1252 * bitmap & drawpixels variants (unless there
1253 * are no regular variants), so that
1254 * st_update_fp can take a fast path when
1255 * shader_has_one_variant is set.
1256 */
1257 if (!stfp->variants) {
1258 stfp->variants = fpv;
1259 } else {
1260 /* insert into list after the first one */
1261 fpv->next = stfp->variants->next;
1262 stfp->variants->next = fpv;
1263 }
1264 } else {
1265 /* insert into list */
1266 fpv->next = stfp->variants;
1267 stfp->variants = fpv;
1268 }
1269 }
1270 }
1271
1272 return fpv;
1273 }
1274
1275
1276 /**
1277 * Translate a program. This is common code for geometry and tessellation
1278 * shaders.
1279 */
1280 static void
1281 st_translate_program_common(struct st_context *st,
1282 struct gl_program *prog,
1283 struct glsl_to_tgsi_visitor *glsl_to_tgsi,
1284 struct ureg_program *ureg,
1285 unsigned tgsi_processor,
1286 struct pipe_shader_state *out_state)
1287 {
1288 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1289 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1290 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1291 GLuint attr;
1292
1293 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1294 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1295 uint num_inputs = 0;
1296
1297 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1298 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1299 uint num_outputs = 0;
1300
1301 GLint i;
1302
1303 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1304 memset(inputMapping, 0, sizeof(inputMapping));
1305 memset(outputMapping, 0, sizeof(outputMapping));
1306 memset(out_state, 0, sizeof(*out_state));
1307
1308 if (prog->info.clip_distance_array_size)
1309 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1310 prog->info.clip_distance_array_size);
1311 if (prog->info.cull_distance_array_size)
1312 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1313 prog->info.cull_distance_array_size);
1314
1315 /*
1316 * Convert Mesa program inputs to TGSI input register semantics.
1317 */
1318 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1319 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1320 continue;
1321
1322 unsigned slot = num_inputs++;
1323
1324 inputMapping[attr] = slot;
1325 inputSlotToAttr[slot] = attr;
1326
1327 unsigned semantic_name, semantic_index;
1328 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1329 &semantic_name, &semantic_index);
1330 input_semantic_name[slot] = semantic_name;
1331 input_semantic_index[slot] = semantic_index;
1332 }
1333
1334 /* Also add patch inputs. */
1335 for (attr = 0; attr < 32; attr++) {
1336 if (prog->info.patch_inputs_read & (1u << attr)) {
1337 GLuint slot = num_inputs++;
1338 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1339
1340 inputMapping[patch_attr] = slot;
1341 inputSlotToAttr[slot] = patch_attr;
1342 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1343 input_semantic_index[slot] = attr;
1344 }
1345 }
1346
1347 /* initialize output semantics to defaults */
1348 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1349 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1350 output_semantic_index[i] = 0;
1351 }
1352
1353 /*
1354 * Determine number of outputs, the (default) output register
1355 * mapping and the semantic information for each output.
1356 */
1357 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1358 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1359 GLuint slot = num_outputs++;
1360
1361 outputMapping[attr] = slot;
1362
1363 unsigned semantic_name, semantic_index;
1364 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1365 &semantic_name, &semantic_index);
1366 output_semantic_name[slot] = semantic_name;
1367 output_semantic_index[slot] = semantic_index;
1368 }
1369 }
1370
1371 /* Also add patch outputs. */
1372 for (attr = 0; attr < 32; attr++) {
1373 if (prog->info.patch_outputs_written & (1u << attr)) {
1374 GLuint slot = num_outputs++;
1375 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1376
1377 outputMapping[patch_attr] = slot;
1378 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1379 output_semantic_index[slot] = attr;
1380 }
1381 }
1382
1383 st_translate_program(st->ctx,
1384 tgsi_processor,
1385 ureg,
1386 glsl_to_tgsi,
1387 prog,
1388 /* inputs */
1389 num_inputs,
1390 inputMapping,
1391 inputSlotToAttr,
1392 input_semantic_name,
1393 input_semantic_index,
1394 NULL,
1395 /* outputs */
1396 num_outputs,
1397 outputMapping,
1398 output_semantic_name,
1399 output_semantic_index);
1400
1401 if (tgsi_processor == PIPE_SHADER_COMPUTE) {
1402 struct st_compute_program *stcp = (struct st_compute_program *) prog;
1403 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1404 stcp->tgsi.prog = out_state->tokens;
1405 } else {
1406 struct st_common_program *stcp = (struct st_common_program *) prog;
1407 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1408 }
1409 ureg_destroy(ureg);
1410
1411 st_translate_stream_output_info(glsl_to_tgsi,
1412 outputMapping,
1413 &out_state->stream_output);
1414
1415 st_store_tgsi_in_disk_cache(st, prog);
1416
1417 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1418 _mesa_print_program(prog);
1419 debug_printf("\n");
1420 }
1421
1422 if (ST_DEBUG & DEBUG_TGSI) {
1423 tgsi_dump(out_state->tokens, 0);
1424 debug_printf("\n");
1425 }
1426 }
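/*
 * Example of the patch-varying handling above: an output written to
 * VARYING_SLOT_PATCH0 + 2 gets the next free output slot and is declared
 * as TGSI_SEMANTIC_PATCH with semantic index 2, i.e. per-patch varyings
 * keep their patch location as the TGSI semantic index regardless of
 * which output slot they end up in.
 */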
1427
1428 /**
1429 * Update stream-output info for GS/TCS/TES. Normally this is done in
1430 * st_translate_program_common() but that is not called for glsl_to_nir
1431 * case.
1432 */
1433 static void
1434 st_translate_program_stream_output(struct gl_program *prog,
1435 struct pipe_stream_output_info *stream_output)
1436 {
1437 if (!prog->sh.LinkedTransformFeedback)
1438 return;
1439
1440 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1441 GLuint attr;
1442 uint num_outputs = 0;
1443
1444 memset(outputMapping, 0, sizeof(outputMapping));
1445
1446 /*
1447 * Determine number of outputs, the (default) output register
1448 * mapping and the semantic information for each output.
1449 */
1450 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1451 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1452 GLuint slot = num_outputs++;
1453
1454 outputMapping[attr] = slot;
1455 }
1456 }
1457
1458 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
1459 outputMapping,
1460 stream_output);
1461 }
1462
1463 /**
1464 * Translate a geometry program to create a new variant.
1465 */
1466 bool
1467 st_translate_geometry_program(struct st_context *st,
1468 struct st_common_program *stgp)
1469 {
1470 struct ureg_program *ureg;
1471
1472 /* We have already compiled to NIR so just return */
1473 if (stgp->shader_program) {
1474 st_translate_program_stream_output(&stgp->Base, &stgp->tgsi.stream_output);
1475 return true;
1476 }
1477
1478 ureg = ureg_create_with_screen(PIPE_SHADER_GEOMETRY, st->pipe->screen);
1479 if (ureg == NULL)
1480 return false;
1481
1482 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1483 stgp->Base.info.gs.input_primitive);
1484 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1485 stgp->Base.info.gs.output_primitive);
1486 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1487 stgp->Base.info.gs.vertices_out);
1488 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1489 stgp->Base.info.gs.invocations);
1490
1491 st_translate_program_common(st, &stgp->Base, stgp->glsl_to_tgsi, ureg,
1492 PIPE_SHADER_GEOMETRY, &stgp->tgsi);
1493
1494 free_glsl_to_tgsi_visitor(stgp->glsl_to_tgsi);
1495 stgp->glsl_to_tgsi = NULL;
1496 return true;
1497 }
1498
1499
1500 /**
1501 * Get/create a basic program variant.
1502 */
1503 struct st_basic_variant *
1504 st_get_basic_variant(struct st_context *st,
1505 unsigned pipe_shader,
1506 struct st_common_program *prog)
1507 {
1508 struct pipe_context *pipe = st->pipe;
1509 struct st_basic_variant *v;
1510 struct st_basic_variant_key key;
1511 struct pipe_shader_state tgsi = {0};
1512 memset(&key, 0, sizeof(key));
1513 key.st = st->has_shareable_shaders ? NULL : st;
1514
1515 /* Search for existing variant */
1516 for (v = prog->variants; v; v = v->next) {
1517 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1518 break;
1519 }
1520 }
1521
1522 if (!v) {
1523 /* create new */
1524 v = CALLOC_STRUCT(st_basic_variant);
1525 if (v) {
1526
1527 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1528 tgsi.type = PIPE_SHADER_IR_NIR;
1529 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1530 st_finalize_nir(st, &prog->Base, prog->shader_program,
1531 tgsi.ir.nir);
1532 tgsi.stream_output = prog->tgsi.stream_output;
1533 } else
1534 tgsi = prog->tgsi;
1535 /* fill in new variant */
1536 switch (pipe_shader) {
1537 case PIPE_SHADER_TESS_CTRL:
1538 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1539 break;
1540 case PIPE_SHADER_TESS_EVAL:
1541 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1542 break;
1543 case PIPE_SHADER_GEOMETRY:
1544 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1545 break;
1546 default:
1547 assert(!"unhandled shader type");
1548 free(v);
1549 return NULL;
1550 }
1551
1552 v->key = key;
1553
1554 /* insert into list */
1555 v->next = prog->variants;
1556 prog->variants = v;
1557 }
1558 }
1559
1560 return v;
1561 }
1562
1563
1564 /**
1565 * Translate a tessellation control program to create a new variant.
1566 */
1567 bool
1568 st_translate_tessctrl_program(struct st_context *st,
1569 struct st_common_program *sttcp)
1570 {
1571 struct ureg_program *ureg;
1572
1573 /* We have already compiled to NIR so just return */
1574 if (sttcp->shader_program)
1575 return true;
1576
1577 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_CTRL, st->pipe->screen);
1578 if (ureg == NULL)
1579 return false;
1580
1581 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1582 sttcp->Base.info.tess.tcs_vertices_out);
1583
1584 st_translate_program_common(st, &sttcp->Base, sttcp->glsl_to_tgsi, ureg,
1585 PIPE_SHADER_TESS_CTRL, &sttcp->tgsi);
1586
1587 free_glsl_to_tgsi_visitor(sttcp->glsl_to_tgsi);
1588 sttcp->glsl_to_tgsi = NULL;
1589 return true;
1590 }
1591
1592
1593 /**
1594 * Translate a tessellation evaluation program to create a new variant.
1595 */
1596 bool
1597 st_translate_tesseval_program(struct st_context *st,
1598 struct st_common_program *sttep)
1599 {
1600 struct ureg_program *ureg;
1601
1602 /* We have already compiled to NIR so just return */
1603 if (sttep->shader_program) {
1604 st_translate_program_stream_output(&sttep->Base, &sttep->tgsi.stream_output);
1605 return true;
1606 }
1607
1608 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_EVAL, st->pipe->screen);
1609 if (ureg == NULL)
1610 return false;
1611
1612 if (sttep->Base.info.tess.primitive_mode == GL_ISOLINES)
1613 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1614 else
1615 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1616 sttep->Base.info.tess.primitive_mode);
1617
1618 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1619 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1620 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1621 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1622 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1623
1624 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1625 (sttep->Base.info.tess.spacing + 1) % 3);
1626
1627 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1628 !sttep->Base.info.tess.ccw);
1629 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1630 sttep->Base.info.tess.point_mode);
1631
1632 st_translate_program_common(st, &sttep->Base, sttep->glsl_to_tgsi,
1633 ureg, PIPE_SHADER_TESS_EVAL, &sttep->tgsi);
1634
1635 free_glsl_to_tgsi_visitor(sttep->glsl_to_tgsi);
1636 sttep->glsl_to_tgsi = NULL;
1637 return true;
1638 }
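/*
 * Note on the spacing conversion above: the STATIC_ASSERTs guarantee
 * (gl_spacing + 1) % 3 == pipe_spacing, so for instance
 * TESS_SPACING_EQUAL maps to PIPE_TESS_SPACING_EQUAL and
 * TESS_SPACING_FRACTIONAL_ODD maps to PIPE_TESS_SPACING_FRACTIONAL_ODD
 * without needing a lookup table.
 */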
1639
1640
1641 /**
1642 * Translate a compute program to create a new variant.
1643 */
1644 bool
1645 st_translate_compute_program(struct st_context *st,
1646 struct st_compute_program *stcp)
1647 {
1648 struct ureg_program *ureg;
1649 struct pipe_shader_state prog;
1650
1651 if (stcp->shader_program) {
1652 /* no compute variants: */
1653 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1654 (struct nir_shader *) stcp->tgsi.prog);
1655
1656 return true;
1657 }
1658
1659 ureg = ureg_create_with_screen(PIPE_SHADER_COMPUTE, st->pipe->screen);
1660 if (ureg == NULL)
1661 return false;
1662
1663 st_translate_program_common(st, &stcp->Base, stcp->glsl_to_tgsi, ureg,
1664 PIPE_SHADER_COMPUTE, &prog);
1665
1666 stcp->tgsi.ir_type = PIPE_SHADER_IR_TGSI;
1667 stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
1668 stcp->tgsi.req_private_mem = 0;
1669 stcp->tgsi.req_input_mem = 0;
1670
1671 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1672 stcp->glsl_to_tgsi = NULL;
1673 return true;
1674 }
1675
1676
1677 /**
1678 * Get/create compute program variant.
1679 */
1680 struct st_basic_variant *
1681 st_get_cp_variant(struct st_context *st,
1682 struct pipe_compute_state *tgsi,
1683 struct st_basic_variant **variants)
1684 {
1685 struct pipe_context *pipe = st->pipe;
1686 struct st_basic_variant *v;
1687 struct st_basic_variant_key key;
1688
1689 memset(&key, 0, sizeof(key));
1690 key.st = st->has_shareable_shaders ? NULL : st;
1691
1692 /* Search for existing variant */
1693 for (v = *variants; v; v = v->next) {
1694 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1695 break;
1696 }
1697 }
1698
1699 if (!v) {
1700 /* create new */
1701 v = CALLOC_STRUCT(st_basic_variant);
1702 if (v) {
1703 /* fill in new variant */
1704 struct pipe_compute_state cs = *tgsi;
1705 if (tgsi->ir_type == PIPE_SHADER_IR_NIR)
1706 cs.prog = nir_shader_clone(NULL, tgsi->prog);
1707 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1708 v->key = key;
1709
1710 /* insert into list */
1711 v->next = *variants;
1712 *variants = v;
1713 }
1714 }
1715
1716 return v;
1717 }
1718
1719
1720 /**
1721 * Vert/Geom/Frag programs have per-context variants. Free all the
1722 * variants attached to the given program which match the given context.
1723 */
1724 static void
1725 destroy_program_variants(struct st_context *st, struct gl_program *target)
1726 {
1727 if (!target || target == &_mesa_DummyProgram)
1728 return;
1729
1730 switch (target->Target) {
1731 case GL_VERTEX_PROGRAM_ARB:
1732 {
1733 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1734 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1735
1736 for (vpv = stvp->variants; vpv; ) {
1737 struct st_vp_variant *next = vpv->next;
1738 if (vpv->key.st == st) {
1739 /* unlink from list */
1740 *prevPtr = next;
1741 /* destroy this variant */
1742 delete_vp_variant(st, vpv);
1743 }
1744 else {
1745 prevPtr = &vpv->next;
1746 }
1747 vpv = next;
1748 }
1749 }
1750 break;
1751 case GL_FRAGMENT_PROGRAM_ARB:
1752 {
1753 struct st_fragment_program *stfp =
1754 (struct st_fragment_program *) target;
1755 struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
1756
1757 for (fpv = stfp->variants; fpv; ) {
1758 struct st_fp_variant *next = fpv->next;
1759 if (fpv->key.st == st) {
1760 /* unlink from list */
1761 *prevPtr = next;
1762 /* destroy this variant */
1763 delete_fp_variant(st, fpv);
1764 }
1765 else {
1766 prevPtr = &fpv->next;
1767 }
1768 fpv = next;
1769 }
1770 }
1771 break;
1772 case GL_GEOMETRY_PROGRAM_NV:
1773 case GL_TESS_CONTROL_PROGRAM_NV:
1774 case GL_TESS_EVALUATION_PROGRAM_NV:
1775 case GL_COMPUTE_PROGRAM_NV:
1776 {
1777 struct st_common_program *p = st_common_program(target);
1778 struct st_compute_program *cp = (struct st_compute_program*)target;
1779 struct st_basic_variant **variants =
1780 target->Target == GL_COMPUTE_PROGRAM_NV ? &cp->variants :
1781 &p->variants;
1782 struct st_basic_variant *v, **prevPtr = variants;
1783
1784 for (v = *variants; v; ) {
1785 struct st_basic_variant *next = v->next;
1786 if (v->key.st == st) {
1787 /* unlink from list */
1788 *prevPtr = next;
1789 /* destroy this variant */
1790 delete_basic_variant(st, v, target->Target);
1791 }
1792 else {
1793 prevPtr = &v->next;
1794 }
1795 v = next;
1796 }
1797 }
1798 break;
1799 default:
1800 _mesa_problem(NULL, "Unexpected program target 0x%x in "
1801 "destroy_program_variants_cb()", target->Target);
1802 }
1803 }
1804
1805
1806 /**
1807 * Callback for _mesa_HashWalk. Free all the shader's program variants
1808 * which match the given context.
1809 */
1810 static void
1811 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1812 {
1813 struct st_context *st = (struct st_context *) userData;
1814 struct gl_shader *shader = (struct gl_shader *) data;
1815
1816 switch (shader->Type) {
1817 case GL_SHADER_PROGRAM_MESA:
1818 {
1819 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1820 GLuint i;
1821
1822 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1823 if (shProg->_LinkedShaders[i])
1824 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1825 }
1826 }
1827 break;
1828 case GL_VERTEX_SHADER:
1829 case GL_FRAGMENT_SHADER:
1830 case GL_GEOMETRY_SHADER:
1831 case GL_TESS_CONTROL_SHADER:
1832 case GL_TESS_EVALUATION_SHADER:
1833 case GL_COMPUTE_SHADER:
1834 break;
1835 default:
1836 assert(0);
1837 }
1838 }
1839
1840
1841 /**
1842 * Callback for _mesa_HashWalk. Free all the program variants which match
1843 * the given context.
1844 */
1845 static void
1846 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1847 {
1848 struct st_context *st = (struct st_context *) userData;
1849 struct gl_program *program = (struct gl_program *) data;
1850 destroy_program_variants(st, program);
1851 }
1852
1853
1854 /**
1855 * Walk over all shaders and programs to delete any variants which
1856 * belong to the given context.
1857 * This is called during context tear-down.
1858 */
1859 void
1860 st_destroy_program_variants(struct st_context *st)
1861 {
1862 /* If shaders can be shared with other contexts, the last context will
1863 * call DeleteProgram on all shaders, releasing everything.
1864 */
1865 if (st->has_shareable_shaders)
1866 return;
1867
1868 /* ARB vert/frag program */
1869 _mesa_HashWalk(st->ctx->Shared->Programs,
1870 destroy_program_variants_cb, st);
1871
1872 /* GLSL vert/frag/geom shaders */
1873 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1874 destroy_shader_program_variants_cb, st);
1875 }
1876
1877
1878 /**
1879 * For debugging, print/dump the current vertex program.
1880 */
1881 void
1882 st_print_current_vertex_program(void)
1883 {
1884 GET_CURRENT_CONTEXT(ctx);
1885
1886 if (ctx->VertexProgram._Current) {
1887 struct st_vertex_program *stvp =
1888 (struct st_vertex_program *) ctx->VertexProgram._Current;
1889 struct st_vp_variant *stv;
1890
1891 debug_printf("Vertex program %u\n", stvp->Base.Id);
1892
1893 for (stv = stvp->variants; stv; stv = stv->next) {
1894 debug_printf("variant %p\n", stv);
1895 tgsi_dump(stv->tgsi.tokens, 0);
1896 }
1897 }
1898 }
1899
1900
1901 /**
1902 * Compile one shader variant.
1903 */
1904 void
1905 st_precompile_shader_variant(struct st_context *st,
1906 struct gl_program *prog)
1907 {
1908 switch (prog->Target) {
1909 case GL_VERTEX_PROGRAM_ARB: {
1910 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1911 struct st_vp_variant_key key;
1912
1913 memset(&key, 0, sizeof(key));
1914 key.st = st->has_shareable_shaders ? NULL : st;
1915 st_get_vp_variant(st, p, &key);
1916 break;
1917 }
1918
1919 case GL_TESS_CONTROL_PROGRAM_NV: {
1920 struct st_common_program *p = st_common_program(prog);
1921 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p);
1922 break;
1923 }
1924
1925 case GL_TESS_EVALUATION_PROGRAM_NV: {
1926 struct st_common_program *p = st_common_program(prog);
1927 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p);
1928 break;
1929 }
1930
1931 case GL_GEOMETRY_PROGRAM_NV: {
1932 struct st_common_program *p = st_common_program(prog);
1933 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p);
1934 break;
1935 }
1936
1937 case GL_FRAGMENT_PROGRAM_ARB: {
1938 struct st_fragment_program *p = (struct st_fragment_program *)prog;
1939 struct st_fp_variant_key key;
1940
1941 memset(&key, 0, sizeof(key));
1942 key.st = st->has_shareable_shaders ? NULL : st;
1943 st_get_fp_variant(st, p, &key);
1944 break;
1945 }
1946
1947 case GL_COMPUTE_PROGRAM_NV: {
1948 struct st_compute_program *p = (struct st_compute_program *)prog;
1949 st_get_cp_variant(st, &p->tgsi, &p->variants);
1950 break;
1951 }
1952
1953 default:
1954 assert(0);
1955 }
1956 }
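/*
 * Illustrative note (an assumption about the callers, which live outside
 * this file): st_precompile_shader_variant() is typically invoked right
 * after a program has been translated or linked, so the driver compiles
 * the default (zero) key variant up front rather than at first draw; the
 * precompiled variant is only reused when the state atoms later build the
 * same key.
 */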