[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35 #include "main/imports.h"
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/programopt.h"
41
42 #include "compiler/nir/nir.h"
43
44 #include "pipe/p_context.h"
45 #include "pipe/p_defines.h"
46 #include "pipe/p_shader_tokens.h"
47 #include "draw/draw_context.h"
48 #include "tgsi/tgsi_dump.h"
49 #include "tgsi/tgsi_emulate.h"
50 #include "tgsi/tgsi_parse.h"
51 #include "tgsi/tgsi_ureg.h"
52
53 #include "st_debug.h"
54 #include "st_cb_bitmap.h"
55 #include "st_cb_drawpixels.h"
56 #include "st_context.h"
57 #include "st_tgsi_lower_yuv.h"
58 #include "st_program.h"
59 #include "st_mesa_to_tgsi.h"
60 #include "st_atifs_to_tgsi.h"
61 #include "st_nir.h"
62 #include "st_shader_cache.h"
63 #include "cso_cache/cso_context.h"
64
65
66
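/* Helper: OR the per-resource dirty flags for one shader stage into *states,
 * but only for resource types the program actually uses, so that binding the
 * program later dirties no more state groups than necessary.
 */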
67 static void
68 set_affected_state_flags(uint64_t *states,
69 struct gl_program *prog,
70 uint64_t new_constants,
71 uint64_t new_sampler_views,
72 uint64_t new_samplers,
73 uint64_t new_images,
74 uint64_t new_ubos,
75 uint64_t new_ssbos,
76 uint64_t new_atomics)
77 {
78 if (prog->Parameters->NumParameters)
79 *states |= new_constants;
80
81 if (prog->info.num_textures)
82 *states |= new_sampler_views | new_samplers;
83
84 if (prog->info.num_images)
85 *states |= new_images;
86
87 if (prog->info.num_ubos)
88 *states |= new_ubos;
89
90 if (prog->info.num_ssbos)
91 *states |= new_ssbos;
92
93 if (prog->info.num_abos)
94 *states |= new_atomics;
95 }
96
97 /**
98 * This determines which states will be updated when the shader is bound.
99 */
100 void
101 st_set_prog_affected_state_flags(struct gl_program *prog)
102 {
103 uint64_t *states;
104
105 switch (prog->info.stage) {
106 case MESA_SHADER_VERTEX:
107 states = &((struct st_vertex_program*)prog)->affected_states;
108
109 *states = ST_NEW_VS_STATE |
110 ST_NEW_RASTERIZER |
111 ST_NEW_VERTEX_ARRAYS;
112
113 set_affected_state_flags(states, prog,
114 ST_NEW_VS_CONSTANTS,
115 ST_NEW_VS_SAMPLER_VIEWS,
116 ST_NEW_VS_SAMPLERS,
117 ST_NEW_VS_IMAGES,
118 ST_NEW_VS_UBOS,
119 ST_NEW_VS_SSBOS,
120 ST_NEW_VS_ATOMICS);
121 break;
122
123 case MESA_SHADER_TESS_CTRL:
124 states = &(st_common_program(prog))->affected_states;
125
126 *states = ST_NEW_TCS_STATE;
127
128 set_affected_state_flags(states, prog,
129 ST_NEW_TCS_CONSTANTS,
130 ST_NEW_TCS_SAMPLER_VIEWS,
131 ST_NEW_TCS_SAMPLERS,
132 ST_NEW_TCS_IMAGES,
133 ST_NEW_TCS_UBOS,
134 ST_NEW_TCS_SSBOS,
135 ST_NEW_TCS_ATOMICS);
136 break;
137
138 case MESA_SHADER_TESS_EVAL:
139 states = &(st_common_program(prog))->affected_states;
140
141 *states = ST_NEW_TES_STATE |
142 ST_NEW_RASTERIZER;
143
144 set_affected_state_flags(states, prog,
145 ST_NEW_TES_CONSTANTS,
146 ST_NEW_TES_SAMPLER_VIEWS,
147 ST_NEW_TES_SAMPLERS,
148 ST_NEW_TES_IMAGES,
149 ST_NEW_TES_UBOS,
150 ST_NEW_TES_SSBOS,
151 ST_NEW_TES_ATOMICS);
152 break;
153
154 case MESA_SHADER_GEOMETRY:
155 states = &(st_common_program(prog))->affected_states;
156
157 *states = ST_NEW_GS_STATE |
158 ST_NEW_RASTERIZER;
159
160 set_affected_state_flags(states, prog,
161 ST_NEW_GS_CONSTANTS,
162 ST_NEW_GS_SAMPLER_VIEWS,
163 ST_NEW_GS_SAMPLERS,
164 ST_NEW_GS_IMAGES,
165 ST_NEW_GS_UBOS,
166 ST_NEW_GS_SSBOS,
167 ST_NEW_GS_ATOMICS);
168 break;
169
170 case MESA_SHADER_FRAGMENT:
171 states = &((struct st_fragment_program*)prog)->affected_states;
172
173 /* gl_FragCoord and glDrawPixels always use constants. */
174 *states = ST_NEW_FS_STATE |
175 ST_NEW_SAMPLE_SHADING |
176 ST_NEW_FS_CONSTANTS;
177
178 set_affected_state_flags(states, prog,
179 ST_NEW_FS_CONSTANTS,
180 ST_NEW_FS_SAMPLER_VIEWS,
181 ST_NEW_FS_SAMPLERS,
182 ST_NEW_FS_IMAGES,
183 ST_NEW_FS_UBOS,
184 ST_NEW_FS_SSBOS,
185 ST_NEW_FS_ATOMICS);
186 break;
187
188 case MESA_SHADER_COMPUTE:
189 states = &((struct st_compute_program*)prog)->affected_states;
190
191 *states = ST_NEW_CS_STATE;
192
193 set_affected_state_flags(states, prog,
194 ST_NEW_CS_CONSTANTS,
195 ST_NEW_CS_SAMPLER_VIEWS,
196 ST_NEW_CS_SAMPLERS,
197 ST_NEW_CS_IMAGES,
198 ST_NEW_CS_UBOS,
199 ST_NEW_CS_SSBOS,
200 ST_NEW_CS_ATOMICS);
201 break;
202
203 default:
204 unreachable("unhandled shader stage");
205 }
206 }
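/* A rough sketch of how the precomputed mask is meant to be consumed (the
 * actual call sites live in the bind/atom code, not in this file): when a
 * program is bound, the state tracker can simply do something like
 *
 *    st->dirty |= stfp->affected_states;
 *
 * instead of recomputing which constants/samplers/images/etc. need updating.
 */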
207
208 /**
209 * Delete a vertex program variant. Note the caller must unlink
210 * the variant from the linked list.
211 */
212 static void
213 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
214 {
215 if (vpv->driver_shader)
216 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
217
218 if (vpv->draw_shader)
219 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
220
  221    if (vpv->tgsi.type == PIPE_SHADER_IR_TGSI && vpv->tgsi.tokens)
222 ureg_free_tokens(vpv->tgsi.tokens);
223
224 free( vpv );
225 }
226
227
228
229 /**
  230  * Free all variants of a vertex program (clean out any old compilations).
231 */
232 void
233 st_release_vp_variants( struct st_context *st,
234 struct st_vertex_program *stvp )
235 {
236 struct st_vp_variant *vpv;
237
238 for (vpv = stvp->variants; vpv; ) {
239 struct st_vp_variant *next = vpv->next;
240 delete_vp_variant(st, vpv);
241 vpv = next;
242 }
243
244 stvp->variants = NULL;
245
246 if ((stvp->tgsi.type == PIPE_SHADER_IR_TGSI) && stvp->tgsi.tokens) {
247 tgsi_free_tokens(stvp->tgsi.tokens);
248 stvp->tgsi.tokens = NULL;
249 }
250 }
251
252
253
254 /**
255 * Delete a fragment program variant. Note the caller must unlink
256 * the variant from the linked list.
257 */
258 static void
259 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
260 {
261 if (fpv->driver_shader)
262 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
263 free(fpv);
264 }
265
266
267 /**
268 * Free all variants of a fragment program.
269 */
270 void
271 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
272 {
273 struct st_fp_variant *fpv;
274
275 for (fpv = stfp->variants; fpv; ) {
276 struct st_fp_variant *next = fpv->next;
277 delete_fp_variant(st, fpv);
278 fpv = next;
279 }
280
281 stfp->variants = NULL;
282
283 if ((stfp->tgsi.type == PIPE_SHADER_IR_TGSI) && stfp->tgsi.tokens) {
284 ureg_free_tokens(stfp->tgsi.tokens);
285 stfp->tgsi.tokens = NULL;
286 }
287 }
288
289
290 /**
291 * Delete a basic program variant. Note the caller must unlink
292 * the variant from the linked list.
293 */
294 static void
295 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
296 GLenum target)
297 {
298 if (v->driver_shader) {
299 switch (target) {
300 case GL_TESS_CONTROL_PROGRAM_NV:
301 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
302 break;
303 case GL_TESS_EVALUATION_PROGRAM_NV:
304 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
305 break;
306 case GL_GEOMETRY_PROGRAM_NV:
307 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
308 break;
309 case GL_COMPUTE_PROGRAM_NV:
310 cso_delete_compute_shader(st->cso_context, v->driver_shader);
311 break;
312 default:
313 assert(!"this shouldn't occur");
314 }
315 }
316
317 free(v);
318 }
319
320
321 /**
322 * Free all basic program variants.
323 */
324 void
325 st_release_basic_variants(struct st_context *st, GLenum target,
326 struct st_basic_variant **variants,
327 struct pipe_shader_state *tgsi)
328 {
329 struct st_basic_variant *v;
330
331 for (v = *variants; v; ) {
332 struct st_basic_variant *next = v->next;
333 delete_basic_variant(st, v, target);
334 v = next;
335 }
336
337 *variants = NULL;
338
339 if (tgsi->tokens) {
340 ureg_free_tokens(tgsi->tokens);
341 tgsi->tokens = NULL;
342 }
343 }
344
345
346 /**
347 * Free all variants of a compute program.
348 */
349 void
350 st_release_cp_variants(struct st_context *st, struct st_compute_program *stcp)
351 {
352 struct st_basic_variant **variants = &stcp->variants;
353 struct st_basic_variant *v;
354
355 for (v = *variants; v; ) {
356 struct st_basic_variant *next = v->next;
357 delete_basic_variant(st, v, stcp->Base.Target);
358 v = next;
359 }
360
361 *variants = NULL;
362
363 if (stcp->tgsi.prog) {
364 switch (stcp->tgsi.ir_type) {
365 case PIPE_SHADER_IR_TGSI:
366 ureg_free_tokens(stcp->tgsi.prog);
367 stcp->tgsi.prog = NULL;
368 break;
369 case PIPE_SHADER_IR_NIR:
370 /* pipe driver took ownership of prog */
371 break;
372 case PIPE_SHADER_IR_NATIVE:
373 /* ??? */
374 stcp->tgsi.prog = NULL;
375 break;
376 }
377 }
378 }
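/* Note the ownership asymmetry above: TGSI tokens are allocated and freed by
 * the state tracker, ownership of a NIR shader passes to the pipe driver when
 * the compute state is created, and the PIPE_SHADER_IR_NATIVE case only drops
 * the pointer.
 */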
379
380 /**
381 * Translate a vertex program.
382 */
383 bool
384 st_translate_vertex_program(struct st_context *st,
385 struct st_vertex_program *stvp)
386 {
387 struct ureg_program *ureg;
388 enum pipe_error error;
389 unsigned num_outputs = 0;
390 unsigned attr;
391 ubyte input_to_index[VERT_ATTRIB_MAX] = {0};
392 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
393 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
394
395 stvp->num_inputs = 0;
396
397 if (stvp->Base.arb.IsPositionInvariant)
398 _mesa_insert_mvp_code(st->ctx, &stvp->Base);
399
400 /*
401 * Determine number of inputs, the mappings between VERT_ATTRIB_x
402 * and TGSI generic input indexes, plus input attrib semantic info.
403 */
404 for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
405 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
406 input_to_index[attr] = stvp->num_inputs;
407 stvp->index_to_input[stvp->num_inputs] = attr;
408 stvp->num_inputs++;
409 if ((stvp->Base.info.vs.double_inputs_read &
410 BITFIELD64_BIT(attr)) != 0) {
411 /* add placeholder for second part of a double attribute */
412 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
413 stvp->num_inputs++;
414 }
415 }
416 }
  417    /* Bit of a hack: pre-set up the potentially unused edgeflag input. */
418 input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
419 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
420
421 /* Compute mapping of vertex program outputs to slots.
422 */
423 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
424 if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
425 stvp->result_to_output[attr] = ~0;
426 }
427 else {
428 unsigned slot = num_outputs++;
429
430 stvp->result_to_output[attr] = slot;
431
432 unsigned semantic_name, semantic_index;
433 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
434 &semantic_name, &semantic_index);
435 output_semantic_name[slot] = semantic_name;
436 output_semantic_index[slot] = semantic_index;
437 }
438 }
  439    /* Similar hack to the above: pre-set up the potentially unused edgeflag output. */
440 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
441 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
442 output_semantic_index[num_outputs] = 0;
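   /* Reserving these edgeflag slots even when the program never touches them
    * lets st_create_vp_variant() append a passthrough edgeflag later (see
    * key->passthrough_edgeflags below) without recomputing the input/output
    * mappings per variant.
    */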
443
444 /* ARB_vp: */
445 if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
446 _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
447
448 /* This determines which states will be updated when the assembly
449 * shader is bound.
450 */
451 stvp->affected_states = ST_NEW_VS_STATE |
452 ST_NEW_RASTERIZER |
453 ST_NEW_VERTEX_ARRAYS;
454
455 if (stvp->Base.Parameters->NumParameters)
456 stvp->affected_states |= ST_NEW_VS_CONSTANTS;
457
458 /* No samplers are allowed in ARB_vp. */
459 }
460
461 if (stvp->shader_program) {
462 struct gl_program *prog = stvp->shader_program->last_vert_prog;
463 if (prog) {
464 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
465 stvp->result_to_output,
466 &stvp->tgsi.stream_output);
467 }
468
469 st_store_ir_in_disk_cache(st, &stvp->Base, true);
470 return true;
471 }
472
473 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
474 if (ureg == NULL)
475 return false;
476
477 if (stvp->Base.info.clip_distance_array_size)
478 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
479 stvp->Base.info.clip_distance_array_size);
480 if (stvp->Base.info.cull_distance_array_size)
481 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
482 stvp->Base.info.cull_distance_array_size);
483
484 if (ST_DEBUG & DEBUG_MESA) {
485 _mesa_print_program(&stvp->Base);
486 _mesa_print_program_parameters(st->ctx, &stvp->Base);
487 debug_printf("\n");
488 }
489
490 if (stvp->glsl_to_tgsi) {
491 error = st_translate_program(st->ctx,
492 PIPE_SHADER_VERTEX,
493 ureg,
494 stvp->glsl_to_tgsi,
495 &stvp->Base,
496 /* inputs */
497 stvp->num_inputs,
498 input_to_index,
499 NULL, /* inputSlotToAttr */
500 NULL, /* input semantic name */
501 NULL, /* input semantic index */
502 NULL, /* interp mode */
503 /* outputs */
504 num_outputs,
505 stvp->result_to_output,
506 output_semantic_name,
507 output_semantic_index);
508
509 st_translate_stream_output_info(stvp->glsl_to_tgsi,
510 stvp->result_to_output,
511 &stvp->tgsi.stream_output);
512
513 free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
514 } else
515 error = st_translate_mesa_program(st->ctx,
516 PIPE_SHADER_VERTEX,
517 ureg,
518 &stvp->Base,
519 /* inputs */
520 stvp->num_inputs,
521 input_to_index,
522 NULL, /* input semantic name */
523 NULL, /* input semantic index */
524 NULL,
525 /* outputs */
526 num_outputs,
527 stvp->result_to_output,
528 output_semantic_name,
529 output_semantic_index);
530
531 if (error) {
532 debug_printf("%s: failed to translate Mesa program:\n", __func__);
533 _mesa_print_program(&stvp->Base);
534 debug_assert(0);
535 return false;
536 }
537
538 stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
539 ureg_destroy(ureg);
540
541 if (stvp->glsl_to_tgsi) {
542 stvp->glsl_to_tgsi = NULL;
543 st_store_ir_in_disk_cache(st, &stvp->Base, false);
544 }
545
546 return stvp->tgsi.tokens != NULL;
547 }
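/* Translation produces a single set of TGSI tokens (or leaves the previously
 * generated NIR in place); the per-context, per-key specializations are built
 * from that IR in st_create_vp_variant() below.
 */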
548
549 static struct st_vp_variant *
550 st_create_vp_variant(struct st_context *st,
551 struct st_vertex_program *stvp,
552 const struct st_vp_variant_key *key)
553 {
554 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
555 struct pipe_context *pipe = st->pipe;
556
557 vpv->key = *key;
558 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
559 vpv->num_inputs = stvp->num_inputs;
560
561 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
562 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
563 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
564 if (key->clamp_color)
565 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
566 if (key->passthrough_edgeflags) {
567 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
568 vpv->num_inputs++;
569 }
570
571 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
572 vpv->tgsi.ir.nir);
573
574 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
575 /* driver takes ownership of IR: */
576 vpv->tgsi.ir.nir = NULL;
577 return vpv;
578 }
579
580 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
581
582 /* Emulate features. */
583 if (key->clamp_color || key->passthrough_edgeflags) {
584 const struct tgsi_token *tokens;
585 unsigned flags =
586 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
587 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
588
589 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
590
591 if (tokens) {
592 tgsi_free_tokens(vpv->tgsi.tokens);
593 vpv->tgsi.tokens = tokens;
594
595 if (key->passthrough_edgeflags)
596 vpv->num_inputs++;
597 } else
598 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
599 }
600
601 if (ST_DEBUG & DEBUG_TGSI) {
602 tgsi_dump(vpv->tgsi.tokens, 0);
603 debug_printf("\n");
604 }
605
606 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
607 return vpv;
608 }
609
610
611 /**
612 * Find/create a vertex program variant.
613 */
614 struct st_vp_variant *
615 st_get_vp_variant(struct st_context *st,
616 struct st_vertex_program *stvp,
617 const struct st_vp_variant_key *key)
618 {
619 struct st_vp_variant *vpv;
620
621 /* Search for existing variant */
622 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
623 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
624 break;
625 }
626 }
627
628 if (!vpv) {
629 /* create now */
630 vpv = st_create_vp_variant(st, stvp, key);
631 if (vpv) {
632 /* insert into list */
633 vpv->next = stvp->variants;
634 stvp->variants = vpv;
635 }
636 }
637
638 return vpv;
639 }
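/* For example, st_precompile_shader_variant() at the bottom of this file
 * builds a zeroed key (with key.st pointing at the context only when shaders
 * are not shareable) and calls st_get_vp_variant() to warm the default
 * variant:
 *
 *    struct st_vp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    st_get_vp_variant(st, p, &key);
 */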
640
641
642 /**
643 * Translate a Mesa fragment shader into a TGSI shader.
644 */
645 bool
646 st_translate_fragment_program(struct st_context *st,
647 struct st_fragment_program *stfp)
648 {
649 /* We have already compiled to NIR so just return */
650 if (stfp->shader_program) {
651 st_store_ir_in_disk_cache(st, &stfp->Base, true);
652 return true;
653 }
654
655 ubyte outputMapping[2 * FRAG_RESULT_MAX];
656 ubyte inputMapping[VARYING_SLOT_MAX];
657 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
658 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
659 GLuint attr;
660 GLbitfield64 inputsRead;
661 struct ureg_program *ureg;
662
663 GLboolean write_all = GL_FALSE;
664
665 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
666 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
667 uint fs_num_inputs = 0;
668
669 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
670 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
671 uint fs_num_outputs = 0;
672
673 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
674
675 /* Non-GLSL programs: */
676 if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
677 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
678 if (st->ctx->Const.GLSLFragCoordIsSysVal)
679 _mesa_program_fragment_position_to_sysval(&stfp->Base);
680
681 /* This determines which states will be updated when the assembly
682 * shader is bound.
683 *
684 * fragment.position and glDrawPixels always use constants.
685 */
686 stfp->affected_states = ST_NEW_FS_STATE |
687 ST_NEW_SAMPLE_SHADING |
688 ST_NEW_FS_CONSTANTS;
689
690 if (stfp->ati_fs) {
691 /* Just set them for ATI_fs unconditionally. */
692 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
693 ST_NEW_FS_SAMPLERS;
694 } else {
695 /* ARB_fp */
696 if (stfp->Base.SamplersUsed)
697 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
698 ST_NEW_FS_SAMPLERS;
699 }
700 }
701
702 /*
703 * Convert Mesa program inputs to TGSI input register semantics.
704 */
705 inputsRead = stfp->Base.info.inputs_read;
706 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
707 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
708 const GLuint slot = fs_num_inputs++;
709
710 inputMapping[attr] = slot;
711 inputSlotToAttr[slot] = attr;
712
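         /* Pick a TGSI semantic and interpolation mode per input.  For the
          * glsl_to_tgsi path, TGSI_INTERPOLATE_COUNT is used as an
          * "unspecified" sentinel so the later translation can apply the
          * shader's own interpolation qualifiers; fixed-function inputs get
          * an explicit mode here.
          */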
713 switch (attr) {
714 case VARYING_SLOT_POS:
715 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
716 input_semantic_index[slot] = 0;
717 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
718 break;
719 case VARYING_SLOT_COL0:
720 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
721 input_semantic_index[slot] = 0;
722 interpMode[slot] = stfp->glsl_to_tgsi ?
723 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
724 break;
725 case VARYING_SLOT_COL1:
726 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
727 input_semantic_index[slot] = 1;
728 interpMode[slot] = stfp->glsl_to_tgsi ?
729 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
730 break;
731 case VARYING_SLOT_FOGC:
732 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
733 input_semantic_index[slot] = 0;
734 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
735 break;
736 case VARYING_SLOT_FACE:
737 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
738 input_semantic_index[slot] = 0;
739 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
740 break;
741 case VARYING_SLOT_PRIMITIVE_ID:
742 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
743 input_semantic_index[slot] = 0;
744 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
745 break;
746 case VARYING_SLOT_LAYER:
747 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
748 input_semantic_index[slot] = 0;
749 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
750 break;
751 case VARYING_SLOT_VIEWPORT:
752 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
753 input_semantic_index[slot] = 0;
754 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
755 break;
756 case VARYING_SLOT_CLIP_DIST0:
757 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
758 input_semantic_index[slot] = 0;
759 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
760 break;
761 case VARYING_SLOT_CLIP_DIST1:
762 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
763 input_semantic_index[slot] = 1;
764 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
765 break;
766 case VARYING_SLOT_CULL_DIST0:
767 case VARYING_SLOT_CULL_DIST1:
768 /* these should have been lowered by GLSL */
769 assert(0);
770 break;
771 /* In most cases, there is nothing special about these
772 * inputs, so adopt a convention to use the generic
773 * semantic name and the mesa VARYING_SLOT_ number as the
774 * index.
775 *
776 * All that is required is that the vertex shader labels
777 * its own outputs similarly, and that the vertex shader
778 * generates at least every output required by the
779 * fragment shader plus fixed-function hardware (such as
780 * BFC).
781 *
782 * However, some drivers may need us to identify the PNTC and TEXi
783 * varyings if, for example, their capability to replace them with
784 * sprite coordinates is limited.
785 */
786 case VARYING_SLOT_PNTC:
787 if (st->needs_texcoord_semantic) {
788 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
789 input_semantic_index[slot] = 0;
790 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
791 break;
792 }
793 /* fall through */
794 case VARYING_SLOT_TEX0:
795 case VARYING_SLOT_TEX1:
796 case VARYING_SLOT_TEX2:
797 case VARYING_SLOT_TEX3:
798 case VARYING_SLOT_TEX4:
799 case VARYING_SLOT_TEX5:
800 case VARYING_SLOT_TEX6:
801 case VARYING_SLOT_TEX7:
802 if (st->needs_texcoord_semantic) {
803 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
804 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
805 interpMode[slot] = stfp->glsl_to_tgsi ?
806 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
807 break;
808 }
809 /* fall through */
810 case VARYING_SLOT_VAR0:
811 default:
812 /* Semantic indices should be zero-based because drivers may choose
813 * to assign a fixed slot determined by that index.
814 * This is useful because ARB_separate_shader_objects uses location
815 * qualifiers for linkage, and if the semantic index corresponds to
  816           * these locations, linkage passes in the driver become unnecessary.
817 *
818 * If needs_texcoord_semantic is true, no semantic indices will be
819 * consumed for the TEXi varyings, and we can base the locations of
820 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
821 */
822 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
823 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
824 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
825 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
826 if (attr == VARYING_SLOT_PNTC)
827 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
828 else {
829 interpMode[slot] = stfp->glsl_to_tgsi ?
830 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
831 }
832 break;
833 }
834 }
835 else {
836 inputMapping[attr] = -1;
837 }
838 }
839
840 /*
841 * Semantics and mapping for outputs
842 */
843 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
844
845 /* if z is written, emit that first */
846 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
847 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
848 fs_output_semantic_index[fs_num_outputs] = 0;
849 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
850 fs_num_outputs++;
  851       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_DEPTH);
852 }
853
854 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
855 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
856 fs_output_semantic_index[fs_num_outputs] = 0;
857 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
858 fs_num_outputs++;
  859       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_STENCIL);
860 }
861
862 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
863 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
864 fs_output_semantic_index[fs_num_outputs] = 0;
865 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
866 fs_num_outputs++;
  867       outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
868 }
869
870 /* handle remaining outputs (color) */
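   /* outputMapping is indexed twice through the FRAG_RESULT_* range: the
    * first FRAG_RESULT_MAX entries come from outputs_written, the second
    * half from SecondaryOutputsWritten (dual-source blend outputs, which
    * get TGSI_SEMANTIC_COLOR with semantic index 1 below).
    */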
871 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
872 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
873 stfp->Base.SecondaryOutputsWritten;
874 const unsigned loc = attr % FRAG_RESULT_MAX;
875
876 if (written & BITFIELD64_BIT(loc)) {
877 switch (loc) {
878 case FRAG_RESULT_DEPTH:
879 case FRAG_RESULT_STENCIL:
880 case FRAG_RESULT_SAMPLE_MASK:
881 /* handled above */
882 assert(0);
883 break;
884 case FRAG_RESULT_COLOR:
885 write_all = GL_TRUE; /* fallthrough */
886 default: {
887 int index;
888 assert(loc == FRAG_RESULT_COLOR ||
889 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
890
891 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
892
893 if (attr >= FRAG_RESULT_MAX) {
894 /* Secondary color for dual source blending. */
895 assert(index == 0);
896 index++;
897 }
898
899 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
900 fs_output_semantic_index[fs_num_outputs] = index;
901 outputMapping[attr] = fs_num_outputs;
902 break;
903 }
904 }
905
906 fs_num_outputs++;
907 }
908 }
909
910 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
911 if (ureg == NULL)
912 return false;
913
914 if (ST_DEBUG & DEBUG_MESA) {
915 _mesa_print_program(&stfp->Base);
916 _mesa_print_program_parameters(st->ctx, &stfp->Base);
917 debug_printf("\n");
918 }
919 if (write_all == GL_TRUE)
920 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
921
922 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
923 switch (stfp->Base.info.fs.depth_layout) {
924 case FRAG_DEPTH_LAYOUT_ANY:
925 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
926 TGSI_FS_DEPTH_LAYOUT_ANY);
927 break;
928 case FRAG_DEPTH_LAYOUT_GREATER:
929 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
930 TGSI_FS_DEPTH_LAYOUT_GREATER);
931 break;
932 case FRAG_DEPTH_LAYOUT_LESS:
933 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
934 TGSI_FS_DEPTH_LAYOUT_LESS);
935 break;
936 case FRAG_DEPTH_LAYOUT_UNCHANGED:
937 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
938 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
939 break;
940 default:
941 assert(0);
942 }
943 }
944
945 if (stfp->glsl_to_tgsi) {
946 st_translate_program(st->ctx,
947 PIPE_SHADER_FRAGMENT,
948 ureg,
949 stfp->glsl_to_tgsi,
950 &stfp->Base,
951 /* inputs */
952 fs_num_inputs,
953 inputMapping,
954 inputSlotToAttr,
955 input_semantic_name,
956 input_semantic_index,
957 interpMode,
958 /* outputs */
959 fs_num_outputs,
960 outputMapping,
961 fs_output_semantic_name,
962 fs_output_semantic_index);
963
964 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
965 } else if (stfp->ati_fs)
966 st_translate_atifs_program(ureg,
967 stfp->ati_fs,
968 &stfp->Base,
969 /* inputs */
970 fs_num_inputs,
971 inputMapping,
972 input_semantic_name,
973 input_semantic_index,
974 interpMode,
975 /* outputs */
976 fs_num_outputs,
977 outputMapping,
978 fs_output_semantic_name,
979 fs_output_semantic_index);
980 else
981 st_translate_mesa_program(st->ctx,
982 PIPE_SHADER_FRAGMENT,
983 ureg,
984 &stfp->Base,
985 /* inputs */
986 fs_num_inputs,
987 inputMapping,
988 input_semantic_name,
989 input_semantic_index,
990 interpMode,
991 /* outputs */
992 fs_num_outputs,
993 outputMapping,
994 fs_output_semantic_name,
995 fs_output_semantic_index);
996
997 stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
998 ureg_destroy(ureg);
999
1000 if (stfp->glsl_to_tgsi) {
1001 stfp->glsl_to_tgsi = NULL;
1002 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1003 }
1004
1005 return stfp->tgsi.tokens != NULL;
1006 }
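/* Fragment outputs are emitted in a fixed order: depth, stencil and sample
 * mask (when written) come first, followed by the color outputs.  Writing
 * gl_FragColor (FRAG_RESULT_COLOR) sets the COLOR0_WRITES_ALL_CBUFS property
 * rather than expanding to one output per color buffer.
 */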
1007
1008 static struct st_fp_variant *
1009 st_create_fp_variant(struct st_context *st,
1010 struct st_fragment_program *stfp,
1011 const struct st_fp_variant_key *key)
1012 {
1013 struct pipe_context *pipe = st->pipe;
1014 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1015 struct pipe_shader_state tgsi = {0};
1016 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1017 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1018 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1019 static const gl_state_index16 scale_state[STATE_LENGTH] =
1020 { STATE_INTERNAL, STATE_PT_SCALE };
1021 static const gl_state_index16 bias_state[STATE_LENGTH] =
1022 { STATE_INTERNAL, STATE_PT_BIAS };
1023
1024 if (!variant)
1025 return NULL;
1026
1027 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1028 tgsi.type = PIPE_SHADER_IR_NIR;
1029 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1030
1031 if (key->clamp_color)
1032 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1033
1034 if (key->persample_shading) {
1035 nir_shader *shader = tgsi.ir.nir;
1036 nir_foreach_variable(var, &shader->inputs)
1037 var->data.sample = true;
1038 }
1039
1040 assert(!(key->bitmap && key->drawpixels));
1041
1042 /* glBitmap */
1043 if (key->bitmap) {
1044 nir_lower_bitmap_options options = {0};
1045
1046 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1047 options.sampler = variant->bitmap_sampler;
1048 options.swizzle_xxxx = (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM);
1049
1050 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1051 }
1052
1053 /* glDrawPixels (color only) */
1054 if (key->drawpixels) {
1055 nir_lower_drawpixels_options options = {{0}};
1056 unsigned samplers_used = stfp->Base.SamplersUsed;
1057
1058 /* Find the first unused slot. */
1059 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1060 options.drawpix_sampler = variant->drawpix_sampler;
1061 samplers_used |= (1 << variant->drawpix_sampler);
1062
1063 options.pixel_maps = key->pixelMaps;
1064 if (key->pixelMaps) {
1065 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1066 options.pixelmap_sampler = variant->pixelmap_sampler;
1067 }
1068
1069 options.scale_and_bias = key->scaleAndBias;
1070 if (key->scaleAndBias) {
1071 _mesa_add_state_reference(params, scale_state);
1072 memcpy(options.scale_state_tokens, scale_state,
1073 sizeof(options.scale_state_tokens));
1074 _mesa_add_state_reference(params, bias_state);
1075 memcpy(options.bias_state_tokens, bias_state,
1076 sizeof(options.bias_state_tokens));
1077 }
1078
1079 _mesa_add_state_reference(params, texcoord_state);
1080 memcpy(options.texcoord_state_tokens, texcoord_state,
1081 sizeof(options.texcoord_state_tokens));
1082
1083 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1084 }
1085
1086 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1087 nir_lower_tex_options options = {0};
1088 options.lower_y_uv_external = key->external.lower_nv12;
1089 options.lower_y_u_v_external = key->external.lower_iyuv;
1090 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1091 }
1092
1093 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1094
1095 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1096 /* This pass needs to happen *after* nir_lower_sampler */
1097 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1098 ~stfp->Base.SamplersUsed,
1099 key->external.lower_nv12,
1100 key->external.lower_iyuv);
1101 }
1102
1103 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1104 variant->key = *key;
1105
1106 return variant;
1107 }
1108
1109 tgsi.tokens = stfp->tgsi.tokens;
1110
1111 assert(!(key->bitmap && key->drawpixels));
1112
1113 /* Fix texture targets and add fog for ATI_fs */
1114 if (stfp->ati_fs) {
1115 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1116
1117 if (tokens)
1118 tgsi.tokens = tokens;
1119 else
1120 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1121 }
1122
1123 /* Emulate features. */
1124 if (key->clamp_color || key->persample_shading) {
1125 const struct tgsi_token *tokens;
1126 unsigned flags =
1127 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1128 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1129
1130 tokens = tgsi_emulate(tgsi.tokens, flags);
1131
1132 if (tokens) {
1133 if (tgsi.tokens != stfp->tgsi.tokens)
1134 tgsi_free_tokens(tgsi.tokens);
1135 tgsi.tokens = tokens;
1136 } else
1137 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1138 }
1139
1140 /* glBitmap */
1141 if (key->bitmap) {
1142 const struct tgsi_token *tokens;
1143
1144 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1145
1146 tokens = st_get_bitmap_shader(tgsi.tokens,
1147 st->internal_target,
1148 variant->bitmap_sampler,
1149 st->needs_texcoord_semantic,
1150 st->bitmap.tex_format ==
1151 PIPE_FORMAT_L8_UNORM);
1152
1153 if (tokens) {
1154 if (tgsi.tokens != stfp->tgsi.tokens)
1155 tgsi_free_tokens(tgsi.tokens);
1156 tgsi.tokens = tokens;
1157 } else
1158 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1159 }
1160
1161 /* glDrawPixels (color only) */
1162 if (key->drawpixels) {
1163 const struct tgsi_token *tokens;
1164 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1165
1166 /* Find the first unused slot. */
1167 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1168
1169 if (key->pixelMaps) {
1170 unsigned samplers_used = stfp->Base.SamplersUsed |
1171 (1 << variant->drawpix_sampler);
1172
1173 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1174 }
1175
1176 if (key->scaleAndBias) {
1177 scale_const = _mesa_add_state_reference(params, scale_state);
1178 bias_const = _mesa_add_state_reference(params, bias_state);
1179 }
1180
1181 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1182
1183 tokens = st_get_drawpix_shader(tgsi.tokens,
1184 st->needs_texcoord_semantic,
1185 key->scaleAndBias, scale_const,
1186 bias_const, key->pixelMaps,
1187 variant->drawpix_sampler,
1188 variant->pixelmap_sampler,
1189 texcoord_const, st->internal_target);
1190
1191 if (tokens) {
1192 if (tgsi.tokens != stfp->tgsi.tokens)
1193 tgsi_free_tokens(tgsi.tokens);
1194 tgsi.tokens = tokens;
1195 } else
1196 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1197 }
1198
1199 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1200 const struct tgsi_token *tokens;
1201
 1202       /* samplers inserted would conflict, but this should be impossible: */
1203 assert(!(key->bitmap || key->drawpixels));
1204
1205 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1206 ~stfp->Base.SamplersUsed,
1207 key->external.lower_nv12,
1208 key->external.lower_iyuv);
1209 if (tokens) {
1210 if (tgsi.tokens != stfp->tgsi.tokens)
1211 tgsi_free_tokens(tgsi.tokens);
1212 tgsi.tokens = tokens;
1213 } else {
1214 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1215 }
1216 }
1217
1218 if (ST_DEBUG & DEBUG_TGSI) {
1219 tgsi_dump(tgsi.tokens, 0);
1220 debug_printf("\n");
1221 }
1222
1223 /* fill in variant */
1224 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1225 variant->key = *key;
1226
1227 if (tgsi.tokens != stfp->tgsi.tokens)
1228 tgsi_free_tokens(tgsi.tokens);
1229 return variant;
1230 }
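/* Variant creation is where all key-driven fixups happen: color clamping and
 * per-sample shading, the glBitmap and glDrawPixels helpers, and external
 * (NV12/IYUV) texture lowering.  On the NIR path these are NIR passes run on
 * a clone of the shader; on the TGSI path they are token-to-token transforms,
 * with each intermediate token buffer freed once the next transform replaces it.
 */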
1231
1232 /**
1233 * Translate fragment program if needed.
1234 */
1235 struct st_fp_variant *
1236 st_get_fp_variant(struct st_context *st,
1237 struct st_fragment_program *stfp,
1238 const struct st_fp_variant_key *key)
1239 {
1240 struct st_fp_variant *fpv;
1241
1242 /* Search for existing variant */
1243 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1244 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1245 break;
1246 }
1247 }
1248
1249 if (!fpv) {
1250 /* create new */
1251 fpv = st_create_fp_variant(st, stfp, key);
1252 if (fpv) {
1253 if (key->bitmap || key->drawpixels) {
1254 /* Regular variants should always come before the
 1255                * bitmap & drawpixels variants (unless there
 1256                * are no regular variants), so that
1257 * st_update_fp can take a fast path when
1258 * shader_has_one_variant is set.
1259 */
1260 if (!stfp->variants) {
1261 stfp->variants = fpv;
1262 } else {
1263 /* insert into list after the first one */
1264 fpv->next = stfp->variants->next;
1265 stfp->variants->next = fpv;
1266 }
1267 } else {
1268 /* insert into list */
1269 fpv->next = stfp->variants;
1270 stfp->variants = fpv;
1271 }
1272 }
1273 }
1274
1275 return fpv;
1276 }
1277
1278
1279 /**
 1280  * Translate a program. This is common code for geometry, tessellation, and
 1281  * compute shaders.
1282 */
1283 static void
1284 st_translate_program_common(struct st_context *st,
1285 struct gl_program *prog,
1286 struct glsl_to_tgsi_visitor *glsl_to_tgsi,
1287 struct ureg_program *ureg,
1288 unsigned tgsi_processor,
1289 struct pipe_shader_state *out_state)
1290 {
1291 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1292 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1293 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1294 GLuint attr;
1295
1296 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1297 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1298 uint num_inputs = 0;
1299
1300 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1301 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1302 uint num_outputs = 0;
1303
1304 GLint i;
1305
1306 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1307 memset(inputMapping, 0, sizeof(inputMapping));
1308 memset(outputMapping, 0, sizeof(outputMapping));
1309 memset(out_state, 0, sizeof(*out_state));
1310
1311 if (prog->info.clip_distance_array_size)
1312 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1313 prog->info.clip_distance_array_size);
1314 if (prog->info.cull_distance_array_size)
1315 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1316 prog->info.cull_distance_array_size);
1317
1318 /*
1319 * Convert Mesa program inputs to TGSI input register semantics.
1320 */
1321 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1322 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1323 continue;
1324
1325 unsigned slot = num_inputs++;
1326
1327 inputMapping[attr] = slot;
1328 inputSlotToAttr[slot] = attr;
1329
1330 unsigned semantic_name, semantic_index;
1331 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1332 &semantic_name, &semantic_index);
1333 input_semantic_name[slot] = semantic_name;
1334 input_semantic_index[slot] = semantic_index;
1335 }
1336
1337 /* Also add patch inputs. */
1338 for (attr = 0; attr < 32; attr++) {
1339 if (prog->info.patch_inputs_read & (1u << attr)) {
1340 GLuint slot = num_inputs++;
1341 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1342
1343 inputMapping[patch_attr] = slot;
1344 inputSlotToAttr[slot] = patch_attr;
1345 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1346 input_semantic_index[slot] = attr;
1347 }
1348 }
1349
1350 /* initialize output semantics to defaults */
1351 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1352 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1353 output_semantic_index[i] = 0;
1354 }
1355
1356 /*
1357 * Determine number of outputs, the (default) output register
1358 * mapping and the semantic information for each output.
1359 */
1360 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1361 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1362 GLuint slot = num_outputs++;
1363
1364 outputMapping[attr] = slot;
1365
1366 unsigned semantic_name, semantic_index;
1367 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1368 &semantic_name, &semantic_index);
1369 output_semantic_name[slot] = semantic_name;
1370 output_semantic_index[slot] = semantic_index;
1371 }
1372 }
1373
1374 /* Also add patch outputs. */
1375 for (attr = 0; attr < 32; attr++) {
1376 if (prog->info.patch_outputs_written & (1u << attr)) {
1377 GLuint slot = num_outputs++;
1378 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1379
1380 outputMapping[patch_attr] = slot;
1381 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1382 output_semantic_index[slot] = attr;
1383 }
1384 }
1385
1386 st_translate_program(st->ctx,
1387 tgsi_processor,
1388 ureg,
1389 glsl_to_tgsi,
1390 prog,
1391 /* inputs */
1392 num_inputs,
1393 inputMapping,
1394 inputSlotToAttr,
1395 input_semantic_name,
1396 input_semantic_index,
1397 NULL,
1398 /* outputs */
1399 num_outputs,
1400 outputMapping,
1401 output_semantic_name,
1402 output_semantic_index);
1403
1404 if (tgsi_processor == PIPE_SHADER_COMPUTE) {
1405 struct st_compute_program *stcp = (struct st_compute_program *) prog;
1406 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1407 stcp->tgsi.prog = out_state->tokens;
1408 } else {
1409 struct st_common_program *stcp = (struct st_common_program *) prog;
1410 out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
1411 }
1412 ureg_destroy(ureg);
1413
1414 st_translate_stream_output_info(glsl_to_tgsi,
1415 outputMapping,
1416 &out_state->stream_output);
1417
1418 st_store_ir_in_disk_cache(st, prog, false);
1419
1420 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1421 _mesa_print_program(prog);
1422 debug_printf("\n");
1423 }
1424
1425 if (ST_DEBUG & DEBUG_TGSI) {
1426 tgsi_dump(out_state->tokens, 0);
1427 debug_printf("\n");
1428 }
1429 }
1430
1431 /**
1432 * Update stream-output info for GS/TCS/TES. Normally this is done in
 1433  * st_translate_program_common(), but that is not called in the
 1434  * glsl_to_nir case.
1435 */
1436 static void
1437 st_translate_program_stream_output(struct gl_program *prog,
1438 struct pipe_stream_output_info *stream_output)
1439 {
1440 if (!prog->sh.LinkedTransformFeedback)
1441 return;
1442
1443 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1444 GLuint attr;
1445 uint num_outputs = 0;
1446
1447 memset(outputMapping, 0, sizeof(outputMapping));
1448
1449 /*
1450 * Determine number of outputs, the (default) output register
1451 * mapping and the semantic information for each output.
1452 */
1453 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1454 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1455 GLuint slot = num_outputs++;
1456
1457 outputMapping[attr] = slot;
1458 }
1459 }
1460
1461 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
1462 outputMapping,
1463 stream_output);
1464 }
1465
1466 /**
1467 * Translate a geometry program to create a new variant.
1468 */
1469 bool
1470 st_translate_geometry_program(struct st_context *st,
1471 struct st_common_program *stgp)
1472 {
1473 struct ureg_program *ureg;
1474
1475 /* We have already compiled to NIR so just return */
1476 if (stgp->shader_program) {
1477 /* No variants */
1478 st_finalize_nir(st, &stgp->Base, stgp->shader_program,
1479 stgp->tgsi.ir.nir);
1480 st_translate_program_stream_output(&stgp->Base, &stgp->tgsi.stream_output);
1481 st_store_ir_in_disk_cache(st, &stgp->Base, true);
1482 return true;
1483 }
1484
1485 ureg = ureg_create_with_screen(PIPE_SHADER_GEOMETRY, st->pipe->screen);
1486 if (ureg == NULL)
1487 return false;
1488
1489 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1490 stgp->Base.info.gs.input_primitive);
1491 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1492 stgp->Base.info.gs.output_primitive);
1493 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1494 stgp->Base.info.gs.vertices_out);
1495 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1496 stgp->Base.info.gs.invocations);
1497
1498 st_translate_program_common(st, &stgp->Base, stgp->glsl_to_tgsi, ureg,
1499 PIPE_SHADER_GEOMETRY, &stgp->tgsi);
1500
1501 free_glsl_to_tgsi_visitor(stgp->glsl_to_tgsi);
1502 stgp->glsl_to_tgsi = NULL;
1503 return true;
1504 }
1505
1506
1507 /**
1508 * Get/create a basic program variant.
1509 */
1510 struct st_basic_variant *
1511 st_get_basic_variant(struct st_context *st,
1512 unsigned pipe_shader,
1513 struct st_common_program *prog)
1514 {
1515 struct pipe_context *pipe = st->pipe;
1516 struct st_basic_variant *v;
1517 struct st_basic_variant_key key;
1518 struct pipe_shader_state tgsi = {0};
1519 memset(&key, 0, sizeof(key));
1520 key.st = st->has_shareable_shaders ? NULL : st;
1521
1522 /* Search for existing variant */
1523 for (v = prog->variants; v; v = v->next) {
1524 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1525 break;
1526 }
1527 }
1528
1529 if (!v) {
1530 /* create new */
1531 v = CALLOC_STRUCT(st_basic_variant);
1532 if (v) {
1533
1534 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1535 tgsi.type = PIPE_SHADER_IR_NIR;
1536 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1537 tgsi.stream_output = prog->tgsi.stream_output;
1538 } else
1539 tgsi = prog->tgsi;
1540 /* fill in new variant */
1541 switch (pipe_shader) {
1542 case PIPE_SHADER_TESS_CTRL:
1543 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1544 break;
1545 case PIPE_SHADER_TESS_EVAL:
1546 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1547 break;
1548 case PIPE_SHADER_GEOMETRY:
1549 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1550 break;
1551 default:
1552 assert(!"unhandled shader type");
1553 free(v);
1554 return NULL;
1555 }
1556
1557 v->key = key;
1558
1559 /* insert into list */
1560 v->next = prog->variants;
1561 prog->variants = v;
1562 }
1563 }
1564
1565 return v;
1566 }
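/* As with the vertex path, the NIR shader is cloned per variant because the
 * pipe driver takes ownership of the IR passed to create_*_state(), while
 * TGSI tokens remain owned by the state tracker and are passed by reference.
 */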
1567
1568
1569 /**
1570 * Translate a tessellation control program to create a new variant.
1571 */
1572 bool
1573 st_translate_tessctrl_program(struct st_context *st,
1574 struct st_common_program *sttcp)
1575 {
1576 struct ureg_program *ureg;
1577
1578 /* We have already compiled to NIR so just return */
1579 if (sttcp->shader_program) {
1580 /* No variants */
1581 st_finalize_nir(st, &sttcp->Base, sttcp->shader_program,
1582 sttcp->tgsi.ir.nir);
1583 st_store_ir_in_disk_cache(st, &sttcp->Base, true);
1584 return true;
1585 }
1586
1587 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_CTRL, st->pipe->screen);
1588 if (ureg == NULL)
1589 return false;
1590
1591 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1592 sttcp->Base.info.tess.tcs_vertices_out);
1593
1594 st_translate_program_common(st, &sttcp->Base, sttcp->glsl_to_tgsi, ureg,
1595 PIPE_SHADER_TESS_CTRL, &sttcp->tgsi);
1596
1597 free_glsl_to_tgsi_visitor(sttcp->glsl_to_tgsi);
1598 sttcp->glsl_to_tgsi = NULL;
1599 return true;
1600 }
1601
1602
1603 /**
1604 * Translate a tessellation evaluation program to create a new variant.
1605 */
1606 bool
1607 st_translate_tesseval_program(struct st_context *st,
1608 struct st_common_program *sttep)
1609 {
1610 struct ureg_program *ureg;
1611
1612 /* We have already compiled to NIR so just return */
1613 if (sttep->shader_program) {
1614 /* No variants */
1615 st_finalize_nir(st, &sttep->Base, sttep->shader_program,
1616 sttep->tgsi.ir.nir);
1617 st_translate_program_stream_output(&sttep->Base, &sttep->tgsi.stream_output);
1618 st_store_ir_in_disk_cache(st, &sttep->Base, true);
1619 return true;
1620 }
1621
1622 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_EVAL, st->pipe->screen);
1623 if (ureg == NULL)
1624 return false;
1625
1626 if (sttep->Base.info.tess.primitive_mode == GL_ISOLINES)
1627 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1628 else
1629 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1630 sttep->Base.info.tess.primitive_mode);
1631
1632 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1633 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1634 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1635 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1636 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1637
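   /* gl_tess_spacing and pipe_tess_spacing enumerate the same three modes in
    * a rotated order; the STATIC_ASSERTs above pin that relationship down, so
    * a simple "+1 mod 3" converts between the two enums without a switch.
    */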
1638 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1639 (sttep->Base.info.tess.spacing + 1) % 3);
1640
1641 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1642 !sttep->Base.info.tess.ccw);
1643 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1644 sttep->Base.info.tess.point_mode);
1645
1646 st_translate_program_common(st, &sttep->Base, sttep->glsl_to_tgsi,
1647 ureg, PIPE_SHADER_TESS_EVAL, &sttep->tgsi);
1648
1649 free_glsl_to_tgsi_visitor(sttep->glsl_to_tgsi);
1650 sttep->glsl_to_tgsi = NULL;
1651 return true;
1652 }
1653
1654
1655 /**
1656 * Translate a compute program to create a new variant.
1657 */
1658 bool
1659 st_translate_compute_program(struct st_context *st,
1660 struct st_compute_program *stcp)
1661 {
1662 struct ureg_program *ureg;
1663 struct pipe_shader_state prog;
1664
1665 stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
1666
1667 if (stcp->shader_program) {
1668 /* no compute variants: */
1669 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1670 (struct nir_shader *) stcp->tgsi.prog);
1671 st_store_ir_in_disk_cache(st, &stcp->Base, true);
1672 return true;
1673 }
1674
1675 ureg = ureg_create_with_screen(PIPE_SHADER_COMPUTE, st->pipe->screen);
1676 if (ureg == NULL)
1677 return false;
1678
1679 st_translate_program_common(st, &stcp->Base, stcp->glsl_to_tgsi, ureg,
1680 PIPE_SHADER_COMPUTE, &prog);
1681
1682 stcp->tgsi.ir_type = PIPE_SHADER_IR_TGSI;
1683 stcp->tgsi.req_private_mem = 0;
1684 stcp->tgsi.req_input_mem = 0;
1685
1686 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1687 stcp->glsl_to_tgsi = NULL;
1688 return true;
1689 }
1690
1691
1692 /**
1693 * Get/create compute program variant.
1694 */
1695 struct st_basic_variant *
1696 st_get_cp_variant(struct st_context *st,
1697 struct pipe_compute_state *tgsi,
1698 struct st_basic_variant **variants)
1699 {
1700 struct pipe_context *pipe = st->pipe;
1701 struct st_basic_variant *v;
1702 struct st_basic_variant_key key;
1703
1704 memset(&key, 0, sizeof(key));
1705 key.st = st->has_shareable_shaders ? NULL : st;
1706
1707 /* Search for existing variant */
1708 for (v = *variants; v; v = v->next) {
1709 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1710 break;
1711 }
1712 }
1713
1714 if (!v) {
1715 /* create new */
1716 v = CALLOC_STRUCT(st_basic_variant);
1717 if (v) {
1718 /* fill in new variant */
1719 struct pipe_compute_state cs = *tgsi;
1720 if (tgsi->ir_type == PIPE_SHADER_IR_NIR)
1721 cs.prog = nir_shader_clone(NULL, tgsi->prog);
1722 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1723 v->key = key;
1724
1725 /* insert into list */
1726 v->next = *variants;
1727 *variants = v;
1728 }
1729 }
1730
1731 return v;
1732 }
1733
1734
1735 /**
1736 * Vert/Geom/Frag programs have per-context variants. Free all the
1737 * variants attached to the given program which match the given context.
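 * Each list is walked with a pointer-to-previous-link so matching variants
 * can be unlinked and freed in place during a single pass.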
1738 */
1739 static void
1740 destroy_program_variants(struct st_context *st, struct gl_program *target)
1741 {
1742 if (!target || target == &_mesa_DummyProgram)
1743 return;
1744
1745 switch (target->Target) {
1746 case GL_VERTEX_PROGRAM_ARB:
1747 {
1748 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1749 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1750
1751 for (vpv = stvp->variants; vpv; ) {
1752 struct st_vp_variant *next = vpv->next;
1753 if (vpv->key.st == st) {
1754 /* unlink from list */
1755 *prevPtr = next;
1756 /* destroy this variant */
1757 delete_vp_variant(st, vpv);
1758 }
1759 else {
1760 prevPtr = &vpv->next;
1761 }
1762 vpv = next;
1763 }
1764 }
1765 break;
1766 case GL_FRAGMENT_PROGRAM_ARB:
1767 {
1768 struct st_fragment_program *stfp =
1769 (struct st_fragment_program *) target;
1770 struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
1771
1772 for (fpv = stfp->variants; fpv; ) {
1773 struct st_fp_variant *next = fpv->next;
1774 if (fpv->key.st == st) {
1775 /* unlink from list */
1776 *prevPtr = next;
1777 /* destroy this variant */
1778 delete_fp_variant(st, fpv);
1779 }
1780 else {
1781 prevPtr = &fpv->next;
1782 }
1783 fpv = next;
1784 }
1785 }
1786 break;
1787 case GL_GEOMETRY_PROGRAM_NV:
1788 case GL_TESS_CONTROL_PROGRAM_NV:
1789 case GL_TESS_EVALUATION_PROGRAM_NV:
1790 case GL_COMPUTE_PROGRAM_NV:
1791 {
1792 struct st_common_program *p = st_common_program(target);
1793 struct st_compute_program *cp = (struct st_compute_program*)target;
1794 struct st_basic_variant **variants =
1795 target->Target == GL_COMPUTE_PROGRAM_NV ? &cp->variants :
1796 &p->variants;
1797 struct st_basic_variant *v, **prevPtr = variants;
1798
1799 for (v = *variants; v; ) {
1800 struct st_basic_variant *next = v->next;
1801 if (v->key.st == st) {
1802 /* unlink from list */
1803 *prevPtr = next;
1804 /* destroy this variant */
1805 delete_basic_variant(st, v, target->Target);
1806 }
1807 else {
1808 prevPtr = &v->next;
1809 }
1810 v = next;
1811 }
1812 }
1813 break;
1814 default:
1815 _mesa_problem(NULL, "Unexpected program target 0x%x in "
 1816                     "destroy_program_variants()", target->Target);
1817 }
1818 }
1819
1820
1821 /**
1822 * Callback for _mesa_HashWalk. Free all the shader's program variants
1823 * which match the given context.
1824 */
1825 static void
1826 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1827 {
1828 struct st_context *st = (struct st_context *) userData;
1829 struct gl_shader *shader = (struct gl_shader *) data;
1830
1831 switch (shader->Type) {
1832 case GL_SHADER_PROGRAM_MESA:
1833 {
1834 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1835 GLuint i;
1836
1837 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1838 if (shProg->_LinkedShaders[i])
1839 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1840 }
1841 }
1842 break;
1843 case GL_VERTEX_SHADER:
1844 case GL_FRAGMENT_SHADER:
1845 case GL_GEOMETRY_SHADER:
1846 case GL_TESS_CONTROL_SHADER:
1847 case GL_TESS_EVALUATION_SHADER:
1848 case GL_COMPUTE_SHADER:
1849 break;
1850 default:
1851 assert(0);
1852 }
1853 }
1854
1855
1856 /**
1857 * Callback for _mesa_HashWalk. Free all the program variants which match
1858 * the given context.
1859 */
1860 static void
1861 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1862 {
1863 struct st_context *st = (struct st_context *) userData;
1864 struct gl_program *program = (struct gl_program *) data;
1865 destroy_program_variants(st, program);
1866 }
1867
1868
1869 /**
1870 * Walk over all shaders and programs to delete any variants which
1871 * belong to the given context.
1872 * This is called during context tear-down.
1873 */
1874 void
1875 st_destroy_program_variants(struct st_context *st)
1876 {
1877 /* If shaders can be shared with other contexts, the last context will
1878 * call DeleteProgram on all shaders, releasing everything.
1879 */
1880 if (st->has_shareable_shaders)
1881 return;
1882
1883 /* ARB vert/frag program */
1884 _mesa_HashWalk(st->ctx->Shared->Programs,
1885 destroy_program_variants_cb, st);
1886
1887 /* GLSL vert/frag/geom shaders */
1888 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1889 destroy_shader_program_variants_cb, st);
1890 }
1891
1892
1893 /**
1894 * For debugging, print/dump the current vertex program.
1895 */
1896 void
1897 st_print_current_vertex_program(void)
1898 {
1899 GET_CURRENT_CONTEXT(ctx);
1900
1901 if (ctx->VertexProgram._Current) {
1902 struct st_vertex_program *stvp =
1903 (struct st_vertex_program *) ctx->VertexProgram._Current;
1904 struct st_vp_variant *stv;
1905
1906 debug_printf("Vertex program %u\n", stvp->Base.Id);
1907
1908 for (stv = stvp->variants; stv; stv = stv->next) {
1909 debug_printf("variant %p\n", stv);
1910 tgsi_dump(stv->tgsi.tokens, 0);
1911 }
1912 }
1913 }
1914
1915
1916 /**
1917 * Compile one shader variant.
1918 */
1919 void
1920 st_precompile_shader_variant(struct st_context *st,
1921 struct gl_program *prog)
1922 {
1923 switch (prog->Target) {
1924 case GL_VERTEX_PROGRAM_ARB: {
1925 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1926 struct st_vp_variant_key key;
1927
1928 memset(&key, 0, sizeof(key));
1929 key.st = st->has_shareable_shaders ? NULL : st;
1930 st_get_vp_variant(st, p, &key);
1931 break;
1932 }
1933
1934 case GL_TESS_CONTROL_PROGRAM_NV: {
1935 struct st_common_program *p = st_common_program(prog);
1936 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p);
1937 break;
1938 }
1939
1940 case GL_TESS_EVALUATION_PROGRAM_NV: {
1941 struct st_common_program *p = st_common_program(prog);
1942 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p);
1943 break;
1944 }
1945
1946 case GL_GEOMETRY_PROGRAM_NV: {
1947 struct st_common_program *p = st_common_program(prog);
1948 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p);
1949 break;
1950 }
1951
1952 case GL_FRAGMENT_PROGRAM_ARB: {
1953 struct st_fragment_program *p = (struct st_fragment_program *)prog;
1954 struct st_fp_variant_key key;
1955
1956 memset(&key, 0, sizeof(key));
1957 key.st = st->has_shareable_shaders ? NULL : st;
1958 st_get_fp_variant(st, p, &key);
1959 break;
1960 }
1961
1962 case GL_COMPUTE_PROGRAM_NV: {
1963 struct st_compute_program *p = (struct st_compute_program *)prog;
1964 st_get_cp_variant(st, &p->tgsi, &p->variants);
1965 break;
1966 }
1967
1968 default:
1969 assert(0);
1970 }
1971 }