3f8df31da1814e610f6d35b89cf70127f2863c60
[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/imports.h"
35 #include "main/hash.h"
36 #include "main/mtypes.h"
37 #include "program/prog_parameter.h"
38 #include "program/prog_print.h"
39 #include "program/programopt.h"
40
41 #include "compiler/nir/nir.h"
42
43 #include "pipe/p_context.h"
44 #include "pipe/p_defines.h"
45 #include "pipe/p_shader_tokens.h"
46 #include "draw/draw_context.h"
47 #include "tgsi/tgsi_dump.h"
48 #include "tgsi/tgsi_emulate.h"
49 #include "tgsi/tgsi_parse.h"
50 #include "tgsi/tgsi_ureg.h"
51
52 #include "st_debug.h"
53 #include "st_cb_bitmap.h"
54 #include "st_cb_drawpixels.h"
55 #include "st_context.h"
56 #include "st_tgsi_lower_yuv.h"
57 #include "st_program.h"
58 #include "st_mesa_to_tgsi.h"
59 #include "st_atifs_to_tgsi.h"
60 #include "st_nir.h"
61 #include "st_shader_cache.h"
62 #include "cso_cache/cso_context.h"
63
64
65
66 static void
67 set_affected_state_flags(uint64_t *states,
68 struct gl_program *prog,
69 uint64_t new_constants,
70 uint64_t new_sampler_views,
71 uint64_t new_samplers,
72 uint64_t new_images,
73 uint64_t new_ubos,
74 uint64_t new_ssbos,
75 uint64_t new_atomics)
76 {
77 if (prog->Parameters->NumParameters)
78 *states |= new_constants;
79
80 if (prog->info.num_textures)
81 *states |= new_sampler_views | new_samplers;
82
83 if (prog->info.num_images)
84 *states |= new_images;
85
86 if (prog->info.num_ubos)
87 *states |= new_ubos;
88
89 if (prog->info.num_ssbos)
90 *states |= new_ssbos;
91
92 if (prog->info.num_abos)
93 *states |= new_atomics;
94 }
95
/**
 * This determines which states will be updated when the shader is bound.
 *
 * Writes the result into the affected_states field of the stage-specific
 * st_*_program wrapper around \p prog: a fixed base mask per shader stage,
 * plus resource-dependent ST_NEW_* flags contributed by
 * set_affected_state_flags() only for resource kinds the program uses.
 */
void
st_set_prog_affected_state_flags(struct gl_program *prog)
{
   uint64_t *states;

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      states = &((struct st_vertex_program*)prog)->affected_states;

      *states = ST_NEW_VS_STATE |
                ST_NEW_RASTERIZER |
                ST_NEW_VERTEX_ARRAYS;

      set_affected_state_flags(states, prog,
                               ST_NEW_VS_CONSTANTS,
                               ST_NEW_VS_SAMPLER_VIEWS,
                               ST_NEW_VS_SAMPLERS,
                               ST_NEW_VS_IMAGES,
                               ST_NEW_VS_UBOS,
                               ST_NEW_VS_SSBOS,
                               ST_NEW_VS_ATOMICS);
      break;

   case MESA_SHADER_TESS_CTRL:
      states = &(st_common_program(prog))->affected_states;

      *states = ST_NEW_TCS_STATE;

      set_affected_state_flags(states, prog,
                               ST_NEW_TCS_CONSTANTS,
                               ST_NEW_TCS_SAMPLER_VIEWS,
                               ST_NEW_TCS_SAMPLERS,
                               ST_NEW_TCS_IMAGES,
                               ST_NEW_TCS_UBOS,
                               ST_NEW_TCS_SSBOS,
                               ST_NEW_TCS_ATOMICS);
      break;

   case MESA_SHADER_TESS_EVAL:
      states = &(st_common_program(prog))->affected_states;

      *states = ST_NEW_TES_STATE |
                ST_NEW_RASTERIZER;

      set_affected_state_flags(states, prog,
                               ST_NEW_TES_CONSTANTS,
                               ST_NEW_TES_SAMPLER_VIEWS,
                               ST_NEW_TES_SAMPLERS,
                               ST_NEW_TES_IMAGES,
                               ST_NEW_TES_UBOS,
                               ST_NEW_TES_SSBOS,
                               ST_NEW_TES_ATOMICS);
      break;

   case MESA_SHADER_GEOMETRY:
      states = &(st_common_program(prog))->affected_states;

      *states = ST_NEW_GS_STATE |
                ST_NEW_RASTERIZER;

      set_affected_state_flags(states, prog,
                               ST_NEW_GS_CONSTANTS,
                               ST_NEW_GS_SAMPLER_VIEWS,
                               ST_NEW_GS_SAMPLERS,
                               ST_NEW_GS_IMAGES,
                               ST_NEW_GS_UBOS,
                               ST_NEW_GS_SSBOS,
                               ST_NEW_GS_ATOMICS);
      break;

   case MESA_SHADER_FRAGMENT:
      states = &((struct st_fragment_program*)prog)->affected_states;

      /* gl_FragCoord and glDrawPixels always use constants. */
      *states = ST_NEW_FS_STATE |
                ST_NEW_SAMPLE_SHADING |
                ST_NEW_FS_CONSTANTS;

      set_affected_state_flags(states, prog,
                               ST_NEW_FS_CONSTANTS,
                               ST_NEW_FS_SAMPLER_VIEWS,
                               ST_NEW_FS_SAMPLERS,
                               ST_NEW_FS_IMAGES,
                               ST_NEW_FS_UBOS,
                               ST_NEW_FS_SSBOS,
                               ST_NEW_FS_ATOMICS);
      break;

   case MESA_SHADER_COMPUTE:
      states = &((struct st_compute_program*)prog)->affected_states;

      *states = ST_NEW_CS_STATE;

      set_affected_state_flags(states, prog,
                               ST_NEW_CS_CONSTANTS,
                               ST_NEW_CS_SAMPLER_VIEWS,
                               ST_NEW_CS_SAMPLERS,
                               ST_NEW_CS_IMAGES,
                               ST_NEW_CS_UBOS,
                               ST_NEW_CS_SSBOS,
                               ST_NEW_CS_ATOMICS);
      break;

   default:
      unreachable("unhandled shader stage");
   }
}
206
207 /**
208 * Delete a vertex program variant. Note the caller must unlink
209 * the variant from the linked list.
210 */
211 static void
212 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
213 {
214 if (vpv->driver_shader)
215 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
216
217 if (vpv->draw_shader)
218 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
219
220 if (((vpv->tgsi.type == PIPE_SHADER_IR_TGSI)) && vpv->tgsi.tokens)
221 ureg_free_tokens(vpv->tgsi.tokens);
222
223 free( vpv );
224 }
225
226
227
228 /**
229 * Clean out any old compilations:
230 */
231 void
232 st_release_vp_variants( struct st_context *st,
233 struct st_vertex_program *stvp )
234 {
235 struct st_vp_variant *vpv;
236
237 for (vpv = stvp->variants; vpv; ) {
238 struct st_vp_variant *next = vpv->next;
239 delete_vp_variant(st, vpv);
240 vpv = next;
241 }
242
243 stvp->variants = NULL;
244
245 if ((stvp->tgsi.type == PIPE_SHADER_IR_TGSI) && stvp->tgsi.tokens) {
246 tgsi_free_tokens(stvp->tgsi.tokens);
247 stvp->tgsi.tokens = NULL;
248 }
249 }
250
251
252
253 /**
254 * Delete a fragment program variant. Note the caller must unlink
255 * the variant from the linked list.
256 */
257 static void
258 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
259 {
260 if (fpv->driver_shader)
261 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
262 free(fpv);
263 }
264
265
266 /**
267 * Free all variants of a fragment program.
268 */
269 void
270 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
271 {
272 struct st_fp_variant *fpv;
273
274 for (fpv = stfp->variants; fpv; ) {
275 struct st_fp_variant *next = fpv->next;
276 delete_fp_variant(st, fpv);
277 fpv = next;
278 }
279
280 stfp->variants = NULL;
281
282 if ((stfp->tgsi.type == PIPE_SHADER_IR_TGSI) && stfp->tgsi.tokens) {
283 ureg_free_tokens(stfp->tgsi.tokens);
284 stfp->tgsi.tokens = NULL;
285 }
286 }
287
288
289 /**
290 * Delete a basic program variant. Note the caller must unlink
291 * the variant from the linked list.
292 */
293 static void
294 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
295 GLenum target)
296 {
297 if (v->driver_shader) {
298 switch (target) {
299 case GL_TESS_CONTROL_PROGRAM_NV:
300 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
301 break;
302 case GL_TESS_EVALUATION_PROGRAM_NV:
303 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
304 break;
305 case GL_GEOMETRY_PROGRAM_NV:
306 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
307 break;
308 case GL_COMPUTE_PROGRAM_NV:
309 cso_delete_compute_shader(st->cso_context, v->driver_shader);
310 break;
311 default:
312 assert(!"this shouldn't occur");
313 }
314 }
315
316 free(v);
317 }
318
319
320 /**
321 * Free all basic program variants.
322 */
323 void
324 st_release_basic_variants(struct st_context *st, GLenum target,
325 struct st_basic_variant **variants,
326 struct pipe_shader_state *tgsi)
327 {
328 struct st_basic_variant *v;
329
330 for (v = *variants; v; ) {
331 struct st_basic_variant *next = v->next;
332 delete_basic_variant(st, v, target);
333 v = next;
334 }
335
336 *variants = NULL;
337
338 if (tgsi->tokens) {
339 ureg_free_tokens(tgsi->tokens);
340 tgsi->tokens = NULL;
341 }
342 }
343
344
345 /**
346 * Free all variants of a compute program.
347 */
348 void
349 st_release_cp_variants(struct st_context *st, struct st_compute_program *stcp)
350 {
351 struct st_basic_variant **variants = &stcp->variants;
352 struct st_basic_variant *v;
353
354 for (v = *variants; v; ) {
355 struct st_basic_variant *next = v->next;
356 delete_basic_variant(st, v, stcp->Base.Target);
357 v = next;
358 }
359
360 *variants = NULL;
361
362 if (stcp->tgsi.prog) {
363 switch (stcp->tgsi.ir_type) {
364 case PIPE_SHADER_IR_TGSI:
365 ureg_free_tokens(stcp->tgsi.prog);
366 stcp->tgsi.prog = NULL;
367 break;
368 case PIPE_SHADER_IR_NIR:
369 /* pipe driver took ownership of prog */
370 break;
371 case PIPE_SHADER_IR_NATIVE:
372 /* ??? */
373 stcp->tgsi.prog = NULL;
374 break;
375 }
376 }
377 }
378
/**
 * Translate a vertex program.
 *
 * Builds the VERT_ATTRIB <-> TGSI input index mappings and the output
 * slot/semantic tables on \p stvp, then (for non-NIR programs) runs the
 * GLSL-to-TGSI or Mesa-IR-to-TGSI translator and stores the resulting
 * token array in stvp->tgsi.tokens.
 *
 * Returns false on out-of-memory or translation failure.
 */
bool
st_translate_vertex_program(struct st_context *st,
                            struct st_vertex_program *stvp)
{
   struct ureg_program *ureg;
   enum pipe_error error;
   unsigned num_outputs = 0;
   unsigned attr;
   ubyte input_to_index[VERT_ATTRIB_MAX] = {0};
   ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
   ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};

   stvp->num_inputs = 0;

   /* ARB_vp position-invariant option: append ftransform-equivalent code. */
   if (stvp->Base.arb.IsPositionInvariant)
      _mesa_insert_mvp_code(st->ctx, &stvp->Base);

   /*
    * Determine number of inputs, the mappings between VERT_ATTRIB_x
    * and TGSI generic input indexes, plus input attrib semantic info.
    */
   for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
      if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
         input_to_index[attr] = stvp->num_inputs;
         stvp->index_to_input[stvp->num_inputs] = attr;
         stvp->num_inputs++;
         if ((stvp->Base.info.vs.double_inputs_read &
              BITFIELD64_BIT(attr)) != 0) {
            /* add placeholder for second part of a double attribute */
            stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
            stvp->num_inputs++;
         }
      }
   }
   /* bit of a hack, presetup potentially unused edgeflag input */
   input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
   stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;

   /* Compute mapping of vertex program outputs to slots.
    * Unwritten outputs map to ~0; written outputs get consecutive slots
    * and a TGSI semantic name/index pair.
    */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
         stvp->result_to_output[attr] = ~0;
      }
      else {
         unsigned slot = num_outputs++;

         stvp->result_to_output[attr] = slot;

         unsigned semantic_name, semantic_index;
         tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                      &semantic_name, &semantic_index);
         output_semantic_name[slot] = semantic_name;
         output_semantic_index[slot] = semantic_index;
      }
   }
   /* similar hack to above, presetup potentially unused edgeflag output */
   stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
   output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
   output_semantic_index[num_outputs] = 0;

   /* ARB_vp: */
   if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
      _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);

      /* This determines which states will be updated when the assembly
       * shader is bound.
       */
      stvp->affected_states = ST_NEW_VS_STATE |
                              ST_NEW_RASTERIZER |
                              ST_NEW_VERTEX_ARRAYS;

      if (stvp->Base.Parameters->NumParameters)
         stvp->affected_states |= ST_NEW_VS_CONSTANTS;

      /* No samplers are allowed in ARB_vp. */
   }

   /* NIR path: translation to TGSI is skipped entirely; only the
    * transform-feedback info needs result_to_output from above.
    */
   if (stvp->shader_program) {
      struct gl_program *prog = stvp->shader_program->last_vert_prog;
      if (prog) {
         st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
                                          stvp->result_to_output,
                                          &stvp->tgsi.stream_output);
      }

      st_store_ir_in_disk_cache(st, &stvp->Base, true);
      return true;
   }

   ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
   if (ureg == NULL)
      return false;

   if (stvp->Base.info.clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    stvp->Base.info.clip_distance_array_size);
   if (stvp->Base.info.cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    stvp->Base.info.cull_distance_array_size);

   if (ST_DEBUG & DEBUG_MESA) {
      _mesa_print_program(&stvp->Base);
      _mesa_print_program_parameters(st->ctx, &stvp->Base);
      debug_printf("\n");
   }

   if (stvp->glsl_to_tgsi) {
      /* GLSL IR path: input semantics are derived by the translator
       * itself, so only the index mappings are passed in.
       */
      error = st_translate_program(st->ctx,
                                   PIPE_SHADER_VERTEX,
                                   ureg,
                                   stvp->glsl_to_tgsi,
                                   &stvp->Base,
                                   /* inputs */
                                   stvp->num_inputs,
                                   input_to_index,
                                   NULL, /* inputSlotToAttr */
                                   NULL, /* input semantic name */
                                   NULL, /* input semantic index */
                                   NULL, /* interp mode */
                                   /* outputs */
                                   num_outputs,
                                   stvp->result_to_output,
                                   output_semantic_name,
                                   output_semantic_index);

      st_translate_stream_output_info(stvp->glsl_to_tgsi,
                                      stvp->result_to_output,
                                      &stvp->tgsi.stream_output);

      free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
   } else
      /* Fixed-function / ARB_vp Mesa IR path. */
      error = st_translate_mesa_program(st->ctx,
                                        PIPE_SHADER_VERTEX,
                                        ureg,
                                        &stvp->Base,
                                        /* inputs */
                                        stvp->num_inputs,
                                        input_to_index,
                                        NULL, /* input semantic name */
                                        NULL, /* input semantic index */
                                        NULL,
                                        /* outputs */
                                        num_outputs,
                                        stvp->result_to_output,
                                        output_semantic_name,
                                        output_semantic_index);

   if (error) {
      debug_printf("%s: failed to translate Mesa program:\n", __func__);
      _mesa_print_program(&stvp->Base);
      debug_assert(0);
      return false;
   }

   stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
   ureg_destroy(ureg);

   /* The visitor is consumed by translation; cache the finished TGSI. */
   if (stvp->glsl_to_tgsi) {
      stvp->glsl_to_tgsi = NULL;
      st_store_ir_in_disk_cache(st, &stvp->Base, false);
   }

   return stvp->tgsi.tokens != NULL;
}
547
548 static struct st_vp_variant *
549 st_create_vp_variant(struct st_context *st,
550 struct st_vertex_program *stvp,
551 const struct st_vp_variant_key *key)
552 {
553 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
554 struct pipe_context *pipe = st->pipe;
555
556 vpv->key = *key;
557 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
558 vpv->num_inputs = stvp->num_inputs;
559
560 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
561 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
562 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
563 if (key->clamp_color)
564 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
565 if (key->passthrough_edgeflags) {
566 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
567 vpv->num_inputs++;
568 }
569
570 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
571 vpv->tgsi.ir.nir);
572
573 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
574 /* driver takes ownership of IR: */
575 vpv->tgsi.ir.nir = NULL;
576 return vpv;
577 }
578
579 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
580
581 /* Emulate features. */
582 if (key->clamp_color || key->passthrough_edgeflags) {
583 const struct tgsi_token *tokens;
584 unsigned flags =
585 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
586 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
587
588 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
589
590 if (tokens) {
591 tgsi_free_tokens(vpv->tgsi.tokens);
592 vpv->tgsi.tokens = tokens;
593
594 if (key->passthrough_edgeflags)
595 vpv->num_inputs++;
596 } else
597 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
598 }
599
600 if (ST_DEBUG & DEBUG_TGSI) {
601 tgsi_dump(vpv->tgsi.tokens, 0);
602 debug_printf("\n");
603 }
604
605 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
606 return vpv;
607 }
608
609
610 /**
611 * Find/create a vertex program variant.
612 */
613 struct st_vp_variant *
614 st_get_vp_variant(struct st_context *st,
615 struct st_vertex_program *stvp,
616 const struct st_vp_variant_key *key)
617 {
618 struct st_vp_variant *vpv;
619
620 /* Search for existing variant */
621 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
622 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
623 break;
624 }
625 }
626
627 if (!vpv) {
628 /* create now */
629 vpv = st_create_vp_variant(st, stvp, key);
630 if (vpv) {
631 /* insert into list */
632 vpv->next = stvp->variants;
633 stvp->variants = vpv;
634 }
635 }
636
637 return vpv;
638 }
639
640
/**
 * Translate a Mesa fragment shader into a TGSI shader.
 *
 * Maps VARYING_SLOT_* inputs and FRAG_RESULT_* outputs to TGSI
 * semantics/slots, then runs the GLSL-to-TGSI, ATI_fs, or Mesa-IR
 * translator and stores the tokens in stfp->tgsi.tokens.  NIR programs
 * bypass translation entirely.
 *
 * Returns false on out-of-memory or translation failure.
 */
bool
st_translate_fragment_program(struct st_context *st,
                              struct st_fragment_program *stfp)
{
   /* We have already compiled to NIR so just return */
   if (stfp->shader_program) {
      st_store_ir_in_disk_cache(st, &stfp->Base, true);
      return true;
   }

   /* outputMapping is doubled so secondary (dual-source blend) outputs can
    * be indexed at attr >= FRAG_RESULT_MAX — see the output loop below.
    */
   ubyte outputMapping[2 * FRAG_RESULT_MAX];
   ubyte inputMapping[VARYING_SLOT_MAX];
   ubyte inputSlotToAttr[VARYING_SLOT_MAX];
   ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
   GLuint attr;
   GLbitfield64 inputsRead;
   struct ureg_program *ureg;

   GLboolean write_all = GL_FALSE;

   ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
   ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
   uint fs_num_inputs = 0;

   ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
   ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
   uint fs_num_outputs = 0;

   memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));

   /* Non-GLSL programs: */
   if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
      _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
      if (st->ctx->Const.GLSLFragCoordIsSysVal)
         _mesa_program_fragment_position_to_sysval(&stfp->Base);

      /* This determines which states will be updated when the assembly
       * shader is bound.
       *
       * fragment.position and glDrawPixels always use constants.
       */
      stfp->affected_states = ST_NEW_FS_STATE |
                              ST_NEW_SAMPLE_SHADING |
                              ST_NEW_FS_CONSTANTS;

      if (stfp->ati_fs) {
         /* Just set them for ATI_fs unconditionally. */
         stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
                                  ST_NEW_FS_SAMPLERS;
      } else {
         /* ARB_fp */
         if (stfp->Base.SamplersUsed)
            stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
                                     ST_NEW_FS_SAMPLERS;
      }
   }

   /*
    * Convert Mesa program inputs to TGSI input register semantics.
    */
   inputsRead = stfp->Base.info.inputs_read;
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
         const GLuint slot = fs_num_inputs++;

         inputMapping[attr] = slot;
         inputSlotToAttr[slot] = attr;

         /* TGSI_INTERPOLATE_COUNT below means "use the mode the GLSL
          * translator derives from the IR"; fixed modes are only forced
          * for the non-GLSL paths.
          */
         switch (attr) {
         case VARYING_SLOT_POS:
            input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
            break;
         case VARYING_SLOT_COL0:
            input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
            input_semantic_index[slot] = 0;
            interpMode[slot] = stfp->glsl_to_tgsi ?
               TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
            break;
         case VARYING_SLOT_COL1:
            input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
            input_semantic_index[slot] = 1;
            interpMode[slot] = stfp->glsl_to_tgsi ?
               TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
            break;
         case VARYING_SLOT_FOGC:
            input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_FACE:
            input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_PRIMITIVE_ID:
            input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_LAYER:
            input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_VIEWPORT:
            input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_CLIP_DIST0:
            input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_CLIP_DIST1:
            input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
            input_semantic_index[slot] = 1;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_CULL_DIST0:
         case VARYING_SLOT_CULL_DIST1:
            /* these should have been lowered by GLSL */
            assert(0);
            break;
            /* In most cases, there is nothing special about these
             * inputs, so adopt a convention to use the generic
             * semantic name and the mesa VARYING_SLOT_ number as the
             * index.
             *
             * All that is required is that the vertex shader labels
             * its own outputs similarly, and that the vertex shader
             * generates at least every output required by the
             * fragment shader plus fixed-function hardware (such as
             * BFC).
             *
             * However, some drivers may need us to identify the PNTC and TEXi
             * varyings if, for example, their capability to replace them with
             * sprite coordinates is limited.
             */
         case VARYING_SLOT_PNTC:
            if (st->needs_texcoord_semantic) {
               input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
               input_semantic_index[slot] = 0;
               interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
               break;
            }
            /* fall through */
         case VARYING_SLOT_TEX0:
         case VARYING_SLOT_TEX1:
         case VARYING_SLOT_TEX2:
         case VARYING_SLOT_TEX3:
         case VARYING_SLOT_TEX4:
         case VARYING_SLOT_TEX5:
         case VARYING_SLOT_TEX6:
         case VARYING_SLOT_TEX7:
            if (st->needs_texcoord_semantic) {
               input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
               input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
               interpMode[slot] = stfp->glsl_to_tgsi ?
                  TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
               break;
            }
            /* fall through */
         case VARYING_SLOT_VAR0:
         default:
            /* Semantic indices should be zero-based because drivers may choose
             * to assign a fixed slot determined by that index.
             * This is useful because ARB_separate_shader_objects uses location
             * qualifiers for linkage, and if the semantic index corresponds to
             * these locations, linkage passes in the driver become unecessary.
             *
             * If needs_texcoord_semantic is true, no semantic indices will be
             * consumed for the TEXi varyings, and we can base the locations of
             * the user varyings on VAR0.  Otherwise, we use TEX0 as base index.
             */
            assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
                   (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
            input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
            input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
            if (attr == VARYING_SLOT_PNTC)
               interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
            else {
               interpMode[slot] = stfp->glsl_to_tgsi ?
                  TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
            }
            break;
         }
      }
      else {
         inputMapping[attr] = -1;
      }
   }

   /*
    * Semantics and mapping for outputs
    */
   GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;

   /* if z is written, emit that first */
   /* NOTE(review): these three clears use a 32-bit `1 << x` on a
    * GLbitfield64; apparently safe only because DEPTH/STENCIL/SAMPLE_MASK
    * are small FRAG_RESULT values — consider BITFIELD64_BIT for consistency.
    */
   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
   }

   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
   }

   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
   }

   /* handle remaining outputs (color) */
   for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
      /* attr >= FRAG_RESULT_MAX indexes the secondary (dual-source blend)
       * outputs; `loc` folds it back into the FRAG_RESULT_* range.
       */
      const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
         stfp->Base.SecondaryOutputsWritten;
      const unsigned loc = attr % FRAG_RESULT_MAX;

      if (written & BITFIELD64_BIT(loc)) {
         switch (loc) {
         case FRAG_RESULT_DEPTH:
         case FRAG_RESULT_STENCIL:
         case FRAG_RESULT_SAMPLE_MASK:
            /* handled above */
            assert(0);
            break;
         case FRAG_RESULT_COLOR:
            write_all = GL_TRUE; /* fallthrough */
         default: {
            int index;
            assert(loc == FRAG_RESULT_COLOR ||
                   (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));

            index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);

            if (attr >= FRAG_RESULT_MAX) {
               /* Secondary color for dual source blending. */
               assert(index == 0);
               index++;
            }

            fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
            fs_output_semantic_index[fs_num_outputs] = index;
            outputMapping[attr] = fs_num_outputs;
            break;
         }
         }

         fs_num_outputs++;
      }
   }

   ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
   if (ureg == NULL)
      return false;

   if (ST_DEBUG & DEBUG_MESA) {
      _mesa_print_program(&stfp->Base);
      _mesa_print_program_parameters(st->ctx, &stfp->Base);
      debug_printf("\n");
   }
   if (write_all == GL_TRUE)
      ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);

   /* Forward the GL depth-layout qualifier to the TGSI property. */
   if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
      switch (stfp->Base.info.fs.depth_layout) {
      case FRAG_DEPTH_LAYOUT_ANY:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_ANY);
         break;
      case FRAG_DEPTH_LAYOUT_GREATER:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_GREATER);
         break;
      case FRAG_DEPTH_LAYOUT_LESS:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_LESS);
         break;
      case FRAG_DEPTH_LAYOUT_UNCHANGED:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
         break;
      default:
         assert(0);
      }
   }

   /* Dispatch to the appropriate translator: GLSL IR, ATI_fragment_shader,
    * or plain Mesa IR (ARB_fp / fixed function).
    */
   if (stfp->glsl_to_tgsi) {
      st_translate_program(st->ctx,
                           PIPE_SHADER_FRAGMENT,
                           ureg,
                           stfp->glsl_to_tgsi,
                           &stfp->Base,
                           /* inputs */
                           fs_num_inputs,
                           inputMapping,
                           inputSlotToAttr,
                           input_semantic_name,
                           input_semantic_index,
                           interpMode,
                           /* outputs */
                           fs_num_outputs,
                           outputMapping,
                           fs_output_semantic_name,
                           fs_output_semantic_index);

      free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
   } else if (stfp->ati_fs)
      st_translate_atifs_program(ureg,
                                 stfp->ati_fs,
                                 &stfp->Base,
                                 /* inputs */
                                 fs_num_inputs,
                                 inputMapping,
                                 input_semantic_name,
                                 input_semantic_index,
                                 interpMode,
                                 /* outputs */
                                 fs_num_outputs,
                                 outputMapping,
                                 fs_output_semantic_name,
                                 fs_output_semantic_index);
   else
      st_translate_mesa_program(st->ctx,
                                PIPE_SHADER_FRAGMENT,
                                ureg,
                                &stfp->Base,
                                /* inputs */
                                fs_num_inputs,
                                inputMapping,
                                input_semantic_name,
                                input_semantic_index,
                                interpMode,
                                /* outputs */
                                fs_num_outputs,
                                outputMapping,
                                fs_output_semantic_name,
                                fs_output_semantic_index);

   stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
   ureg_destroy(ureg);

   /* The visitor is consumed by translation; cache the finished TGSI. */
   if (stfp->glsl_to_tgsi) {
      stfp->glsl_to_tgsi = NULL;
      st_store_ir_in_disk_cache(st, &stfp->Base, false);
   }

   return stfp->tgsi.tokens != NULL;
}
1006
1007 static struct st_fp_variant *
1008 st_create_fp_variant(struct st_context *st,
1009 struct st_fragment_program *stfp,
1010 const struct st_fp_variant_key *key)
1011 {
1012 struct pipe_context *pipe = st->pipe;
1013 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1014 struct pipe_shader_state tgsi = {0};
1015 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1016 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1017 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1018 static const gl_state_index16 scale_state[STATE_LENGTH] =
1019 { STATE_INTERNAL, STATE_PT_SCALE };
1020 static const gl_state_index16 bias_state[STATE_LENGTH] =
1021 { STATE_INTERNAL, STATE_PT_BIAS };
1022
1023 if (!variant)
1024 return NULL;
1025
1026 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1027 tgsi.type = PIPE_SHADER_IR_NIR;
1028 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1029
1030 if (key->clamp_color)
1031 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1032
1033 if (key->persample_shading) {
1034 nir_shader *shader = tgsi.ir.nir;
1035 nir_foreach_variable(var, &shader->inputs)
1036 var->data.sample = true;
1037 }
1038
1039 assert(!(key->bitmap && key->drawpixels));
1040
1041 /* glBitmap */
1042 if (key->bitmap) {
1043 nir_lower_bitmap_options options = {0};
1044
1045 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1046 options.sampler = variant->bitmap_sampler;
1047 options.swizzle_xxxx = (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM);
1048
1049 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1050 }
1051
1052 /* glDrawPixels (color only) */
1053 if (key->drawpixels) {
1054 nir_lower_drawpixels_options options = {{0}};
1055 unsigned samplers_used = stfp->Base.SamplersUsed;
1056
1057 /* Find the first unused slot. */
1058 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1059 options.drawpix_sampler = variant->drawpix_sampler;
1060 samplers_used |= (1 << variant->drawpix_sampler);
1061
1062 options.pixel_maps = key->pixelMaps;
1063 if (key->pixelMaps) {
1064 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1065 options.pixelmap_sampler = variant->pixelmap_sampler;
1066 }
1067
1068 options.scale_and_bias = key->scaleAndBias;
1069 if (key->scaleAndBias) {
1070 _mesa_add_state_reference(params, scale_state);
1071 memcpy(options.scale_state_tokens, scale_state,
1072 sizeof(options.scale_state_tokens));
1073 _mesa_add_state_reference(params, bias_state);
1074 memcpy(options.bias_state_tokens, bias_state,
1075 sizeof(options.bias_state_tokens));
1076 }
1077
1078 _mesa_add_state_reference(params, texcoord_state);
1079 memcpy(options.texcoord_state_tokens, texcoord_state,
1080 sizeof(options.texcoord_state_tokens));
1081
1082 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1083 }
1084
1085 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1086 nir_lower_tex_options options = {0};
1087 options.lower_y_uv_external = key->external.lower_nv12;
1088 options.lower_y_u_v_external = key->external.lower_iyuv;
1089 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1090 }
1091
1092 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1093
1094 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1095 /* This pass needs to happen *after* nir_lower_sampler */
1096 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1097 ~stfp->Base.SamplersUsed,
1098 key->external.lower_nv12,
1099 key->external.lower_iyuv);
1100 }
1101
1102 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1103 variant->key = *key;
1104
1105 return variant;
1106 }
1107
1108 tgsi.tokens = stfp->tgsi.tokens;
1109
1110 assert(!(key->bitmap && key->drawpixels));
1111
1112 /* Fix texture targets and add fog for ATI_fs */
1113 if (stfp->ati_fs) {
1114 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1115
1116 if (tokens)
1117 tgsi.tokens = tokens;
1118 else
1119 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1120 }
1121
1122 /* Emulate features. */
1123 if (key->clamp_color || key->persample_shading) {
1124 const struct tgsi_token *tokens;
1125 unsigned flags =
1126 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1127 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1128
1129 tokens = tgsi_emulate(tgsi.tokens, flags);
1130
1131 if (tokens) {
1132 if (tgsi.tokens != stfp->tgsi.tokens)
1133 tgsi_free_tokens(tgsi.tokens);
1134 tgsi.tokens = tokens;
1135 } else
1136 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1137 }
1138
1139 /* glBitmap */
1140 if (key->bitmap) {
1141 const struct tgsi_token *tokens;
1142
1143 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1144
1145 tokens = st_get_bitmap_shader(tgsi.tokens,
1146 st->internal_target,
1147 variant->bitmap_sampler,
1148 st->needs_texcoord_semantic,
1149 st->bitmap.tex_format ==
1150 PIPE_FORMAT_L8_UNORM);
1151
1152 if (tokens) {
1153 if (tgsi.tokens != stfp->tgsi.tokens)
1154 tgsi_free_tokens(tgsi.tokens);
1155 tgsi.tokens = tokens;
1156 } else
1157 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1158 }
1159
1160 /* glDrawPixels (color only) */
1161 if (key->drawpixels) {
1162 const struct tgsi_token *tokens;
1163 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1164
1165 /* Find the first unused slot. */
1166 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1167
1168 if (key->pixelMaps) {
1169 unsigned samplers_used = stfp->Base.SamplersUsed |
1170 (1 << variant->drawpix_sampler);
1171
1172 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1173 }
1174
1175 if (key->scaleAndBias) {
1176 scale_const = _mesa_add_state_reference(params, scale_state);
1177 bias_const = _mesa_add_state_reference(params, bias_state);
1178 }
1179
1180 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1181
1182 tokens = st_get_drawpix_shader(tgsi.tokens,
1183 st->needs_texcoord_semantic,
1184 key->scaleAndBias, scale_const,
1185 bias_const, key->pixelMaps,
1186 variant->drawpix_sampler,
1187 variant->pixelmap_sampler,
1188 texcoord_const, st->internal_target);
1189
1190 if (tokens) {
1191 if (tgsi.tokens != stfp->tgsi.tokens)
1192 tgsi_free_tokens(tgsi.tokens);
1193 tgsi.tokens = tokens;
1194 } else
1195 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1196 }
1197
1198 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1199 const struct tgsi_token *tokens;
1200
1201 /* samplers inserted would conflict, but this should be unpossible: */
1202 assert(!(key->bitmap || key->drawpixels));
1203
1204 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1205 ~stfp->Base.SamplersUsed,
1206 key->external.lower_nv12,
1207 key->external.lower_iyuv);
1208 if (tokens) {
1209 if (tgsi.tokens != stfp->tgsi.tokens)
1210 tgsi_free_tokens(tgsi.tokens);
1211 tgsi.tokens = tokens;
1212 } else {
1213 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1214 }
1215 }
1216
1217 if (ST_DEBUG & DEBUG_TGSI) {
1218 tgsi_dump(tgsi.tokens, 0);
1219 debug_printf("\n");
1220 }
1221
1222 /* fill in variant */
1223 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1224 variant->key = *key;
1225
1226 if (tgsi.tokens != stfp->tgsi.tokens)
1227 tgsi_free_tokens(tgsi.tokens);
1228 return variant;
1229 }
1230
1231 /**
1232 * Translate fragment program if needed.
1233 */
1234 struct st_fp_variant *
1235 st_get_fp_variant(struct st_context *st,
1236 struct st_fragment_program *stfp,
1237 const struct st_fp_variant_key *key)
1238 {
1239 struct st_fp_variant *fpv;
1240
1241 /* Search for existing variant */
1242 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1243 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1244 break;
1245 }
1246 }
1247
1248 if (!fpv) {
1249 /* create new */
1250 fpv = st_create_fp_variant(st, stfp, key);
1251 if (fpv) {
1252 if (key->bitmap || key->drawpixels) {
1253 /* Regular variants should always come before the
1254 * bitmap & drawpixels variants, (unless there
1255 * are no regular variants) so that
1256 * st_update_fp can take a fast path when
1257 * shader_has_one_variant is set.
1258 */
1259 if (!stfp->variants) {
1260 stfp->variants = fpv;
1261 } else {
1262 /* insert into list after the first one */
1263 fpv->next = stfp->variants->next;
1264 stfp->variants->next = fpv;
1265 }
1266 } else {
1267 /* insert into list */
1268 fpv->next = stfp->variants;
1269 stfp->variants = fpv;
1270 }
1271 }
1272 }
1273
1274 return fpv;
1275 }
1276
1277
1278 /**
1279 * Translate a program. This is common code for geometry and tessellation
1280 * shaders.
1281 */
static void
st_translate_program_common(struct st_context *st,
                            struct gl_program *prog,
                            struct glsl_to_tgsi_visitor *glsl_to_tgsi,
                            struct ureg_program *ureg,
                            unsigned tgsi_processor,
                            struct pipe_shader_state *out_state)
{
   /* Mesa varying slot <-> packed TGSI register slot mappings. */
   ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
   ubyte inputMapping[VARYING_SLOT_TESS_MAX];
   ubyte outputMapping[VARYING_SLOT_TESS_MAX];
   GLuint attr;

   ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
   ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
   uint num_inputs = 0;

   ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
   ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
   uint num_outputs = 0;

   GLint i;

   memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
   memset(inputMapping, 0, sizeof(inputMapping));
   memset(outputMapping, 0, sizeof(outputMapping));
   memset(out_state, 0, sizeof(*out_state));

   /* Clip/cull distance usage is declared via TGSI properties. */
   if (prog->info.clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    prog->info.clip_distance_array_size);
   if (prog->info.cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    prog->info.cull_distance_array_size);

   /*
    * Convert Mesa program inputs to TGSI input register semantics.
    */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
         continue;

      /* Inputs are packed: each read attribute takes the next free slot. */
      unsigned slot = num_inputs++;

      inputMapping[attr] = slot;
      inputSlotToAttr[slot] = attr;

      unsigned semantic_name, semantic_index;
      tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                   &semantic_name, &semantic_index);
      input_semantic_name[slot] = semantic_name;
      input_semantic_index[slot] = semantic_index;
   }

   /* Also add patch inputs. */
   for (attr = 0; attr < 32; attr++) {
      if (prog->info.patch_inputs_read & (1u << attr)) {
         GLuint slot = num_inputs++;
         GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;

         inputMapping[patch_attr] = slot;
         inputSlotToAttr[slot] = patch_attr;
         /* Patch varyings use the PATCH semantic, indexed by patch attr. */
         input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
         input_semantic_index[slot] = attr;
      }
   }

   /* initialize output semantics to defaults */
   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
      output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
      output_semantic_index[i] = 0;
   }

   /*
    * Determine number of outputs, the (default) output register
    * mapping and the semantic information for each output.
    */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
         GLuint slot = num_outputs++;

         outputMapping[attr] = slot;

         unsigned semantic_name, semantic_index;
         tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                      &semantic_name, &semantic_index);
         output_semantic_name[slot] = semantic_name;
         output_semantic_index[slot] = semantic_index;
      }
   }

   /* Also add patch outputs. */
   for (attr = 0; attr < 32; attr++) {
      if (prog->info.patch_outputs_written & (1u << attr)) {
         GLuint slot = num_outputs++;
         GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;

         outputMapping[patch_attr] = slot;
         output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
         output_semantic_index[slot] = attr;
      }
   }

   /* Emit the TGSI instruction stream from the glsl_to_tgsi IR. */
   st_translate_program(st->ctx,
                        tgsi_processor,
                        ureg,
                        glsl_to_tgsi,
                        prog,
                        /* inputs */
                        num_inputs,
                        inputMapping,
                        inputSlotToAttr,
                        input_semantic_name,
                        input_semantic_index,
                        NULL,
                        /* outputs */
                        num_outputs,
                        outputMapping,
                        output_semantic_name,
                        output_semantic_index);

   /* Grab the finished token array; compute programs additionally keep a
    * reference to it in tgsi.prog (pipe_compute_state has no tokens field).
    */
   if (tgsi_processor == PIPE_SHADER_COMPUTE) {
      struct st_compute_program *stcp = (struct st_compute_program *) prog;
      out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
      stcp->tgsi.prog = out_state->tokens;
   } else {
      struct st_common_program *stcp = (struct st_common_program *) prog;
      out_state->tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
   }
   ureg_destroy(ureg);

   /* Transform-feedback slots must use the same output mapping as above. */
   st_translate_stream_output_info(glsl_to_tgsi,
                                   outputMapping,
                                   &out_state->stream_output);

   st_store_ir_in_disk_cache(st, prog, false);

   if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
      _mesa_print_program(prog);
      debug_printf("\n");
   }

   if (ST_DEBUG & DEBUG_TGSI) {
      tgsi_dump(out_state->tokens, 0);
      debug_printf("\n");
   }
}
1429
1430 /**
1431 * Update stream-output info for GS/TCS/TES. Normally this is done in
1432 * st_translate_program_common() but that is not called for glsl_to_nir
1433 * case.
1434 */
1435 static void
1436 st_translate_program_stream_output(struct gl_program *prog,
1437 struct pipe_stream_output_info *stream_output)
1438 {
1439 if (!prog->sh.LinkedTransformFeedback)
1440 return;
1441
1442 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1443 GLuint attr;
1444 uint num_outputs = 0;
1445
1446 memset(outputMapping, 0, sizeof(outputMapping));
1447
1448 /*
1449 * Determine number of outputs, the (default) output register
1450 * mapping and the semantic information for each output.
1451 */
1452 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1453 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1454 GLuint slot = num_outputs++;
1455
1456 outputMapping[attr] = slot;
1457 }
1458 }
1459
1460 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
1461 outputMapping,
1462 stream_output);
1463 }
1464
1465 /**
1466 * Translate a geometry program to create a new variant.
1467 */
1468 bool
1469 st_translate_geometry_program(struct st_context *st,
1470 struct st_common_program *stgp)
1471 {
1472 struct ureg_program *ureg;
1473
1474 /* We have already compiled to NIR so just return */
1475 if (stgp->shader_program) {
1476 /* No variants */
1477 st_finalize_nir(st, &stgp->Base, stgp->shader_program,
1478 stgp->tgsi.ir.nir);
1479 st_translate_program_stream_output(&stgp->Base, &stgp->tgsi.stream_output);
1480 st_store_ir_in_disk_cache(st, &stgp->Base, true);
1481 return true;
1482 }
1483
1484 ureg = ureg_create_with_screen(PIPE_SHADER_GEOMETRY, st->pipe->screen);
1485 if (ureg == NULL)
1486 return false;
1487
1488 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1489 stgp->Base.info.gs.input_primitive);
1490 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1491 stgp->Base.info.gs.output_primitive);
1492 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1493 stgp->Base.info.gs.vertices_out);
1494 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1495 stgp->Base.info.gs.invocations);
1496
1497 st_translate_program_common(st, &stgp->Base, stgp->glsl_to_tgsi, ureg,
1498 PIPE_SHADER_GEOMETRY, &stgp->tgsi);
1499
1500 free_glsl_to_tgsi_visitor(stgp->glsl_to_tgsi);
1501 stgp->glsl_to_tgsi = NULL;
1502 return true;
1503 }
1504
1505
1506 /**
1507 * Get/create a basic program variant.
1508 */
1509 struct st_basic_variant *
1510 st_get_basic_variant(struct st_context *st,
1511 unsigned pipe_shader,
1512 struct st_common_program *prog)
1513 {
1514 struct pipe_context *pipe = st->pipe;
1515 struct st_basic_variant *v;
1516 struct st_basic_variant_key key;
1517 struct pipe_shader_state tgsi = {0};
1518 memset(&key, 0, sizeof(key));
1519 key.st = st->has_shareable_shaders ? NULL : st;
1520
1521 /* Search for existing variant */
1522 for (v = prog->variants; v; v = v->next) {
1523 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1524 break;
1525 }
1526 }
1527
1528 if (!v) {
1529 /* create new */
1530 v = CALLOC_STRUCT(st_basic_variant);
1531 if (v) {
1532
1533 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1534 tgsi.type = PIPE_SHADER_IR_NIR;
1535 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1536 tgsi.stream_output = prog->tgsi.stream_output;
1537 } else
1538 tgsi = prog->tgsi;
1539 /* fill in new variant */
1540 switch (pipe_shader) {
1541 case PIPE_SHADER_TESS_CTRL:
1542 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1543 break;
1544 case PIPE_SHADER_TESS_EVAL:
1545 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1546 break;
1547 case PIPE_SHADER_GEOMETRY:
1548 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1549 break;
1550 default:
1551 assert(!"unhandled shader type");
1552 free(v);
1553 return NULL;
1554 }
1555
1556 v->key = key;
1557
1558 /* insert into list */
1559 v->next = prog->variants;
1560 prog->variants = v;
1561 }
1562 }
1563
1564 return v;
1565 }
1566
1567
1568 /**
1569 * Translate a tessellation control program to create a new variant.
1570 */
1571 bool
1572 st_translate_tessctrl_program(struct st_context *st,
1573 struct st_common_program *sttcp)
1574 {
1575 struct ureg_program *ureg;
1576
1577 /* We have already compiled to NIR so just return */
1578 if (sttcp->shader_program) {
1579 /* No variants */
1580 st_finalize_nir(st, &sttcp->Base, sttcp->shader_program,
1581 sttcp->tgsi.ir.nir);
1582 st_store_ir_in_disk_cache(st, &sttcp->Base, true);
1583 return true;
1584 }
1585
1586 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_CTRL, st->pipe->screen);
1587 if (ureg == NULL)
1588 return false;
1589
1590 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1591 sttcp->Base.info.tess.tcs_vertices_out);
1592
1593 st_translate_program_common(st, &sttcp->Base, sttcp->glsl_to_tgsi, ureg,
1594 PIPE_SHADER_TESS_CTRL, &sttcp->tgsi);
1595
1596 free_glsl_to_tgsi_visitor(sttcp->glsl_to_tgsi);
1597 sttcp->glsl_to_tgsi = NULL;
1598 return true;
1599 }
1600
1601
1602 /**
1603 * Translate a tessellation evaluation program to create a new variant.
1604 */
bool
st_translate_tesseval_program(struct st_context *st,
                              struct st_common_program *sttep)
{
   struct ureg_program *ureg;

   /* We have already compiled to NIR so just return */
   if (sttep->shader_program) {
      /* No variants */
      st_finalize_nir(st, &sttep->Base, sttep->shader_program,
                      sttep->tgsi.ir.nir);
      st_translate_program_stream_output(&sttep->Base, &sttep->tgsi.stream_output);
      st_store_ir_in_disk_cache(st, &sttep->Base, true);
      return true;
   }

   ureg = ureg_create_with_screen(PIPE_SHADER_TESS_EVAL, st->pipe->screen);
   if (ureg == NULL)
      return false;

   /* TGSI expresses the isolines domain as GL_LINES rather than
    * GL_ISOLINES; every other primitive mode passes through unchanged.
    */
   if (sttep->Base.info.tess.primitive_mode == GL_ISOLINES)
      ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
   else
      ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
                    sttep->Base.info.tess.primitive_mode);

   /* The "(x + 1) % 3" below remaps the gl_shader_info TESS_SPACING_*
    * numbering onto PIPE_TESS_SPACING_*; these asserts pin that relation.
    */
   STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
   STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                 PIPE_TESS_SPACING_FRACTIONAL_ODD);
   STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                 PIPE_TESS_SPACING_FRACTIONAL_EVEN);

   ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
                 (sttep->Base.info.tess.spacing + 1) % 3);

   /* TGSI declares clockwise winding; GLSL info stores ccw, hence "!". */
   ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
                 !sttep->Base.info.tess.ccw);
   ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
                 sttep->Base.info.tess.point_mode);

   st_translate_program_common(st, &sttep->Base, sttep->glsl_to_tgsi,
                               ureg, PIPE_SHADER_TESS_EVAL, &sttep->tgsi);

   /* The glsl_to_tgsi IR is no longer needed once tokens are emitted. */
   free_glsl_to_tgsi_visitor(sttep->glsl_to_tgsi);
   sttep->glsl_to_tgsi = NULL;
   return true;
}
1652
1653
1654 /**
1655 * Translate a compute program to create a new variant.
1656 */
1657 bool
1658 st_translate_compute_program(struct st_context *st,
1659 struct st_compute_program *stcp)
1660 {
1661 struct ureg_program *ureg;
1662 struct pipe_shader_state prog;
1663
1664 stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
1665
1666 if (stcp->shader_program) {
1667 /* no compute variants: */
1668 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1669 (struct nir_shader *) stcp->tgsi.prog);
1670 st_store_ir_in_disk_cache(st, &stcp->Base, true);
1671 return true;
1672 }
1673
1674 ureg = ureg_create_with_screen(PIPE_SHADER_COMPUTE, st->pipe->screen);
1675 if (ureg == NULL)
1676 return false;
1677
1678 st_translate_program_common(st, &stcp->Base, stcp->glsl_to_tgsi, ureg,
1679 PIPE_SHADER_COMPUTE, &prog);
1680
1681 stcp->tgsi.ir_type = PIPE_SHADER_IR_TGSI;
1682 stcp->tgsi.req_private_mem = 0;
1683 stcp->tgsi.req_input_mem = 0;
1684
1685 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1686 stcp->glsl_to_tgsi = NULL;
1687 return true;
1688 }
1689
1690
1691 /**
1692 * Get/create compute program variant.
1693 */
1694 struct st_basic_variant *
1695 st_get_cp_variant(struct st_context *st,
1696 struct pipe_compute_state *tgsi,
1697 struct st_basic_variant **variants)
1698 {
1699 struct pipe_context *pipe = st->pipe;
1700 struct st_basic_variant *v;
1701 struct st_basic_variant_key key;
1702
1703 memset(&key, 0, sizeof(key));
1704 key.st = st->has_shareable_shaders ? NULL : st;
1705
1706 /* Search for existing variant */
1707 for (v = *variants; v; v = v->next) {
1708 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1709 break;
1710 }
1711 }
1712
1713 if (!v) {
1714 /* create new */
1715 v = CALLOC_STRUCT(st_basic_variant);
1716 if (v) {
1717 /* fill in new variant */
1718 struct pipe_compute_state cs = *tgsi;
1719 if (tgsi->ir_type == PIPE_SHADER_IR_NIR)
1720 cs.prog = nir_shader_clone(NULL, tgsi->prog);
1721 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1722 v->key = key;
1723
1724 /* insert into list */
1725 v->next = *variants;
1726 *variants = v;
1727 }
1728 }
1729
1730 return v;
1731 }
1732
1733
1734 /**
1735 * Vert/Geom/Frag programs have per-context variants. Free all the
1736 * variants attached to the given program which match the given context.
1737 */
static void
destroy_program_variants(struct st_context *st, struct gl_program *target)
{
   /* The dummy program is shared and owns no variants. */
   if (!target || target == &_mesa_DummyProgram)
      return;

   switch (target->Target) {
   case GL_VERTEX_PROGRAM_ARB:
      {
         struct st_vertex_program *stvp = (struct st_vertex_program *) target;
         struct st_vp_variant *vpv, **prevPtr = &stvp->variants;

         /* Walk the singly-linked variant list, unlinking and deleting
          * only the variants created by this context (key.st == st).
          * prevPtr always points at the link field to rewrite on removal.
          */
         for (vpv = stvp->variants; vpv; ) {
            struct st_vp_variant *next = vpv->next;
            if (vpv->key.st == st) {
               /* unlink from list */
               *prevPtr = next;
               /* destroy this variant */
               delete_vp_variant(st, vpv);
            }
            else {
               prevPtr = &vpv->next;
            }
            vpv = next;
         }
      }
      break;
   case GL_FRAGMENT_PROGRAM_ARB:
      {
         struct st_fragment_program *stfp =
            (struct st_fragment_program *) target;
         struct st_fp_variant *fpv, **prevPtr = &stfp->variants;

         /* Same unlink-while-walking pattern as the vertex case above. */
         for (fpv = stfp->variants; fpv; ) {
            struct st_fp_variant *next = fpv->next;
            if (fpv->key.st == st) {
               /* unlink from list */
               *prevPtr = next;
               /* destroy this variant */
               delete_fp_variant(st, fpv);
            }
            else {
               prevPtr = &fpv->next;
            }
            fpv = next;
         }
      }
      break;
   case GL_GEOMETRY_PROGRAM_NV:
   case GL_TESS_CONTROL_PROGRAM_NV:
   case GL_TESS_EVALUATION_PROGRAM_NV:
   case GL_COMPUTE_PROGRAM_NV:
      {
         /* GS/TCS/TES/CS all use st_basic_variant lists; only the field
          * holding the list differs (st_compute_program vs st_common_program).
          */
         struct st_common_program *p = st_common_program(target);
         struct st_compute_program *cp = (struct st_compute_program*)target;
         struct st_basic_variant **variants =
            target->Target == GL_COMPUTE_PROGRAM_NV ? &cp->variants :
                                                      &p->variants;
         struct st_basic_variant *v, **prevPtr = variants;

         for (v = *variants; v; ) {
            struct st_basic_variant *next = v->next;
            if (v->key.st == st) {
               /* unlink from list */
               *prevPtr = next;
               /* destroy this variant */
               delete_basic_variant(st, v, target->Target);
            }
            else {
               prevPtr = &v->next;
            }
            v = next;
         }
      }
      break;
   default:
      _mesa_problem(NULL, "Unexpected program target 0x%x in "
                    "destroy_program_variants_cb()", target->Target);
   }
}
1818
1819
1820 /**
1821 * Callback for _mesa_HashWalk. Free all the shader's program variants
1822 * which match the given context.
1823 */
1824 static void
1825 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1826 {
1827 struct st_context *st = (struct st_context *) userData;
1828 struct gl_shader *shader = (struct gl_shader *) data;
1829
1830 switch (shader->Type) {
1831 case GL_SHADER_PROGRAM_MESA:
1832 {
1833 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1834 GLuint i;
1835
1836 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1837 if (shProg->_LinkedShaders[i])
1838 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1839 }
1840 }
1841 break;
1842 case GL_VERTEX_SHADER:
1843 case GL_FRAGMENT_SHADER:
1844 case GL_GEOMETRY_SHADER:
1845 case GL_TESS_CONTROL_SHADER:
1846 case GL_TESS_EVALUATION_SHADER:
1847 case GL_COMPUTE_SHADER:
1848 break;
1849 default:
1850 assert(0);
1851 }
1852 }
1853
1854
1855 /**
1856 * Callback for _mesa_HashWalk. Free all the program variants which match
1857 * the given context.
1858 */
1859 static void
1860 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1861 {
1862 struct st_context *st = (struct st_context *) userData;
1863 struct gl_program *program = (struct gl_program *) data;
1864 destroy_program_variants(st, program);
1865 }
1866
1867
1868 /**
1869 * Walk over all shaders and programs to delete any variants which
1870 * belong to the given context.
1871 * This is called during context tear-down.
1872 */
1873 void
1874 st_destroy_program_variants(struct st_context *st)
1875 {
1876 /* If shaders can be shared with other contexts, the last context will
1877 * call DeleteProgram on all shaders, releasing everything.
1878 */
1879 if (st->has_shareable_shaders)
1880 return;
1881
1882 /* ARB vert/frag program */
1883 _mesa_HashWalk(st->ctx->Shared->Programs,
1884 destroy_program_variants_cb, st);
1885
1886 /* GLSL vert/frag/geom shaders */
1887 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1888 destroy_shader_program_variants_cb, st);
1889 }
1890
1891
1892 /**
1893 * For debugging, print/dump the current vertex program.
1894 */
1895 void
1896 st_print_current_vertex_program(void)
1897 {
1898 GET_CURRENT_CONTEXT(ctx);
1899
1900 if (ctx->VertexProgram._Current) {
1901 struct st_vertex_program *stvp =
1902 (struct st_vertex_program *) ctx->VertexProgram._Current;
1903 struct st_vp_variant *stv;
1904
1905 debug_printf("Vertex program %u\n", stvp->Base.Id);
1906
1907 for (stv = stvp->variants; stv; stv = stv->next) {
1908 debug_printf("variant %p\n", stv);
1909 tgsi_dump(stv->tgsi.tokens, 0);
1910 }
1911 }
1912 }
1913
1914
1915 /**
1916 * Compile one shader variant.
1917 */
1918 void
1919 st_precompile_shader_variant(struct st_context *st,
1920 struct gl_program *prog)
1921 {
1922 switch (prog->Target) {
1923 case GL_VERTEX_PROGRAM_ARB: {
1924 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1925 struct st_vp_variant_key key;
1926
1927 memset(&key, 0, sizeof(key));
1928 key.st = st->has_shareable_shaders ? NULL : st;
1929 st_get_vp_variant(st, p, &key);
1930 break;
1931 }
1932
1933 case GL_TESS_CONTROL_PROGRAM_NV: {
1934 struct st_common_program *p = st_common_program(prog);
1935 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p);
1936 break;
1937 }
1938
1939 case GL_TESS_EVALUATION_PROGRAM_NV: {
1940 struct st_common_program *p = st_common_program(prog);
1941 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p);
1942 break;
1943 }
1944
1945 case GL_GEOMETRY_PROGRAM_NV: {
1946 struct st_common_program *p = st_common_program(prog);
1947 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p);
1948 break;
1949 }
1950
1951 case GL_FRAGMENT_PROGRAM_ARB: {
1952 struct st_fragment_program *p = (struct st_fragment_program *)prog;
1953 struct st_fp_variant_key key;
1954
1955 memset(&key, 0, sizeof(key));
1956 key.st = st->has_shareable_shaders ? NULL : st;
1957 st_get_fp_variant(st, p, &key);
1958 break;
1959 }
1960
1961 case GL_COMPUTE_PROGRAM_NV: {
1962 struct st_compute_program *p = (struct st_compute_program *)prog;
1963 st_get_cp_variant(st, &p->tgsi, &p->variants);
1964 break;
1965 }
1966
1967 default:
1968 assert(0);
1969 }
1970 }