st/glsl_to_nir: don't generate nir twice for gs
[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/imports.h"
35 #include "main/hash.h"
36 #include "main/mtypes.h"
37 #include "program/prog_parameter.h"
38 #include "program/prog_print.h"
39 #include "program/programopt.h"
40
41 #include "compiler/nir/nir.h"
42
43 #include "pipe/p_context.h"
44 #include "pipe/p_defines.h"
45 #include "pipe/p_shader_tokens.h"
46 #include "draw/draw_context.h"
47 #include "tgsi/tgsi_dump.h"
48 #include "tgsi/tgsi_emulate.h"
49 #include "tgsi/tgsi_parse.h"
50 #include "tgsi/tgsi_ureg.h"
51
52 #include "st_debug.h"
53 #include "st_cb_bitmap.h"
54 #include "st_cb_drawpixels.h"
55 #include "st_context.h"
56 #include "st_tgsi_lower_yuv.h"
57 #include "st_program.h"
58 #include "st_mesa_to_tgsi.h"
59 #include "st_atifs_to_tgsi.h"
60 #include "st_nir.h"
61 #include "st_shader_cache.h"
62 #include "cso_cache/cso_context.h"
63
64
65
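/**
 * Helper for st_set_prog_affected_state_flags: OR the given per-resource
 * dirty-state flags into *states for each resource type (constants,
 * textures, images, UBOs, SSBOs, atomics) the program actually uses.
 */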
66 static void
67 set_affected_state_flags(uint64_t *states,
68 struct gl_program *prog,
69 uint64_t new_constants,
70 uint64_t new_sampler_views,
71 uint64_t new_samplers,
72 uint64_t new_images,
73 uint64_t new_ubos,
74 uint64_t new_ssbos,
75 uint64_t new_atomics)
76 {
77 if (prog->Parameters->NumParameters)
78 *states |= new_constants;
79
80 if (prog->info.num_textures)
81 *states |= new_sampler_views | new_samplers;
82
83 if (prog->info.num_images)
84 *states |= new_images;
85
86 if (prog->info.num_ubos)
87 *states |= new_ubos;
88
89 if (prog->info.num_ssbos)
90 *states |= new_ssbos;
91
92 if (prog->info.num_abos)
93 *states |= new_atomics;
94 }
95
96 /**
97 * This determines which states will be updated when the shader is bound.
98 */
99 void
100 st_set_prog_affected_state_flags(struct gl_program *prog)
101 {
102 uint64_t *states;
103
104 switch (prog->info.stage) {
105 case MESA_SHADER_VERTEX:
106 states = &((struct st_vertex_program*)prog)->affected_states;
107
108 *states = ST_NEW_VS_STATE |
109 ST_NEW_RASTERIZER |
110 ST_NEW_VERTEX_ARRAYS;
111
112 set_affected_state_flags(states, prog,
113 ST_NEW_VS_CONSTANTS,
114 ST_NEW_VS_SAMPLER_VIEWS,
115 ST_NEW_VS_SAMPLERS,
116 ST_NEW_VS_IMAGES,
117 ST_NEW_VS_UBOS,
118 ST_NEW_VS_SSBOS,
119 ST_NEW_VS_ATOMICS);
120 break;
121
122 case MESA_SHADER_TESS_CTRL:
123 states = &(st_common_program(prog))->affected_states;
124
125 *states = ST_NEW_TCS_STATE;
126
127 set_affected_state_flags(states, prog,
128 ST_NEW_TCS_CONSTANTS,
129 ST_NEW_TCS_SAMPLER_VIEWS,
130 ST_NEW_TCS_SAMPLERS,
131 ST_NEW_TCS_IMAGES,
132 ST_NEW_TCS_UBOS,
133 ST_NEW_TCS_SSBOS,
134 ST_NEW_TCS_ATOMICS);
135 break;
136
137 case MESA_SHADER_TESS_EVAL:
138 states = &(st_common_program(prog))->affected_states;
139
140 *states = ST_NEW_TES_STATE |
141 ST_NEW_RASTERIZER;
142
143 set_affected_state_flags(states, prog,
144 ST_NEW_TES_CONSTANTS,
145 ST_NEW_TES_SAMPLER_VIEWS,
146 ST_NEW_TES_SAMPLERS,
147 ST_NEW_TES_IMAGES,
148 ST_NEW_TES_UBOS,
149 ST_NEW_TES_SSBOS,
150 ST_NEW_TES_ATOMICS);
151 break;
152
153 case MESA_SHADER_GEOMETRY:
154 states = &(st_common_program(prog))->affected_states;
155
156 *states = ST_NEW_GS_STATE |
157 ST_NEW_RASTERIZER;
158
159 set_affected_state_flags(states, prog,
160 ST_NEW_GS_CONSTANTS,
161 ST_NEW_GS_SAMPLER_VIEWS,
162 ST_NEW_GS_SAMPLERS,
163 ST_NEW_GS_IMAGES,
164 ST_NEW_GS_UBOS,
165 ST_NEW_GS_SSBOS,
166 ST_NEW_GS_ATOMICS);
167 break;
168
169 case MESA_SHADER_FRAGMENT:
170 states = &((struct st_fragment_program*)prog)->affected_states;
171
172 /* gl_FragCoord and glDrawPixels always use constants. */
173 *states = ST_NEW_FS_STATE |
174 ST_NEW_SAMPLE_SHADING |
175 ST_NEW_FS_CONSTANTS;
176
177 set_affected_state_flags(states, prog,
178 ST_NEW_FS_CONSTANTS,
179 ST_NEW_FS_SAMPLER_VIEWS,
180 ST_NEW_FS_SAMPLERS,
181 ST_NEW_FS_IMAGES,
182 ST_NEW_FS_UBOS,
183 ST_NEW_FS_SSBOS,
184 ST_NEW_FS_ATOMICS);
185 break;
186
187 case MESA_SHADER_COMPUTE:
188 states = &((struct st_compute_program*)prog)->affected_states;
189
190 *states = ST_NEW_CS_STATE;
191
192 set_affected_state_flags(states, prog,
193 ST_NEW_CS_CONSTANTS,
194 ST_NEW_CS_SAMPLER_VIEWS,
195 ST_NEW_CS_SAMPLERS,
196 ST_NEW_CS_IMAGES,
197 ST_NEW_CS_UBOS,
198 ST_NEW_CS_SSBOS,
199 ST_NEW_CS_ATOMICS);
200 break;
201
202 default:
203 unreachable("unhandled shader stage");
204 }
205 }
206
207 /**
208 * Delete a vertex program variant. Note the caller must unlink
209 * the variant from the linked list.
210 */
211 static void
212 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
213 {
214 if (vpv->driver_shader)
215 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
216
217 if (vpv->draw_shader)
218 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
219
220    if ((vpv->tgsi.type == PIPE_SHADER_IR_TGSI) && vpv->tgsi.tokens)
221 ureg_free_tokens(vpv->tgsi.tokens);
222
223 free( vpv );
224 }
225
226
227
228 /**
229 * Clean out any old compilations:
230 */
231 void
232 st_release_vp_variants( struct st_context *st,
233 struct st_vertex_program *stvp )
234 {
235 struct st_vp_variant *vpv;
236
237 for (vpv = stvp->variants; vpv; ) {
238 struct st_vp_variant *next = vpv->next;
239 delete_vp_variant(st, vpv);
240 vpv = next;
241 }
242
243 stvp->variants = NULL;
244
245 if ((stvp->tgsi.type == PIPE_SHADER_IR_TGSI) && stvp->tgsi.tokens) {
246 tgsi_free_tokens(stvp->tgsi.tokens);
247 stvp->tgsi.tokens = NULL;
248 }
249 }
250
251
252
253 /**
254 * Delete a fragment program variant. Note the caller must unlink
255 * the variant from the linked list.
256 */
257 static void
258 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
259 {
260 if (fpv->driver_shader)
261 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
262 free(fpv);
263 }
264
265
266 /**
267 * Free all variants of a fragment program.
268 */
269 void
270 st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
271 {
272 struct st_fp_variant *fpv;
273
274 for (fpv = stfp->variants; fpv; ) {
275 struct st_fp_variant *next = fpv->next;
276 delete_fp_variant(st, fpv);
277 fpv = next;
278 }
279
280 stfp->variants = NULL;
281
282 if ((stfp->tgsi.type == PIPE_SHADER_IR_TGSI) && stfp->tgsi.tokens) {
283 ureg_free_tokens(stfp->tgsi.tokens);
284 stfp->tgsi.tokens = NULL;
285 }
286 }
287
288
289 /**
290 * Delete a basic program variant. Note the caller must unlink
291 * the variant from the linked list.
292 */
293 static void
294 delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
295 GLenum target)
296 {
297 if (v->driver_shader) {
298 switch (target) {
299 case GL_TESS_CONTROL_PROGRAM_NV:
300 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
301 break;
302 case GL_TESS_EVALUATION_PROGRAM_NV:
303 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
304 break;
305 case GL_GEOMETRY_PROGRAM_NV:
306 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
307 break;
308 case GL_COMPUTE_PROGRAM_NV:
309 cso_delete_compute_shader(st->cso_context, v->driver_shader);
310 break;
311 default:
312 assert(!"this shouldn't occur");
313 }
314 }
315
316 free(v);
317 }
318
319
320 /**
321 * Free all basic program variants.
322 */
323 void
324 st_release_basic_variants(struct st_context *st, GLenum target,
325 struct st_basic_variant **variants,
326 struct pipe_shader_state *tgsi)
327 {
328 struct st_basic_variant *v;
329
330 for (v = *variants; v; ) {
331 struct st_basic_variant *next = v->next;
332 delete_basic_variant(st, v, target);
333 v = next;
334 }
335
336 *variants = NULL;
337
338 if (tgsi->tokens) {
339 ureg_free_tokens(tgsi->tokens);
340 tgsi->tokens = NULL;
341 }
342 }
343
344
345 /**
346 * Free all variants of a compute program.
347 */
348 void
349 st_release_cp_variants(struct st_context *st, struct st_compute_program *stcp)
350 {
351 struct st_basic_variant **variants = &stcp->variants;
352 struct st_basic_variant *v;
353
354 for (v = *variants; v; ) {
355 struct st_basic_variant *next = v->next;
356 delete_basic_variant(st, v, stcp->Base.Target);
357 v = next;
358 }
359
360 *variants = NULL;
361
362 if (stcp->tgsi.prog) {
363 switch (stcp->tgsi.ir_type) {
364 case PIPE_SHADER_IR_TGSI:
365 ureg_free_tokens(stcp->tgsi.prog);
366 stcp->tgsi.prog = NULL;
367 break;
368 case PIPE_SHADER_IR_NIR:
369 /* pipe driver took ownership of prog */
370 break;
371 case PIPE_SHADER_IR_LLVM:
372 case PIPE_SHADER_IR_NATIVE:
373 /* ??? */
374 stcp->tgsi.prog = NULL;
375 break;
376 }
377 }
378 }
379
380 /**
381 * Translate a vertex program.
382 */
383 bool
384 st_translate_vertex_program(struct st_context *st,
385 struct st_vertex_program *stvp)
386 {
387 struct ureg_program *ureg;
388 enum pipe_error error;
389 unsigned num_outputs = 0;
390 unsigned attr;
391 ubyte input_to_index[VERT_ATTRIB_MAX] = {0};
392 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
393 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
394
395 stvp->num_inputs = 0;
396
397 if (stvp->Base.arb.IsPositionInvariant)
398 _mesa_insert_mvp_code(st->ctx, &stvp->Base);
399
400 /*
401 * Determine number of inputs, the mappings between VERT_ATTRIB_x
402 * and TGSI generic input indexes, plus input attrib semantic info.
403 */
404 for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
405 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
406 input_to_index[attr] = stvp->num_inputs;
407 stvp->index_to_input[stvp->num_inputs] = attr;
408 stvp->num_inputs++;
409 if ((stvp->Base.info.double_inputs_read &
410 BITFIELD64_BIT(attr)) != 0) {
411 /* add placeholder for second part of a double attribute */
412 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
413 stvp->num_inputs++;
414 }
415 }
416 }
417    /* bit of a hack: pre-set up the potentially unused edgeflag input */
418 input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
419 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
420
421 /* Compute mapping of vertex program outputs to slots.
422 */
423 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
424 if ((stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) == 0) {
425 stvp->result_to_output[attr] = ~0;
426 }
427 else {
428 unsigned slot = num_outputs++;
429
430 stvp->result_to_output[attr] = slot;
431
432 unsigned semantic_name, semantic_index;
433 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
434 &semantic_name, &semantic_index);
435 output_semantic_name[slot] = semantic_name;
436 output_semantic_index[slot] = semantic_index;
437 }
438 }
439    /* similar hack to above: pre-set up the potentially unused edgeflag output */
440 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
441 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
442 output_semantic_index[num_outputs] = 0;
443
444 /* ARB_vp: */
445 if (!stvp->glsl_to_tgsi && !stvp->shader_program) {
446 _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
447
448 /* This determines which states will be updated when the assembly
449 * shader is bound.
450 */
451 stvp->affected_states = ST_NEW_VS_STATE |
452 ST_NEW_RASTERIZER |
453 ST_NEW_VERTEX_ARRAYS;
454
455 if (stvp->Base.Parameters->NumParameters)
456 stvp->affected_states |= ST_NEW_VS_CONSTANTS;
457
458 /* No samplers are allowed in ARB_vp. */
459 }
460
461 if (stvp->shader_program) {
462 struct gl_program *prog = stvp->shader_program->last_vert_prog;
463 if (prog) {
464 st_translate_stream_output_info2(prog->sh.LinkedTransformFeedback,
465 stvp->result_to_output,
466 &stvp->tgsi.stream_output);
467 }
468
469 return true;
470 }
471
472 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
473 if (ureg == NULL)
474 return false;
475
476 if (stvp->Base.info.clip_distance_array_size)
477 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
478 stvp->Base.info.clip_distance_array_size);
479 if (stvp->Base.info.cull_distance_array_size)
480 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
481 stvp->Base.info.cull_distance_array_size);
482
483 if (ST_DEBUG & DEBUG_MESA) {
484 _mesa_print_program(&stvp->Base);
485 _mesa_print_program_parameters(st->ctx, &stvp->Base);
486 debug_printf("\n");
487 }
488
489 if (stvp->glsl_to_tgsi) {
490 error = st_translate_program(st->ctx,
491 PIPE_SHADER_VERTEX,
492 ureg,
493 stvp->glsl_to_tgsi,
494 &stvp->Base,
495 /* inputs */
496 stvp->num_inputs,
497 input_to_index,
498 NULL, /* inputSlotToAttr */
499 NULL, /* input semantic name */
500 NULL, /* input semantic index */
501 NULL, /* interp mode */
502 /* outputs */
503 num_outputs,
504 stvp->result_to_output,
505 output_semantic_name,
506 output_semantic_index);
507
508 st_translate_stream_output_info(stvp->glsl_to_tgsi,
509 stvp->result_to_output,
510 &stvp->tgsi.stream_output);
511
512 free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
513 } else
514 error = st_translate_mesa_program(st->ctx,
515 PIPE_SHADER_VERTEX,
516 ureg,
517 &stvp->Base,
518 /* inputs */
519 stvp->num_inputs,
520 input_to_index,
521 NULL, /* input semantic name */
522 NULL, /* input semantic index */
523 NULL,
524 /* outputs */
525 num_outputs,
526 stvp->result_to_output,
527 output_semantic_name,
528 output_semantic_index);
529
530 if (error) {
531 debug_printf("%s: failed to translate Mesa program:\n", __func__);
532 _mesa_print_program(&stvp->Base);
533 debug_assert(0);
534 return false;
535 }
536
537 unsigned num_tokens;
538 stvp->tgsi.tokens = ureg_get_tokens(ureg, &num_tokens);
539 ureg_destroy(ureg);
540
541 if (stvp->glsl_to_tgsi) {
542 stvp->glsl_to_tgsi = NULL;
543 st_store_tgsi_in_disk_cache(st, &stvp->Base, NULL, num_tokens);
544 }
545
546 return stvp->tgsi.tokens != NULL;
547 }
548
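/**
 * Create a new vertex program variant for the given key.  Key-driven
 * lowering (color clamping, edge-flag passthrough) is applied to a copy
 * of the program's NIR or TGSI before the driver shader is created, so
 * the cached IR in the st_vertex_program stays reusable for other variants.
 */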
549 static struct st_vp_variant *
550 st_create_vp_variant(struct st_context *st,
551 struct st_vertex_program *stvp,
552 const struct st_vp_variant_key *key)
553 {
554 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
555 struct pipe_context *pipe = st->pipe;
556
557 vpv->key = *key;
558 vpv->tgsi.stream_output = stvp->tgsi.stream_output;
559 vpv->num_inputs = stvp->num_inputs;
560
561 if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
562 vpv->tgsi.type = PIPE_SHADER_IR_NIR;
563 vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
564 if (key->clamp_color)
565 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
566 if (key->passthrough_edgeflags) {
567 NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
568 vpv->num_inputs++;
569 }
570
571 st_finalize_nir(st, &stvp->Base, stvp->shader_program,
572 vpv->tgsi.ir.nir);
573
574 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
575 /* driver takes ownership of IR: */
576 vpv->tgsi.ir.nir = NULL;
577 return vpv;
578 }
579
580 vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
581
582 /* Emulate features. */
583 if (key->clamp_color || key->passthrough_edgeflags) {
584 const struct tgsi_token *tokens;
585 unsigned flags =
586 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
587 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
588
589 tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
590
591 if (tokens) {
592 tgsi_free_tokens(vpv->tgsi.tokens);
593 vpv->tgsi.tokens = tokens;
594
595 if (key->passthrough_edgeflags)
596 vpv->num_inputs++;
597 } else
598 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
599 }
600
601 if (ST_DEBUG & DEBUG_TGSI) {
602 tgsi_dump(vpv->tgsi.tokens, 0);
603 debug_printf("\n");
604 }
605
606 vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
607 return vpv;
608 }
609
610
611 /**
612 * Find/create a vertex program variant.
613 */
614 struct st_vp_variant *
615 st_get_vp_variant(struct st_context *st,
616 struct st_vertex_program *stvp,
617 const struct st_vp_variant_key *key)
618 {
619 struct st_vp_variant *vpv;
620
621 /* Search for existing variant */
622 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
623 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
624 break;
625 }
626 }
627
628 if (!vpv) {
629 /* create now */
630 vpv = st_create_vp_variant(st, stvp, key);
631 if (vpv) {
632 /* insert into list */
633 vpv->next = stvp->variants;
634 stvp->variants = vpv;
635 }
636 }
637
638 return vpv;
639 }
640
641
642 /**
643 * Translate a Mesa fragment shader into a TGSI shader.
644 */
645 bool
646 st_translate_fragment_program(struct st_context *st,
647 struct st_fragment_program *stfp)
648 {
649 ubyte outputMapping[2 * FRAG_RESULT_MAX];
650 ubyte inputMapping[VARYING_SLOT_MAX];
651 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
652 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
653 GLuint attr;
654 GLbitfield64 inputsRead;
655 struct ureg_program *ureg;
656
657 GLboolean write_all = GL_FALSE;
658
659 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
660 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
661 uint fs_num_inputs = 0;
662
663 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
664 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
665 uint fs_num_outputs = 0;
666
667 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
668
669 /* Non-GLSL programs: */
670 if (!stfp->glsl_to_tgsi && !stfp->shader_program) {
671 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
672 if (st->ctx->Const.GLSLFragCoordIsSysVal)
673 _mesa_program_fragment_position_to_sysval(&stfp->Base);
674
675 /* This determines which states will be updated when the assembly
676 * shader is bound.
677 *
678 * fragment.position and glDrawPixels always use constants.
679 */
680 stfp->affected_states = ST_NEW_FS_STATE |
681 ST_NEW_SAMPLE_SHADING |
682 ST_NEW_FS_CONSTANTS;
683
684 if (stfp->ati_fs) {
685 /* Just set them for ATI_fs unconditionally. */
686 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
687 ST_NEW_FS_SAMPLERS;
688 } else {
689 /* ARB_fp */
690 if (stfp->Base.SamplersUsed)
691 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
692 ST_NEW_FS_SAMPLERS;
693 }
694 }
695
696 /*
697 * Convert Mesa program inputs to TGSI input register semantics.
698 */
699 inputsRead = stfp->Base.info.inputs_read;
700 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
701 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
702 const GLuint slot = fs_num_inputs++;
703
704 inputMapping[attr] = slot;
705 inputSlotToAttr[slot] = attr;
706
707 switch (attr) {
708 case VARYING_SLOT_POS:
709 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
710 input_semantic_index[slot] = 0;
711 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
712 break;
713 case VARYING_SLOT_COL0:
714 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
715 input_semantic_index[slot] = 0;
716 interpMode[slot] = stfp->glsl_to_tgsi ?
717 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
718 break;
719 case VARYING_SLOT_COL1:
720 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
721 input_semantic_index[slot] = 1;
722 interpMode[slot] = stfp->glsl_to_tgsi ?
723 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
724 break;
725 case VARYING_SLOT_FOGC:
726 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
727 input_semantic_index[slot] = 0;
728 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
729 break;
730 case VARYING_SLOT_FACE:
731 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
732 input_semantic_index[slot] = 0;
733 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
734 break;
735 case VARYING_SLOT_PRIMITIVE_ID:
736 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
737 input_semantic_index[slot] = 0;
738 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
739 break;
740 case VARYING_SLOT_LAYER:
741 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
742 input_semantic_index[slot] = 0;
743 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
744 break;
745 case VARYING_SLOT_VIEWPORT:
746 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
747 input_semantic_index[slot] = 0;
748 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
749 break;
750 case VARYING_SLOT_CLIP_DIST0:
751 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
752 input_semantic_index[slot] = 0;
753 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
754 break;
755 case VARYING_SLOT_CLIP_DIST1:
756 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
757 input_semantic_index[slot] = 1;
758 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
759 break;
760 case VARYING_SLOT_CULL_DIST0:
761 case VARYING_SLOT_CULL_DIST1:
762 /* these should have been lowered by GLSL */
763 assert(0);
764 break;
765 /* In most cases, there is nothing special about these
766 * inputs, so adopt a convention to use the generic
767 * semantic name and the mesa VARYING_SLOT_ number as the
768 * index.
769 *
770 * All that is required is that the vertex shader labels
771 * its own outputs similarly, and that the vertex shader
772 * generates at least every output required by the
773 * fragment shader plus fixed-function hardware (such as
774 * BFC).
775 *
776 * However, some drivers may need us to identify the PNTC and TEXi
777 * varyings if, for example, their capability to replace them with
778 * sprite coordinates is limited.
779 */
780 case VARYING_SLOT_PNTC:
781 if (st->needs_texcoord_semantic) {
782 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
783 input_semantic_index[slot] = 0;
784 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
785 break;
786 }
787 /* fall through */
788 case VARYING_SLOT_TEX0:
789 case VARYING_SLOT_TEX1:
790 case VARYING_SLOT_TEX2:
791 case VARYING_SLOT_TEX3:
792 case VARYING_SLOT_TEX4:
793 case VARYING_SLOT_TEX5:
794 case VARYING_SLOT_TEX6:
795 case VARYING_SLOT_TEX7:
796 if (st->needs_texcoord_semantic) {
797 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
798 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
799 interpMode[slot] = stfp->glsl_to_tgsi ?
800 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
801 break;
802 }
803 /* fall through */
804 case VARYING_SLOT_VAR0:
805 default:
806 /* Semantic indices should be zero-based because drivers may choose
807 * to assign a fixed slot determined by that index.
808 * This is useful because ARB_separate_shader_objects uses location
809 * qualifiers for linkage, and if the semantic index corresponds to
810          * these locations, linkage passes in the driver become unnecessary.
811 *
812 * If needs_texcoord_semantic is true, no semantic indices will be
813 * consumed for the TEXi varyings, and we can base the locations of
814 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
815 */
816 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
817 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
818 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
819 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
820 if (attr == VARYING_SLOT_PNTC)
821 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
822 else {
823 interpMode[slot] = stfp->glsl_to_tgsi ?
824 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
825 }
826 break;
827 }
828 }
829 else {
830 inputMapping[attr] = -1;
831 }
832 }
833
834 /*
835 * Semantics and mapping for outputs
836 */
837 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
838
839 /* if z is written, emit that first */
840 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
841 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
842 fs_output_semantic_index[fs_num_outputs] = 0;
843 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
844 fs_num_outputs++;
845 outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
846 }
847
848 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
849 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
850 fs_output_semantic_index[fs_num_outputs] = 0;
851 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
852 fs_num_outputs++;
853 outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
854 }
855
856 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
857 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
858 fs_output_semantic_index[fs_num_outputs] = 0;
859 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
860 fs_num_outputs++;
861 outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
862 }
863
864 /* handle remaining outputs (color) */
865 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
866 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
867 stfp->Base.SecondaryOutputsWritten;
868 const unsigned loc = attr % FRAG_RESULT_MAX;
869
870 if (written & BITFIELD64_BIT(loc)) {
871 switch (loc) {
872 case FRAG_RESULT_DEPTH:
873 case FRAG_RESULT_STENCIL:
874 case FRAG_RESULT_SAMPLE_MASK:
875 /* handled above */
876 assert(0);
877 break;
878 case FRAG_RESULT_COLOR:
879 write_all = GL_TRUE; /* fallthrough */
880 default: {
881 int index;
882 assert(loc == FRAG_RESULT_COLOR ||
883 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
884
885 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
886
887 if (attr >= FRAG_RESULT_MAX) {
888 /* Secondary color for dual source blending. */
889 assert(index == 0);
890 index++;
891 }
892
893 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
894 fs_output_semantic_index[fs_num_outputs] = index;
895 outputMapping[attr] = fs_num_outputs;
896 break;
897 }
898 }
899
900 fs_num_outputs++;
901 }
902 }
903
904    /* We have already compiled to NIR so just return */
905 if (stfp->shader_program)
906 return true;
907
908 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
909 if (ureg == NULL)
910 return false;
911
912 if (ST_DEBUG & DEBUG_MESA) {
913 _mesa_print_program(&stfp->Base);
914 _mesa_print_program_parameters(st->ctx, &stfp->Base);
915 debug_printf("\n");
916 }
917 if (write_all == GL_TRUE)
918 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
919
920 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
921 switch (stfp->Base.info.fs.depth_layout) {
922 case FRAG_DEPTH_LAYOUT_ANY:
923 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
924 TGSI_FS_DEPTH_LAYOUT_ANY);
925 break;
926 case FRAG_DEPTH_LAYOUT_GREATER:
927 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
928 TGSI_FS_DEPTH_LAYOUT_GREATER);
929 break;
930 case FRAG_DEPTH_LAYOUT_LESS:
931 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
932 TGSI_FS_DEPTH_LAYOUT_LESS);
933 break;
934 case FRAG_DEPTH_LAYOUT_UNCHANGED:
935 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
936 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
937 break;
938 default:
939 assert(0);
940 }
941 }
942
943 if (stfp->glsl_to_tgsi) {
944 st_translate_program(st->ctx,
945 PIPE_SHADER_FRAGMENT,
946 ureg,
947 stfp->glsl_to_tgsi,
948 &stfp->Base,
949 /* inputs */
950 fs_num_inputs,
951 inputMapping,
952 inputSlotToAttr,
953 input_semantic_name,
954 input_semantic_index,
955 interpMode,
956 /* outputs */
957 fs_num_outputs,
958 outputMapping,
959 fs_output_semantic_name,
960 fs_output_semantic_index);
961
962 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
963 } else if (stfp->ati_fs)
964 st_translate_atifs_program(ureg,
965 stfp->ati_fs,
966 &stfp->Base,
967 /* inputs */
968 fs_num_inputs,
969 inputMapping,
970 input_semantic_name,
971 input_semantic_index,
972 interpMode,
973 /* outputs */
974 fs_num_outputs,
975 outputMapping,
976 fs_output_semantic_name,
977 fs_output_semantic_index);
978 else
979 st_translate_mesa_program(st->ctx,
980 PIPE_SHADER_FRAGMENT,
981 ureg,
982 &stfp->Base,
983 /* inputs */
984 fs_num_inputs,
985 inputMapping,
986 input_semantic_name,
987 input_semantic_index,
988 interpMode,
989 /* outputs */
990 fs_num_outputs,
991 outputMapping,
992 fs_output_semantic_name,
993 fs_output_semantic_index);
994
995 unsigned num_tokens;
996 stfp->tgsi.tokens = ureg_get_tokens(ureg, &num_tokens);
997 ureg_destroy(ureg);
998
999 if (stfp->glsl_to_tgsi) {
1000 stfp->glsl_to_tgsi = NULL;
1001 st_store_tgsi_in_disk_cache(st, &stfp->Base, NULL, num_tokens);
1002 }
1003
1004 return stfp->tgsi.tokens != NULL;
1005 }
1006
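/**
 * Create a new fragment program variant for the given key.  Key-driven
 * lowering (clamp color, per-sample shading, glBitmap, glDrawPixels,
 * external YUV sampling) is applied to a clone of the program's NIR, or
 * produces new TGSI token streams from the program's tokens, before the
 * driver shader is created.
 */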
1007 static struct st_fp_variant *
1008 st_create_fp_variant(struct st_context *st,
1009 struct st_fragment_program *stfp,
1010 const struct st_fp_variant_key *key)
1011 {
1012 struct pipe_context *pipe = st->pipe;
1013 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1014 struct pipe_shader_state tgsi = {0};
1015 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1016 static const gl_state_index texcoord_state[STATE_LENGTH] =
1017 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1018 static const gl_state_index scale_state[STATE_LENGTH] =
1019 { STATE_INTERNAL, STATE_PT_SCALE };
1020 static const gl_state_index bias_state[STATE_LENGTH] =
1021 { STATE_INTERNAL, STATE_PT_BIAS };
1022
1023 if (!variant)
1024 return NULL;
1025
1026 if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
1027 tgsi.type = PIPE_SHADER_IR_NIR;
1028 tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
1029
1030 if (key->clamp_color)
1031 NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
1032
1033 if (key->persample_shading) {
1034 nir_shader *shader = tgsi.ir.nir;
1035 nir_foreach_variable(var, &shader->inputs)
1036 var->data.sample = true;
1037 }
1038
1039 assert(!(key->bitmap && key->drawpixels));
1040
1041 /* glBitmap */
1042 if (key->bitmap) {
1043 nir_lower_bitmap_options options = {0};
1044
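      /* Use the first sampler slot not referenced by the program for the
       * bitmap texture.
       */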
1045 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1046 options.sampler = variant->bitmap_sampler;
1047 options.swizzle_xxxx = (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM);
1048
1049 NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
1050 }
1051
1052 /* glDrawPixels (color only) */
1053 if (key->drawpixels) {
1054 nir_lower_drawpixels_options options = {{0}};
1055 unsigned samplers_used = stfp->Base.SamplersUsed;
1056
1057 /* Find the first unused slot. */
1058 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1059 options.drawpix_sampler = variant->drawpix_sampler;
1060 samplers_used |= (1 << variant->drawpix_sampler);
1061
1062 options.pixel_maps = key->pixelMaps;
1063 if (key->pixelMaps) {
1064 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1065 options.pixelmap_sampler = variant->pixelmap_sampler;
1066 }
1067
1068 options.scale_and_bias = key->scaleAndBias;
1069 if (key->scaleAndBias) {
1070 _mesa_add_state_reference(params, scale_state);
1071 memcpy(options.scale_state_tokens, scale_state,
1072 sizeof(options.scale_state_tokens));
1073 _mesa_add_state_reference(params, bias_state);
1074 memcpy(options.bias_state_tokens, bias_state,
1075 sizeof(options.bias_state_tokens));
1076 }
1077
1078 _mesa_add_state_reference(params, texcoord_state);
1079 memcpy(options.texcoord_state_tokens, texcoord_state,
1080 sizeof(options.texcoord_state_tokens));
1081
1082 NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
1083 }
1084
1085 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1086 nir_lower_tex_options options = {0};
1087 options.lower_y_uv_external = key->external.lower_nv12;
1088 options.lower_y_u_v_external = key->external.lower_iyuv;
1089 NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
1090 }
1091
1092 st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
1093
1094 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1095 /* This pass needs to happen *after* nir_lower_sampler */
1096 NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
1097 ~stfp->Base.SamplersUsed,
1098 key->external.lower_nv12,
1099 key->external.lower_iyuv);
1100 }
1101
1102 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1103 variant->key = *key;
1104
1105 return variant;
1106 }
1107
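   /* TGSI path: start from the program's tokens; each lowering below swaps
    * in a newly generated token stream and frees the previous one (the
    * program's own tokens are never freed here).
    */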
1108 tgsi.tokens = stfp->tgsi.tokens;
1109
1110 assert(!(key->bitmap && key->drawpixels));
1111
1112 /* Fix texture targets and add fog for ATI_fs */
1113 if (stfp->ati_fs) {
1114 const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
1115
1116 if (tokens)
1117 tgsi.tokens = tokens;
1118 else
1119 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1120 }
1121
1122 /* Emulate features. */
1123 if (key->clamp_color || key->persample_shading) {
1124 const struct tgsi_token *tokens;
1125 unsigned flags =
1126 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1127 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1128
1129 tokens = tgsi_emulate(tgsi.tokens, flags);
1130
1131 if (tokens) {
1132 if (tgsi.tokens != stfp->tgsi.tokens)
1133 tgsi_free_tokens(tgsi.tokens);
1134 tgsi.tokens = tokens;
1135 } else
1136 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1137 }
1138
1139 /* glBitmap */
1140 if (key->bitmap) {
1141 const struct tgsi_token *tokens;
1142
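      /* Find the first unused sampler slot. */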
1143 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1144
1145 tokens = st_get_bitmap_shader(tgsi.tokens,
1146 st->internal_target,
1147 variant->bitmap_sampler,
1148 st->needs_texcoord_semantic,
1149 st->bitmap.tex_format ==
1150 PIPE_FORMAT_L8_UNORM);
1151
1152 if (tokens) {
1153 if (tgsi.tokens != stfp->tgsi.tokens)
1154 tgsi_free_tokens(tgsi.tokens);
1155 tgsi.tokens = tokens;
1156 } else
1157 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1158 }
1159
1160 /* glDrawPixels (color only) */
1161 if (key->drawpixels) {
1162 const struct tgsi_token *tokens;
1163 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1164
1165 /* Find the first unused slot. */
1166 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1167
1168 if (key->pixelMaps) {
1169 unsigned samplers_used = stfp->Base.SamplersUsed |
1170 (1 << variant->drawpix_sampler);
1171
1172 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1173 }
1174
1175 if (key->scaleAndBias) {
1176 scale_const = _mesa_add_state_reference(params, scale_state);
1177 bias_const = _mesa_add_state_reference(params, bias_state);
1178 }
1179
1180 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1181
1182 tokens = st_get_drawpix_shader(tgsi.tokens,
1183 st->needs_texcoord_semantic,
1184 key->scaleAndBias, scale_const,
1185 bias_const, key->pixelMaps,
1186 variant->drawpix_sampler,
1187 variant->pixelmap_sampler,
1188 texcoord_const, st->internal_target);
1189
1190 if (tokens) {
1191 if (tgsi.tokens != stfp->tgsi.tokens)
1192 tgsi_free_tokens(tgsi.tokens);
1193 tgsi.tokens = tokens;
1194 } else
1195 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1196 }
1197
1198 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv)) {
1199 const struct tgsi_token *tokens;
1200
1201       /* samplers inserted would conflict, but this should be impossible: */
1202 assert(!(key->bitmap || key->drawpixels));
1203
1204 tokens = st_tgsi_lower_yuv(tgsi.tokens,
1205 ~stfp->Base.SamplersUsed,
1206 key->external.lower_nv12,
1207 key->external.lower_iyuv);
1208 if (tokens) {
1209 if (tgsi.tokens != stfp->tgsi.tokens)
1210 tgsi_free_tokens(tgsi.tokens);
1211 tgsi.tokens = tokens;
1212 } else {
1213 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1214 }
1215 }
1216
1217 if (ST_DEBUG & DEBUG_TGSI) {
1218 tgsi_dump(tgsi.tokens, 0);
1219 debug_printf("\n");
1220 }
1221
1222 /* fill in variant */
1223 variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
1224 variant->key = *key;
1225
1226 if (tgsi.tokens != stfp->tgsi.tokens)
1227 tgsi_free_tokens(tgsi.tokens);
1228 return variant;
1229 }
1230
1231 /**
1232 * Translate fragment program if needed.
1233 */
1234 struct st_fp_variant *
1235 st_get_fp_variant(struct st_context *st,
1236 struct st_fragment_program *stfp,
1237 const struct st_fp_variant_key *key)
1238 {
1239 struct st_fp_variant *fpv;
1240
1241 /* Search for existing variant */
1242 for (fpv = stfp->variants; fpv; fpv = fpv->next) {
1243 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1244 break;
1245 }
1246 }
1247
1248 if (!fpv) {
1249 /* create new */
1250 fpv = st_create_fp_variant(st, stfp, key);
1251 if (fpv) {
1252 if (key->bitmap || key->drawpixels) {
1253               /* Regular variants should always come before the
1254                * bitmap & drawpixels variants (unless there are
1255                * no regular variants), so that st_update_fp can
1256                * take a fast path when shader_has_one_variant
1257                * is set.
1258 */
1259 if (!stfp->variants) {
1260 stfp->variants = fpv;
1261 } else {
1262 /* insert into list after the first one */
1263 fpv->next = stfp->variants->next;
1264 stfp->variants->next = fpv;
1265 }
1266 } else {
1267 /* insert into list */
1268 fpv->next = stfp->variants;
1269 stfp->variants = fpv;
1270 }
1271 }
1272 }
1273
1274 return fpv;
1275 }
1276
1277
1278 /**
1279 * Translate a program. This is common code for geometry and tessellation
1280 * shaders.
1281 */
1282 static void
1283 st_translate_program_common(struct st_context *st,
1284 struct gl_program *prog,
1285 struct glsl_to_tgsi_visitor *glsl_to_tgsi,
1286 struct ureg_program *ureg,
1287 unsigned tgsi_processor,
1288 struct pipe_shader_state *out_state)
1289 {
1290 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1291 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1292 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1293 GLuint attr;
1294
1295 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1296 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1297 uint num_inputs = 0;
1298
1299 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1300 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1301 uint num_outputs = 0;
1302
1303 GLint i;
1304
1305 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1306 memset(inputMapping, 0, sizeof(inputMapping));
1307 memset(outputMapping, 0, sizeof(outputMapping));
1308 memset(out_state, 0, sizeof(*out_state));
1309
1310 if (prog->info.clip_distance_array_size)
1311 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1312 prog->info.clip_distance_array_size);
1313 if (prog->info.cull_distance_array_size)
1314 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1315 prog->info.cull_distance_array_size);
1316
1317 /*
1318 * Convert Mesa program inputs to TGSI input register semantics.
1319 */
1320 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1321 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1322 continue;
1323
1324 unsigned slot = num_inputs++;
1325
1326 inputMapping[attr] = slot;
1327 inputSlotToAttr[slot] = attr;
1328
1329 unsigned semantic_name, semantic_index;
1330 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1331 &semantic_name, &semantic_index);
1332 input_semantic_name[slot] = semantic_name;
1333 input_semantic_index[slot] = semantic_index;
1334 }
1335
1336 /* Also add patch inputs. */
1337 for (attr = 0; attr < 32; attr++) {
1338 if (prog->info.patch_inputs_read & (1u << attr)) {
1339 GLuint slot = num_inputs++;
1340 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1341
1342 inputMapping[patch_attr] = slot;
1343 inputSlotToAttr[slot] = patch_attr;
1344 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1345 input_semantic_index[slot] = attr;
1346 }
1347 }
1348
1349 /* initialize output semantics to defaults */
1350 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1351 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1352 output_semantic_index[i] = 0;
1353 }
1354
1355 /*
1356 * Determine number of outputs, the (default) output register
1357 * mapping and the semantic information for each output.
1358 */
1359 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1360 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1361 GLuint slot = num_outputs++;
1362
1363 outputMapping[attr] = slot;
1364
1365 unsigned semantic_name, semantic_index;
1366 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1367 &semantic_name, &semantic_index);
1368 output_semantic_name[slot] = semantic_name;
1369 output_semantic_index[slot] = semantic_index;
1370 }
1371 }
1372
1373 /* Also add patch outputs. */
1374 for (attr = 0; attr < 32; attr++) {
1375 if (prog->info.patch_outputs_written & (1u << attr)) {
1376 GLuint slot = num_outputs++;
1377 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1378
1379 outputMapping[patch_attr] = slot;
1380 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1381 output_semantic_index[slot] = attr;
1382 }
1383 }
1384
1385 st_translate_program(st->ctx,
1386 tgsi_processor,
1387 ureg,
1388 glsl_to_tgsi,
1389 prog,
1390 /* inputs */
1391 num_inputs,
1392 inputMapping,
1393 inputSlotToAttr,
1394 input_semantic_name,
1395 input_semantic_index,
1396 NULL,
1397 /* outputs */
1398 num_outputs,
1399 outputMapping,
1400 output_semantic_name,
1401 output_semantic_index);
1402
1403 unsigned num_tokens;
1404 out_state->tokens = ureg_get_tokens(ureg, &num_tokens);
1405 ureg_destroy(ureg);
1406
1407 st_translate_stream_output_info(glsl_to_tgsi,
1408 outputMapping,
1409 &out_state->stream_output);
1410
1411 st_store_tgsi_in_disk_cache(st, prog, out_state, num_tokens);
1412
1413 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1414 _mesa_print_program(prog);
1415 debug_printf("\n");
1416 }
1417
1418 if (ST_DEBUG & DEBUG_TGSI) {
1419 tgsi_dump(out_state->tokens, 0);
1420 debug_printf("\n");
1421 }
1422 }
1423
1424
1425 /**
1426 * Translate a geometry program to create a new variant.
1427 */
1428 bool
1429 st_translate_geometry_program(struct st_context *st,
1430 struct st_common_program *stgp)
1431 {
1432 struct ureg_program *ureg;
1433
1434 /* We have already compiled to NIR so just return */
1435 if (stgp->shader_program)
1436 return true;
1437
1438 ureg = ureg_create_with_screen(PIPE_SHADER_GEOMETRY, st->pipe->screen);
1439 if (ureg == NULL)
1440 return false;
1441
1442 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1443 stgp->Base.info.gs.input_primitive);
1444 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1445 stgp->Base.info.gs.output_primitive);
1446 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1447 stgp->Base.info.gs.vertices_out);
1448 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1449 stgp->Base.info.gs.invocations);
1450
1451 st_translate_program_common(st, &stgp->Base, stgp->glsl_to_tgsi, ureg,
1452 PIPE_SHADER_GEOMETRY, &stgp->tgsi);
1453
1454 free_glsl_to_tgsi_visitor(stgp->glsl_to_tgsi);
1455 stgp->glsl_to_tgsi = NULL;
1456 return true;
1457 }
1458
1459
1460 /**
1461 * Get/create a basic program variant.
1462 */
1463 struct st_basic_variant *
1464 st_get_basic_variant(struct st_context *st,
1465 unsigned pipe_shader,
1466 struct st_common_program *prog)
1467 {
1468 struct pipe_context *pipe = st->pipe;
1469 struct st_basic_variant *v;
1470 struct st_basic_variant_key key;
1471 struct pipe_shader_state tgsi = {0};
1472 memset(&key, 0, sizeof(key));
1473 key.st = st->has_shareable_shaders ? NULL : st;
1474
1475 /* Search for existing variant */
1476 for (v = prog->variants; v; v = v->next) {
1477 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1478 break;
1479 }
1480 }
1481
1482 if (!v) {
1483 /* create new */
1484 v = CALLOC_STRUCT(st_basic_variant);
1485 if (v) {
1486
1487 if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
1488 tgsi.type = PIPE_SHADER_IR_NIR;
1489 tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
1490 st_finalize_nir(st, &prog->Base, prog->shader_program,
1491 tgsi.ir.nir);
1492 } else
1493 tgsi = prog->tgsi;
1494 /* fill in new variant */
1495 switch (pipe_shader) {
1496 case PIPE_SHADER_TESS_CTRL:
1497 v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
1498 break;
1499 case PIPE_SHADER_TESS_EVAL:
1500 v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
1501 break;
1502 case PIPE_SHADER_GEOMETRY:
1503 v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
1504 break;
1505 default:
1506 assert(!"unhandled shader type");
1507 free(v);
1508 return NULL;
1509 }
1510
1511 v->key = key;
1512
1513 /* insert into list */
1514 v->next = prog->variants;
1515 prog->variants = v;
1516 }
1517 }
1518
1519 return v;
1520 }
1521
1522
1523 /**
1524 * Translate a tessellation control program to create a new variant.
1525 */
1526 bool
1527 st_translate_tessctrl_program(struct st_context *st,
1528 struct st_common_program *sttcp)
1529 {
1530 struct ureg_program *ureg;
1531
1532    /* We have already compiled to NIR so just return */
1533 if (sttcp->shader_program)
1534 return true;
1535
1536 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_CTRL, st->pipe->screen);
1537 if (ureg == NULL)
1538 return false;
1539
1540 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1541 sttcp->Base.info.tess.tcs_vertices_out);
1542
1543 st_translate_program_common(st, &sttcp->Base, sttcp->glsl_to_tgsi, ureg,
1544 PIPE_SHADER_TESS_CTRL, &sttcp->tgsi);
1545
1546 free_glsl_to_tgsi_visitor(sttcp->glsl_to_tgsi);
1547 sttcp->glsl_to_tgsi = NULL;
1548 return true;
1549 }
1550
1551
1552 /**
1553 * Translate a tessellation evaluation program to create a new variant.
1554 */
1555 bool
1556 st_translate_tesseval_program(struct st_context *st,
1557 struct st_common_program *sttep)
1558 {
1559 struct ureg_program *ureg;
1560
1561    /* We have already compiled to NIR so just return */
1562 if (sttep->shader_program)
1563 return true;
1564
1565 ureg = ureg_create_with_screen(PIPE_SHADER_TESS_EVAL, st->pipe->screen);
1566 if (ureg == NULL)
1567 return false;
1568
1569 if (sttep->Base.info.tess.primitive_mode == GL_ISOLINES)
1570 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1571 else
1572 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1573 sttep->Base.info.tess.primitive_mode);
1574
1575 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1576 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1577 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1578 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1579 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1580
1581 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1582 (sttep->Base.info.tess.spacing + 1) % 3);
1583
1584 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1585 !sttep->Base.info.tess.ccw);
1586 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1587 sttep->Base.info.tess.point_mode);
1588
1589 st_translate_program_common(st, &sttep->Base, sttep->glsl_to_tgsi,
1590 ureg, PIPE_SHADER_TESS_EVAL, &sttep->tgsi);
1591
1592 free_glsl_to_tgsi_visitor(sttep->glsl_to_tgsi);
1593 sttep->glsl_to_tgsi = NULL;
1594 return true;
1595 }
1596
1597
1598 /**
1599 * Translate a compute program to create a new variant.
1600 */
1601 bool
1602 st_translate_compute_program(struct st_context *st,
1603 struct st_compute_program *stcp)
1604 {
1605 struct ureg_program *ureg;
1606 struct pipe_shader_state prog;
1607
1608 if (stcp->shader_program) {
1609 /* no compute variants: */
1610 st_finalize_nir(st, &stcp->Base, stcp->shader_program,
1611 (struct nir_shader *) stcp->tgsi.prog);
1612
1613 return true;
1614 }
1615
1616 ureg = ureg_create_with_screen(PIPE_SHADER_COMPUTE, st->pipe->screen);
1617 if (ureg == NULL)
1618 return false;
1619
1620 st_translate_program_common(st, &stcp->Base, stcp->glsl_to_tgsi, ureg,
1621 PIPE_SHADER_COMPUTE, &prog);
1622
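   /* Fill in the pipe_compute_state: the generated TGSI tokens plus the
    * shared (local) memory requirement from the GLSL compute shader info.
    */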
1623 stcp->tgsi.ir_type = PIPE_SHADER_IR_TGSI;
1624 stcp->tgsi.prog = prog.tokens;
1625 stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
1626 stcp->tgsi.req_private_mem = 0;
1627 stcp->tgsi.req_input_mem = 0;
1628
1629 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1630 stcp->glsl_to_tgsi = NULL;
1631 return true;
1632 }
1633
1634
1635 /**
1636 * Get/create compute program variant.
1637 */
1638 struct st_basic_variant *
1639 st_get_cp_variant(struct st_context *st,
1640 struct pipe_compute_state *tgsi,
1641 struct st_basic_variant **variants)
1642 {
1643 struct pipe_context *pipe = st->pipe;
1644 struct st_basic_variant *v;
1645 struct st_basic_variant_key key;
1646
1647 memset(&key, 0, sizeof(key));
1648 key.st = st->has_shareable_shaders ? NULL : st;
1649
1650 /* Search for existing variant */
1651 for (v = *variants; v; v = v->next) {
1652 if (memcmp(&v->key, &key, sizeof(key)) == 0) {
1653 break;
1654 }
1655 }
1656
1657 if (!v) {
1658 /* create new */
1659 v = CALLOC_STRUCT(st_basic_variant);
1660 if (v) {
1661 /* fill in new variant */
1662 v->driver_shader = pipe->create_compute_state(pipe, tgsi);
1663 v->key = key;
1664
1665 /* insert into list */
1666 v->next = *variants;
1667 *variants = v;
1668 }
1669 }
1670
1671 return v;
1672 }
1673
1674
1675 /**
1676 * Vert/Geom/Frag programs have per-context variants. Free all the
1677 * variants attached to the given program which match the given context.
1678 */
1679 static void
1680 destroy_program_variants(struct st_context *st, struct gl_program *target)
1681 {
1682 if (!target || target == &_mesa_DummyProgram)
1683 return;
1684
1685 switch (target->Target) {
1686 case GL_VERTEX_PROGRAM_ARB:
1687 {
1688 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1689 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1690
1691 for (vpv = stvp->variants; vpv; ) {
1692 struct st_vp_variant *next = vpv->next;
1693 if (vpv->key.st == st) {
1694 /* unlink from list */
1695 *prevPtr = next;
1696 /* destroy this variant */
1697 delete_vp_variant(st, vpv);
1698 }
1699 else {
1700 prevPtr = &vpv->next;
1701 }
1702 vpv = next;
1703 }
1704 }
1705 break;
1706 case GL_FRAGMENT_PROGRAM_ARB:
1707 {
1708 struct st_fragment_program *stfp =
1709 (struct st_fragment_program *) target;
1710 struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
1711
1712 for (fpv = stfp->variants; fpv; ) {
1713 struct st_fp_variant *next = fpv->next;
1714 if (fpv->key.st == st) {
1715 /* unlink from list */
1716 *prevPtr = next;
1717 /* destroy this variant */
1718 delete_fp_variant(st, fpv);
1719 }
1720 else {
1721 prevPtr = &fpv->next;
1722 }
1723 fpv = next;
1724 }
1725 }
1726 break;
1727 case GL_GEOMETRY_PROGRAM_NV:
1728 case GL_TESS_CONTROL_PROGRAM_NV:
1729 case GL_TESS_EVALUATION_PROGRAM_NV:
1730 case GL_COMPUTE_PROGRAM_NV:
1731 {
1732 struct st_common_program *p = st_common_program(target);
1733 struct st_compute_program *cp = (struct st_compute_program*)target;
1734 struct st_basic_variant **variants =
1735 target->Target == GL_COMPUTE_PROGRAM_NV ? &cp->variants :
1736 &p->variants;
1737 struct st_basic_variant *v, **prevPtr = variants;
1738
1739 for (v = *variants; v; ) {
1740 struct st_basic_variant *next = v->next;
1741 if (v->key.st == st) {
1742 /* unlink from list */
1743 *prevPtr = next;
1744 /* destroy this variant */
1745 delete_basic_variant(st, v, target->Target);
1746 }
1747 else {
1748 prevPtr = &v->next;
1749 }
1750 v = next;
1751 }
1752 }
1753 break;
1754 default:
1755       _mesa_problem(NULL, "Unexpected program target 0x%x in "
1756                     "destroy_program_variants()", target->Target);
1757 }
1758 }
1759
1760
1761 /**
1762 * Callback for _mesa_HashWalk. Free all the shader's program variants
1763 * which match the given context.
1764 */
1765 static void
1766 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1767 {
1768 struct st_context *st = (struct st_context *) userData;
1769 struct gl_shader *shader = (struct gl_shader *) data;
1770
1771 switch (shader->Type) {
1772 case GL_SHADER_PROGRAM_MESA:
1773 {
1774 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1775 GLuint i;
1776
1777 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1778 if (shProg->_LinkedShaders[i])
1779 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1780 }
1781 }
1782 break;
1783 case GL_VERTEX_SHADER:
1784 case GL_FRAGMENT_SHADER:
1785 case GL_GEOMETRY_SHADER:
1786 case GL_TESS_CONTROL_SHADER:
1787 case GL_TESS_EVALUATION_SHADER:
1788 case GL_COMPUTE_SHADER:
1789 break;
1790 default:
1791 assert(0);
1792 }
1793 }
1794
1795
1796 /**
1797 * Callback for _mesa_HashWalk. Free all the program variants which match
1798 * the given context.
1799 */
1800 static void
1801 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1802 {
1803 struct st_context *st = (struct st_context *) userData;
1804 struct gl_program *program = (struct gl_program *) data;
1805 destroy_program_variants(st, program);
1806 }
1807
1808
1809 /**
1810 * Walk over all shaders and programs to delete any variants which
1811 * belong to the given context.
1812 * This is called during context tear-down.
1813 */
1814 void
1815 st_destroy_program_variants(struct st_context *st)
1816 {
1817 /* If shaders can be shared with other contexts, the last context will
1818 * call DeleteProgram on all shaders, releasing everything.
1819 */
1820 if (st->has_shareable_shaders)
1821 return;
1822
1823 /* ARB vert/frag program */
1824 _mesa_HashWalk(st->ctx->Shared->Programs,
1825 destroy_program_variants_cb, st);
1826
1827 /* GLSL vert/frag/geom shaders */
1828 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1829 destroy_shader_program_variants_cb, st);
1830 }
1831
1832
1833 /**
1834 * For debugging, print/dump the current vertex program.
1835 */
1836 void
1837 st_print_current_vertex_program(void)
1838 {
1839 GET_CURRENT_CONTEXT(ctx);
1840
1841 if (ctx->VertexProgram._Current) {
1842 struct st_vertex_program *stvp =
1843 (struct st_vertex_program *) ctx->VertexProgram._Current;
1844 struct st_vp_variant *stv;
1845
1846 debug_printf("Vertex program %u\n", stvp->Base.Id);
1847
1848 for (stv = stvp->variants; stv; stv = stv->next) {
1849 debug_printf("variant %p\n", stv);
1850 tgsi_dump(stv->tgsi.tokens, 0);
1851 }
1852 }
1853 }
1854
1855
1856 /**
1857 * Compile one shader variant.
1858 */
1859 void
1860 st_precompile_shader_variant(struct st_context *st,
1861 struct gl_program *prog)
1862 {
1863 switch (prog->Target) {
1864 case GL_VERTEX_PROGRAM_ARB: {
1865 struct st_vertex_program *p = (struct st_vertex_program *)prog;
1866 struct st_vp_variant_key key;
1867
1868 memset(&key, 0, sizeof(key));
1869 key.st = st->has_shareable_shaders ? NULL : st;
1870 st_get_vp_variant(st, p, &key);
1871 break;
1872 }
1873
1874 case GL_TESS_CONTROL_PROGRAM_NV: {
1875 struct st_common_program *p = st_common_program(prog);
1876 st_get_basic_variant(st, PIPE_SHADER_TESS_CTRL, p);
1877 break;
1878 }
1879
1880 case GL_TESS_EVALUATION_PROGRAM_NV: {
1881 struct st_common_program *p = st_common_program(prog);
1882 st_get_basic_variant(st, PIPE_SHADER_TESS_EVAL, p);
1883 break;
1884 }
1885
1886 case GL_GEOMETRY_PROGRAM_NV: {
1887 struct st_common_program *p = st_common_program(prog);
1888 st_get_basic_variant(st, PIPE_SHADER_GEOMETRY, p);
1889 break;
1890 }
1891
1892 case GL_FRAGMENT_PROGRAM_ARB: {
1893 struct st_fragment_program *p = (struct st_fragment_program *)prog;
1894 struct st_fp_variant_key key;
1895
1896 memset(&key, 0, sizeof(key));
1897 key.st = st->has_shareable_shaders ? NULL : st;
1898 st_get_fp_variant(st, p, &key);
1899 break;
1900 }
1901
1902 case GL_COMPUTE_PROGRAM_NV: {
1903 struct st_compute_program *p = (struct st_compute_program *)prog;
1904 st_get_cp_variant(st, &p->tgsi, &p->variants);
1905 break;
1906 }
1907
1908 default:
1909 assert(0);
1910 }
1911 }