st/mesa: use pipe_screen::finalize_nir
[mesa.git] / src / mesa / state_tracker / st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35 #include "main/imports.h"
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/prog_to_nir.h"
41 #include "program/programopt.h"
42
43 #include "compiler/nir/nir.h"
44
45 #include "pipe/p_context.h"
46 #include "pipe/p_defines.h"
47 #include "pipe/p_shader_tokens.h"
48 #include "draw/draw_context.h"
49 #include "tgsi/tgsi_dump.h"
50 #include "tgsi/tgsi_emulate.h"
51 #include "tgsi/tgsi_parse.h"
52 #include "tgsi/tgsi_ureg.h"
53
54 #include "st_debug.h"
55 #include "st_cb_bitmap.h"
56 #include "st_cb_drawpixels.h"
57 #include "st_context.h"
58 #include "st_tgsi_lower_depth_clamp.h"
59 #include "st_tgsi_lower_yuv.h"
60 #include "st_program.h"
61 #include "st_mesa_to_tgsi.h"
62 #include "st_atifs_to_tgsi.h"
63 #include "st_nir.h"
64 #include "st_shader_cache.h"
65 #include "cso_cache/cso_context.h"
66
67
68
69 static void
70 set_affected_state_flags(uint64_t *states,
71 struct gl_program *prog,
72 uint64_t new_constants,
73 uint64_t new_sampler_views,
74 uint64_t new_samplers,
75 uint64_t new_images,
76 uint64_t new_ubos,
77 uint64_t new_ssbos,
78 uint64_t new_atomics)
79 {
80 if (prog->Parameters->NumParameters)
81 *states |= new_constants;
82
83 if (prog->info.num_textures)
84 *states |= new_sampler_views | new_samplers;
85
86 if (prog->info.num_images)
87 *states |= new_images;
88
89 if (prog->info.num_ubos)
90 *states |= new_ubos;
91
92 if (prog->info.num_ssbos)
93 *states |= new_ssbos;
94
95 if (prog->info.num_abos)
96 *states |= new_atomics;
97 }
98
99 /**
100 * This determines which states will be updated when the shader is bound.
101 */
102 void
103 st_set_prog_affected_state_flags(struct gl_program *prog)
104 {
105 uint64_t *states;
106
107 switch (prog->info.stage) {
108 case MESA_SHADER_VERTEX:
109 states = &((struct st_vertex_program*)prog)->affected_states;
110
111 *states = ST_NEW_VS_STATE |
112 ST_NEW_RASTERIZER |
113 ST_NEW_VERTEX_ARRAYS;
114
115 set_affected_state_flags(states, prog,
116 ST_NEW_VS_CONSTANTS,
117 ST_NEW_VS_SAMPLER_VIEWS,
118 ST_NEW_VS_SAMPLERS,
119 ST_NEW_VS_IMAGES,
120 ST_NEW_VS_UBOS,
121 ST_NEW_VS_SSBOS,
122 ST_NEW_VS_ATOMICS);
123 break;
124
125 case MESA_SHADER_TESS_CTRL:
126 states = &(st_common_program(prog))->affected_states;
127
128 *states = ST_NEW_TCS_STATE;
129
130 set_affected_state_flags(states, prog,
131 ST_NEW_TCS_CONSTANTS,
132 ST_NEW_TCS_SAMPLER_VIEWS,
133 ST_NEW_TCS_SAMPLERS,
134 ST_NEW_TCS_IMAGES,
135 ST_NEW_TCS_UBOS,
136 ST_NEW_TCS_SSBOS,
137 ST_NEW_TCS_ATOMICS);
138 break;
139
140 case MESA_SHADER_TESS_EVAL:
141 states = &(st_common_program(prog))->affected_states;
142
143 *states = ST_NEW_TES_STATE |
144 ST_NEW_RASTERIZER;
145
146 set_affected_state_flags(states, prog,
147 ST_NEW_TES_CONSTANTS,
148 ST_NEW_TES_SAMPLER_VIEWS,
149 ST_NEW_TES_SAMPLERS,
150 ST_NEW_TES_IMAGES,
151 ST_NEW_TES_UBOS,
152 ST_NEW_TES_SSBOS,
153 ST_NEW_TES_ATOMICS);
154 break;
155
156 case MESA_SHADER_GEOMETRY:
157 states = &(st_common_program(prog))->affected_states;
158
159 *states = ST_NEW_GS_STATE |
160 ST_NEW_RASTERIZER;
161
162 set_affected_state_flags(states, prog,
163 ST_NEW_GS_CONSTANTS,
164 ST_NEW_GS_SAMPLER_VIEWS,
165 ST_NEW_GS_SAMPLERS,
166 ST_NEW_GS_IMAGES,
167 ST_NEW_GS_UBOS,
168 ST_NEW_GS_SSBOS,
169 ST_NEW_GS_ATOMICS);
170 break;
171
172 case MESA_SHADER_FRAGMENT:
173 states = &((struct st_common_program*)prog)->affected_states;
174
175 /* gl_FragCoord and glDrawPixels always use constants. */
176 *states = ST_NEW_FS_STATE |
177 ST_NEW_SAMPLE_SHADING |
178 ST_NEW_FS_CONSTANTS;
179
180 set_affected_state_flags(states, prog,
181 ST_NEW_FS_CONSTANTS,
182 ST_NEW_FS_SAMPLER_VIEWS,
183 ST_NEW_FS_SAMPLERS,
184 ST_NEW_FS_IMAGES,
185 ST_NEW_FS_UBOS,
186 ST_NEW_FS_SSBOS,
187 ST_NEW_FS_ATOMICS);
188 break;
189
190 case MESA_SHADER_COMPUTE:
191 states = &((struct st_common_program*)prog)->affected_states;
192
193 *states = ST_NEW_CS_STATE;
194
195 set_affected_state_flags(states, prog,
196 ST_NEW_CS_CONSTANTS,
197 ST_NEW_CS_SAMPLER_VIEWS,
198 ST_NEW_CS_SAMPLERS,
199 ST_NEW_CS_IMAGES,
200 ST_NEW_CS_UBOS,
201 ST_NEW_CS_SSBOS,
202 ST_NEW_CS_ATOMICS);
203 break;
204
205 default:
206 unreachable("unhandled shader stage");
207 }
208 }
209
210 static void
211 delete_ir(struct pipe_shader_state *ir)
212 {
213 if (ir->tokens) {
214 ureg_free_tokens(ir->tokens);
215 ir->tokens = NULL;
216 }
217
218 /* Note: Any setup of ->ir.nir that has had pipe->create_*_state called on
219 * it has resulted in the driver taking ownership of the NIR. Those
220 * callers should be NULLing out the nir field in any pipe_shader_state
221 * that might have this called in order to indicate that.
222 *
223 * GLSL IR and ARB programs will have set gl_program->nir to the same
224 * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
225 */
226 }
227
228 /**
229 * Delete a vertex program variant. Note the caller must unlink
230 * the variant from the linked list.
231 */
232 static void
233 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
234 {
235 if (vpv->driver_shader) {
236 if (st->has_shareable_shaders || vpv->key.st == st) {
237 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
238 } else {
239 st_save_zombie_shader(vpv->key.st, PIPE_SHADER_VERTEX,
240 vpv->driver_shader);
241 }
242 }
243
244 if (vpv->draw_shader)
245 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
246
247 if (vpv->tokens)
248 ureg_free_tokens(vpv->tokens);
249
250 free( vpv );
251 }
252
253
254
255 /**
256 * Clean out any old compilations:
257 */
258 void
259 st_release_vp_variants( struct st_context *st,
260 struct st_vertex_program *stvp )
261 {
262 struct st_vp_variant *vpv;
263
264 for (vpv = stvp->variants; vpv; ) {
265 struct st_vp_variant *next = vpv->next;
266 delete_vp_variant(st, vpv);
267 vpv = next;
268 }
269
270 stvp->variants = NULL;
271
272 delete_ir(&stvp->state);
273 }
274
275
276
277 /**
278 * Delete a fragment program variant. Note the caller must unlink
279 * the variant from the linked list.
280 */
281 static void
282 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
283 {
284 if (fpv->driver_shader) {
285 if (st->has_shareable_shaders || fpv->key.st == st) {
286 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
287 } else {
288 st_save_zombie_shader(fpv->key.st, PIPE_SHADER_FRAGMENT,
289 fpv->driver_shader);
290 }
291 }
292
293 free(fpv);
294 }
295
296
297 /**
298 * Free all variants of a fragment program.
299 */
300 void
301 st_release_fp_variants(struct st_context *st, struct st_common_program *stfp)
302 {
303 struct st_fp_variant *fpv;
304
305 for (fpv = stfp->fp_variants; fpv; ) {
306 struct st_fp_variant *next = fpv->next;
307 delete_fp_variant(st, fpv);
308 fpv = next;
309 }
310
311 stfp->fp_variants = NULL;
312
313 delete_ir(&stfp->state);
314 }
315
316
317 /**
318 * Delete a basic program variant. Note the caller must unlink
319 * the variant from the linked list.
320 */
321 static void
322 delete_basic_variant(struct st_context *st, struct st_common_variant *v,
323 GLenum target)
324 {
325 if (v->driver_shader) {
326 if (st->has_shareable_shaders || v->key.st == st) {
327 /* The shader's context matches the calling context, or we
328 * don't care.
329 */
330 switch (target) {
331 case GL_TESS_CONTROL_PROGRAM_NV:
332 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
333 break;
334 case GL_TESS_EVALUATION_PROGRAM_NV:
335 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
336 break;
337 case GL_GEOMETRY_PROGRAM_NV:
338 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
339 break;
340 case GL_COMPUTE_PROGRAM_NV:
341 cso_delete_compute_shader(st->cso_context, v->driver_shader);
342 break;
343 default:
344 unreachable("bad shader type in delete_basic_variant");
345 }
346 } else {
347 /* We can't delete a shader with a context different from the one
348 * that created it. Add it to the creating context's zombie list.
349 */
350 enum pipe_shader_type type;
351 switch (target) {
352 case GL_TESS_CONTROL_PROGRAM_NV:
353 type = PIPE_SHADER_TESS_CTRL;
354 break;
355 case GL_TESS_EVALUATION_PROGRAM_NV:
356 type = PIPE_SHADER_TESS_EVAL;
357 break;
358 case GL_GEOMETRY_PROGRAM_NV:
359 type = PIPE_SHADER_GEOMETRY;
360 break;
361 default:
362 unreachable("");
363 }
364 st_save_zombie_shader(v->key.st, type, v->driver_shader);
365 }
366 }
367
368 free(v);
369 }
370
371
372 /**
373 * Free all basic program variants.
374 */
375 void
376 st_release_common_variants(struct st_context *st, struct st_common_program *p)
377 {
378 struct st_common_variant *v;
379
380 for (v = p->variants; v; ) {
381 struct st_common_variant *next = v->next;
382 delete_basic_variant(st, v, p->Base.Target);
383 v = next;
384 }
385
386 p->variants = NULL;
387 delete_ir(&p->state);
388 }
389
390 void
391 st_finalize_nir_before_variants(struct nir_shader *nir)
392 {
393 NIR_PASS_V(nir, nir_split_var_copies);
394 NIR_PASS_V(nir, nir_lower_var_copies);
395 if (nir->options->lower_all_io_to_temps ||
396 nir->options->lower_all_io_to_elements ||
397 nir->info.stage == MESA_SHADER_VERTEX ||
398 nir->info.stage == MESA_SHADER_GEOMETRY) {
399 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
400 } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
401 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
402 }
403
404 st_nir_assign_vs_in_locations(nir);
405 }
406
407 /**
408 * Translate ARB (asm) program to NIR
409 */
/**
 * Translate ARB (asm) program to NIR
 *
 * Converts a Mesa assembly-style gl_program into a NIR shader and runs the
 * standard post-translation lowering/optimization pipeline.  The pass order
 * below is significant; do not reorder without checking each dependency.
 *
 * \return a newly allocated nir_shader (ownership passes to the caller).
 */
static nir_shader *
st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
                         gl_shader_stage stage)
{
   struct pipe_screen *screen = st->pipe->screen;
   const struct gl_shader_compiler_options *options =
      &st->ctx->Const.ShaderCompilerOptions[stage];

   /* Translate to NIR */
   nir_shader *nir = prog_to_nir(prog, options->NirOptions);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");

   /* Window-position transform must run before system-value lowering so it
    * sees the original fragcoord accesses.
    */
   NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
   NIR_PASS_V(nir, nir_lower_system_values);

   /* Optimise NIR */
   NIR_PASS_V(nir, nir_opt_constant_folding);
   st_nir_opts(nir);
   st_finalize_nir_before_variants(nir);

   /* If the driver supports finalizing twice, do the first (shared)
    * finalization now; variant creation may finalize again later.
    */
   if (st->allow_st_finalize_nir_twice)
      st_finalize_nir(st, prog, NULL, nir, true);

   nir_validate_shader(nir, "after st/glsl finalize_nir");

   return nir;
}
438
439 void
440 st_prepare_vertex_program(struct st_vertex_program *stvp)
441 {
442 stvp->num_inputs = 0;
443 memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
444 memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));
445
446 /* Determine number of inputs, the mappings between VERT_ATTRIB_x
447 * and TGSI generic input indexes, plus input attrib semantic info.
448 */
449 for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
450 if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
451 stvp->input_to_index[attr] = stvp->num_inputs;
452 stvp->index_to_input[stvp->num_inputs] = attr;
453 stvp->num_inputs++;
454
455 if ((stvp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
456 /* add placeholder for second part of a double attribute */
457 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
458 stvp->num_inputs++;
459 }
460 }
461 }
462 /* pre-setup potentially unused edgeflag input */
463 stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
464 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
465
466 /* Compute mapping of vertex program outputs to slots. */
467 unsigned num_outputs = 0;
468 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
469 if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr))
470 stvp->result_to_output[attr] = num_outputs++;
471 }
472 /* pre-setup potentially unused edgeflag output */
473 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
474 }
475
476 void
477 st_translate_stream_output_info(struct gl_program *prog)
478 {
479 struct gl_transform_feedback_info *info = prog->sh.LinkedTransformFeedback;
480 if (!info)
481 return;
482
483 /* Determine the (default) output register mapping for each output. */
484 unsigned num_outputs = 0;
485 ubyte output_mapping[VARYING_SLOT_TESS_MAX];
486 memset(output_mapping, 0, sizeof(output_mapping));
487
488 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
489 if (prog->info.outputs_written & BITFIELD64_BIT(attr))
490 output_mapping[attr] = num_outputs++;
491 }
492
493 /* Translate stream output info. */
494 struct pipe_stream_output_info *so_info = NULL;
495 if (prog->info.stage == MESA_SHADER_VERTEX)
496 so_info = &((struct st_vertex_program*)prog)->state.stream_output;
497 else
498 so_info = &((struct st_common_program*)prog)->state.stream_output;
499
500 for (unsigned i = 0; i < info->NumOutputs; i++) {
501 so_info->output[i].register_index =
502 output_mapping[info->Outputs[i].OutputRegister];
503 so_info->output[i].start_component = info->Outputs[i].ComponentOffset;
504 so_info->output[i].num_components = info->Outputs[i].NumComponents;
505 so_info->output[i].output_buffer = info->Outputs[i].OutputBuffer;
506 so_info->output[i].dst_offset = info->Outputs[i].DstOffset;
507 so_info->output[i].stream = info->Outputs[i].StreamId;
508 }
509
510 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
511 so_info->stride[i] = info->Buffers[i].Stride;
512 }
513 so_info->num_outputs = info->NumOutputs;
514 }
515
516 /**
517 * Translate a vertex program.
518 */
/**
 * Translate a vertex program.
 *
 * Produces TGSI tokens for the program (always, for use by the draw module
 * in select/feedback/rasterpos paths) and, when the driver prefers NIR,
 * additionally translates the ARB program to NIR and hands ownership of it
 * to stvp->state / stvp->Base.nir.
 *
 * \return true on success, false if ureg creation or translation failed.
 */
bool
st_translate_vertex_program(struct st_context *st,
                            struct st_vertex_program *stvp)
{
   struct ureg_program *ureg;
   enum pipe_error error;
   unsigned num_outputs = 0;
   unsigned attr;
   ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
   ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};

   /* ARB_position_invariant: prepend the ftransform (MVP) code. */
   if (stvp->Base.arb.IsPositionInvariant)
      _mesa_insert_mvp_code(st->ctx, &stvp->Base);

   st_prepare_vertex_program(stvp);

   /* ARB_vp: */
   if (!stvp->glsl_to_tgsi) {
      _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);

      /* This determines which states will be updated when the assembly
       * shader is bound.
       */
      stvp->affected_states = ST_NEW_VS_STATE |
                              ST_NEW_RASTERIZER |
                              ST_NEW_VERTEX_ARRAYS;

      if (stvp->Base.Parameters->NumParameters)
         stvp->affected_states |= ST_NEW_VS_CONSTANTS;

      /* No samplers are allowed in ARB_vp. */
   }

   /* Get semantic names and indices. */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
         unsigned slot = num_outputs++;
         unsigned semantic_name, semantic_index;
         tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                      &semantic_name, &semantic_index);
         output_semantic_name[slot] = semantic_name;
         output_semantic_index[slot] = semantic_index;
      }
   }
   /* pre-setup potentially unused edgeflag output */
   output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
   output_semantic_index[num_outputs] = 0;

   ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
   if (ureg == NULL)
      return false;

   /* Declare clip/cull distance usage so drivers size the arrays. */
   if (stvp->Base.info.clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    stvp->Base.info.clip_distance_array_size);
   if (stvp->Base.info.cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    stvp->Base.info.cull_distance_array_size);

   if (ST_DEBUG & DEBUG_MESA) {
      _mesa_print_program(&stvp->Base);
      _mesa_print_program_parameters(st->ctx, &stvp->Base);
      debug_printf("\n");
   }

   /* GLSL path vs. Mesa-IR (ARB/fixed-function) path. */
   if (stvp->glsl_to_tgsi) {
      error = st_translate_program(st->ctx,
                                   PIPE_SHADER_VERTEX,
                                   ureg,
                                   stvp->glsl_to_tgsi,
                                   &stvp->Base,
                                   /* inputs */
                                   stvp->num_inputs,
                                   stvp->input_to_index,
                                   NULL, /* inputSlotToAttr */
                                   NULL, /* input semantic name */
                                   NULL, /* input semantic index */
                                   NULL, /* interp mode */
                                   /* outputs */
                                   num_outputs,
                                   stvp->result_to_output,
                                   output_semantic_name,
                                   output_semantic_index);

      st_translate_stream_output_info(&stvp->Base);

      free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
   } else
      error = st_translate_mesa_program(st->ctx,
                                        PIPE_SHADER_VERTEX,
                                        ureg,
                                        &stvp->Base,
                                        /* inputs */
                                        stvp->num_inputs,
                                        stvp->input_to_index,
                                        NULL, /* input semantic name */
                                        NULL, /* input semantic index */
                                        NULL,
                                        /* outputs */
                                        num_outputs,
                                        stvp->result_to_output,
                                        output_semantic_name,
                                        output_semantic_index);

   if (error) {
      debug_printf("%s: failed to translate Mesa program:\n", __func__);
      _mesa_print_program(&stvp->Base);
      debug_assert(0);
      return false;
   }

   stvp->state.tokens = ureg_get_tokens(ureg, NULL);
   ureg_destroy(ureg);

   if (stvp->glsl_to_tgsi) {
      /* Visitor was freed above; clear the pointer and cache the result. */
      stvp->glsl_to_tgsi = NULL;
      st_store_ir_in_disk_cache(st, &stvp->Base, false);
   }

   /* Translate to NIR.
    *
    * This must be done after the translation to TGSI is done, because
    * we'll pass the NIR shader to the driver and the TGSI version to
    * the draw module for the select/feedback/rasterpos code.
    */
   if (st->pipe->screen->get_shader_param(st->pipe->screen,
                                          PIPE_SHADER_VERTEX,
                                          PIPE_SHADER_CAP_PREFERRED_IR)) {
      assert(!stvp->glsl_to_tgsi);

      nir_shader *nir =
         st_translate_prog_to_nir(st, &stvp->Base, MESA_SHADER_VERTEX);

      /* Replace any previous NIR (e.g. on re-translation). */
      if (stvp->state.ir.nir)
         ralloc_free(stvp->state.ir.nir);
      stvp->state.type = PIPE_SHADER_IR_NIR;
      stvp->state.ir.nir = nir;
      stvp->Base.nir = nir;
      return true;
   }

   return stvp->state.tokens != NULL;
}
662
/* State reference used to fetch glDepthRange for depth-clamp lowering. */
static const gl_state_index16 depth_range_state[STATE_LENGTH] =
   { STATE_DEPTH_RANGE };

/**
 * Compile one vertex-program variant for the given key.
 *
 * For NIR programs, clones the program's NIR, applies key-driven lowering
 * (clamp color, edgeflags, point size, user clip planes), finalizes if
 * needed, and creates the driver shader; TGSI tokens are duplicated for
 * the draw module when present.  For TGSI programs, applies the equivalent
 * token-level emulation instead.
 *
 * \return a new variant (caller links it into the list), never NULL on the
 *         NIR path; ownership of state.tokens transfers to vpv->tokens on
 *         the TGSI path.
 */
static struct st_vp_variant *
st_create_vp_variant(struct st_context *st,
                     struct st_vertex_program *stvp,
                     const struct st_common_variant_key *key)
{
   struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_shader_state state = {0};

   /* State reference for lowering gl_PointSize writes to a clamped value. */
   static const gl_state_index16 point_size_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
   struct gl_program_parameter_list *params = stvp->Base.Parameters;

   vpv->key = *key;
   vpv->num_inputs = stvp->num_inputs;

   state.stream_output = stvp->state.stream_output;

   if (stvp->state.type == PIPE_SHADER_IR_NIR) {
      /* Track whether any lowering ran, so we only re-finalize if needed. */
      bool finalize = false;

      state.type = PIPE_SHADER_IR_NIR;
      /* Clone: the driver takes ownership of the NIR we hand it. */
      state.ir.nir = nir_shader_clone(NULL, stvp->state.ir.nir);
      if (key->clamp_color) {
         NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
         finalize = true;
      }
      if (key->passthrough_edgeflags) {
         NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
         /* The pass adds an edgeflag input. */
         vpv->num_inputs++;
         finalize = true;
      }

      if (key->lower_point_size) {
         _mesa_add_state_reference(params, point_size_state);
         NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
                    point_size_state);
         finalize = true;
      }

      if (key->lower_ucp) {
         bool can_compact = screen->get_param(screen,
                                              PIPE_CAP_NIR_COMPACT_ARRAYS);

         /* GLSL programs clip in eye space; fixed function uses the
          * pre-transformed internal clip-plane state.
          */
         bool use_eye = st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] != NULL;
         gl_state_index16 clipplane_state[MAX_CLIP_PLANES][STATE_LENGTH];
         for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
            if (use_eye) {
               clipplane_state[i][0] = STATE_CLIPPLANE;
               clipplane_state[i][1] = i;
            } else {
               clipplane_state[i][0] = STATE_INTERNAL;
               clipplane_state[i][1] = STATE_CLIP_INTERNAL;
               clipplane_state[i][2] = i;
            }
            _mesa_add_state_reference(params, clipplane_state[i]);
         }

         NIR_PASS_V(state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
                    true, can_compact, clipplane_state);
         NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
                    nir_shader_get_entrypoint(state.ir.nir), true, false);
         finalize = true;
      }

      if (finalize || !st->allow_st_finalize_nir_twice) {
         st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
                         true);

         /* Some of the lowering above may have introduced new varyings */
         nir_shader_gather_info(state.ir.nir,
                                nir_shader_get_entrypoint(state.ir.nir));
      }

      vpv->driver_shader = pipe->create_vs_state(pipe, &state);

      /* When generating a NIR program, we usually don't have TGSI tokens.
       * However, we do create them for ARB_vertex_program / fixed-function VS
       * programs which we may need to use with the draw module for legacy
       * feedback/select emulation.  If they exist, copy them.
       *
       * TODO: Lowering for shader variants is not applied to TGSI when
       * generating a NIR shader.
       */
      if (stvp->state.tokens)
         vpv->tokens = tgsi_dup_tokens(stvp->state.tokens);

      return vpv;
   }

   /* TGSI path: duplicate the tokens so lowering doesn't mutate the
    * program's master copy.
    */
   state.type = PIPE_SHADER_IR_TGSI;
   state.tokens = tgsi_dup_tokens(stvp->state.tokens);

   /* Emulate features. */
   if (key->clamp_color || key->passthrough_edgeflags) {
      const struct tgsi_token *tokens;
      unsigned flags =
         (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
         (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);

      tokens = tgsi_emulate(state.tokens, flags);

      if (tokens) {
         tgsi_free_tokens(state.tokens);
         state.tokens = tokens;

         if (key->passthrough_edgeflags)
            vpv->num_inputs++;
      } else
         fprintf(stderr, "mesa: cannot emulate deprecated features\n");
   }

   if (key->lower_depth_clamp) {
      unsigned depth_range_const =
            _mesa_add_state_reference(params, depth_range_state);

      const struct tgsi_token *tokens;
      tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
                                         key->clip_negative_one_to_one);
      /* The lowering may return the input unchanged; only free on change. */
      if (tokens != state.tokens)
         tgsi_free_tokens(state.tokens);
      state.tokens = tokens;
   }

   if (ST_DEBUG & DEBUG_TGSI) {
      tgsi_dump(state.tokens, 0);
      debug_printf("\n");
   }

   vpv->driver_shader = pipe->create_vs_state(pipe, &state);
   /* Save this for selection/feedback/rasterpos. */
   vpv->tokens = state.tokens;
   return vpv;
}
801
802
803 /**
804 * Find/create a vertex program variant.
805 */
806 struct st_vp_variant *
807 st_get_vp_variant(struct st_context *st,
808 struct st_vertex_program *stvp,
809 const struct st_common_variant_key *key)
810 {
811 struct st_vp_variant *vpv;
812
813 /* Search for existing variant */
814 for (vpv = stvp->variants; vpv; vpv = vpv->next) {
815 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
816 break;
817 }
818 }
819
820 if (!vpv) {
821 /* create now */
822 vpv = st_create_vp_variant(st, stvp, key);
823 if (vpv) {
824 for (unsigned index = 0; index < vpv->num_inputs; ++index) {
825 unsigned attr = stvp->index_to_input[index];
826 if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
827 continue;
828 vpv->vert_attrib_mask |= 1u << attr;
829 }
830
831 /* insert into list */
832 vpv->next = stvp->variants;
833 stvp->variants = vpv;
834 }
835 }
836
837 return vpv;
838 }
839
840
841 /**
842 * Translate a Mesa fragment shader into a TGSI shader.
843 */
844 bool
845 st_translate_fragment_program(struct st_context *st,
846 struct st_common_program *stfp)
847 {
848 /* Non-GLSL programs: */
849 if (!stfp->glsl_to_tgsi) {
850 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
851 if (st->ctx->Const.GLSLFragCoordIsSysVal)
852 _mesa_program_fragment_position_to_sysval(&stfp->Base);
853
854 /* This determines which states will be updated when the assembly
855 * shader is bound.
856 *
857 * fragment.position and glDrawPixels always use constants.
858 */
859 stfp->affected_states = ST_NEW_FS_STATE |
860 ST_NEW_SAMPLE_SHADING |
861 ST_NEW_FS_CONSTANTS;
862
863 if (stfp->ati_fs) {
864 /* Just set them for ATI_fs unconditionally. */
865 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
866 ST_NEW_FS_SAMPLERS;
867 } else {
868 /* ARB_fp */
869 if (stfp->Base.SamplersUsed)
870 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
871 ST_NEW_FS_SAMPLERS;
872 }
873
874 /* Translate to NIR. */
875 if (!stfp->ati_fs &&
876 st->pipe->screen->get_shader_param(st->pipe->screen,
877 PIPE_SHADER_FRAGMENT,
878 PIPE_SHADER_CAP_PREFERRED_IR)) {
879 nir_shader *nir =
880 st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
881
882 if (stfp->state.ir.nir)
883 ralloc_free(stfp->state.ir.nir);
884 stfp->state.type = PIPE_SHADER_IR_NIR;
885 stfp->state.ir.nir = nir;
886 stfp->Base.nir = nir;
887 return true;
888 }
889 }
890
891 ubyte outputMapping[2 * FRAG_RESULT_MAX];
892 ubyte inputMapping[VARYING_SLOT_MAX];
893 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
894 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
895 GLuint attr;
896 GLbitfield64 inputsRead;
897 struct ureg_program *ureg;
898
899 GLboolean write_all = GL_FALSE;
900
901 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
902 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
903 uint fs_num_inputs = 0;
904
905 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
906 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
907 uint fs_num_outputs = 0;
908
909 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
910
911 /*
912 * Convert Mesa program inputs to TGSI input register semantics.
913 */
914 inputsRead = stfp->Base.info.inputs_read;
915 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
916 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
917 const GLuint slot = fs_num_inputs++;
918
919 inputMapping[attr] = slot;
920 inputSlotToAttr[slot] = attr;
921
922 switch (attr) {
923 case VARYING_SLOT_POS:
924 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
925 input_semantic_index[slot] = 0;
926 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
927 break;
928 case VARYING_SLOT_COL0:
929 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
930 input_semantic_index[slot] = 0;
931 interpMode[slot] = stfp->glsl_to_tgsi ?
932 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
933 break;
934 case VARYING_SLOT_COL1:
935 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
936 input_semantic_index[slot] = 1;
937 interpMode[slot] = stfp->glsl_to_tgsi ?
938 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
939 break;
940 case VARYING_SLOT_FOGC:
941 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
942 input_semantic_index[slot] = 0;
943 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
944 break;
945 case VARYING_SLOT_FACE:
946 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
947 input_semantic_index[slot] = 0;
948 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
949 break;
950 case VARYING_SLOT_PRIMITIVE_ID:
951 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
952 input_semantic_index[slot] = 0;
953 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
954 break;
955 case VARYING_SLOT_LAYER:
956 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
957 input_semantic_index[slot] = 0;
958 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
959 break;
960 case VARYING_SLOT_VIEWPORT:
961 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
962 input_semantic_index[slot] = 0;
963 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
964 break;
965 case VARYING_SLOT_CLIP_DIST0:
966 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
967 input_semantic_index[slot] = 0;
968 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
969 break;
970 case VARYING_SLOT_CLIP_DIST1:
971 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
972 input_semantic_index[slot] = 1;
973 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
974 break;
975 case VARYING_SLOT_CULL_DIST0:
976 case VARYING_SLOT_CULL_DIST1:
977 /* these should have been lowered by GLSL */
978 assert(0);
979 break;
980 /* In most cases, there is nothing special about these
981 * inputs, so adopt a convention to use the generic
982 * semantic name and the mesa VARYING_SLOT_ number as the
983 * index.
984 *
985 * All that is required is that the vertex shader labels
986 * its own outputs similarly, and that the vertex shader
987 * generates at least every output required by the
988 * fragment shader plus fixed-function hardware (such as
989 * BFC).
990 *
991 * However, some drivers may need us to identify the PNTC and TEXi
992 * varyings if, for example, their capability to replace them with
993 * sprite coordinates is limited.
994 */
995 case VARYING_SLOT_PNTC:
996 if (st->needs_texcoord_semantic) {
997 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
998 input_semantic_index[slot] = 0;
999 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1000 break;
1001 }
1002 /* fall through */
1003 case VARYING_SLOT_TEX0:
1004 case VARYING_SLOT_TEX1:
1005 case VARYING_SLOT_TEX2:
1006 case VARYING_SLOT_TEX3:
1007 case VARYING_SLOT_TEX4:
1008 case VARYING_SLOT_TEX5:
1009 case VARYING_SLOT_TEX6:
1010 case VARYING_SLOT_TEX7:
1011 if (st->needs_texcoord_semantic) {
1012 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
1013 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
1014 interpMode[slot] = stfp->glsl_to_tgsi ?
1015 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1016 break;
1017 }
1018 /* fall through */
1019 case VARYING_SLOT_VAR0:
1020 default:
1021 /* Semantic indices should be zero-based because drivers may choose
1022 * to assign a fixed slot determined by that index.
1023 * This is useful because ARB_separate_shader_objects uses location
1024 * qualifiers for linkage, and if the semantic index corresponds to
1025 * these locations, linkage passes in the driver become unecessary.
1026 *
1027 * If needs_texcoord_semantic is true, no semantic indices will be
1028 * consumed for the TEXi varyings, and we can base the locations of
1029 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
1030 */
1031 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
1032 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
1033 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
1034 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
1035 if (attr == VARYING_SLOT_PNTC)
1036 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1037 else {
1038 interpMode[slot] = stfp->glsl_to_tgsi ?
1039 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1040 }
1041 break;
1042 }
1043 }
1044 else {
1045 inputMapping[attr] = -1;
1046 }
1047 }
1048
1049 /*
1050 * Semantics and mapping for outputs
1051 */
1052 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
1053
1054 /* if z is written, emit that first */
1055 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
1056 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
1057 fs_output_semantic_index[fs_num_outputs] = 0;
1058 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
1059 fs_num_outputs++;
1060 outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
1061 }
1062
1063 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
1064 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
1065 fs_output_semantic_index[fs_num_outputs] = 0;
1066 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
1067 fs_num_outputs++;
1068 outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
1069 }
1070
1071 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
1072 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
1073 fs_output_semantic_index[fs_num_outputs] = 0;
1074 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
1075 fs_num_outputs++;
1076 outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
1077 }
1078
1079 /* handle remaining outputs (color) */
1080 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
1081 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
1082 stfp->Base.SecondaryOutputsWritten;
1083 const unsigned loc = attr % FRAG_RESULT_MAX;
1084
1085 if (written & BITFIELD64_BIT(loc)) {
1086 switch (loc) {
1087 case FRAG_RESULT_DEPTH:
1088 case FRAG_RESULT_STENCIL:
1089 case FRAG_RESULT_SAMPLE_MASK:
1090 /* handled above */
1091 assert(0);
1092 break;
1093 case FRAG_RESULT_COLOR:
1094 write_all = GL_TRUE; /* fallthrough */
1095 default: {
1096 int index;
1097 assert(loc == FRAG_RESULT_COLOR ||
1098 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
1099
1100 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
1101
1102 if (attr >= FRAG_RESULT_MAX) {
1103 /* Secondary color for dual source blending. */
1104 assert(index == 0);
1105 index++;
1106 }
1107
1108 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
1109 fs_output_semantic_index[fs_num_outputs] = index;
1110 outputMapping[attr] = fs_num_outputs;
1111 break;
1112 }
1113 }
1114
1115 fs_num_outputs++;
1116 }
1117 }
1118
1119 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
1120 if (ureg == NULL)
1121 return false;
1122
1123 if (ST_DEBUG & DEBUG_MESA) {
1124 _mesa_print_program(&stfp->Base);
1125 _mesa_print_program_parameters(st->ctx, &stfp->Base);
1126 debug_printf("\n");
1127 }
1128 if (write_all == GL_TRUE)
1129 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
1130
1131 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
1132 switch (stfp->Base.info.fs.depth_layout) {
1133 case FRAG_DEPTH_LAYOUT_ANY:
1134 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1135 TGSI_FS_DEPTH_LAYOUT_ANY);
1136 break;
1137 case FRAG_DEPTH_LAYOUT_GREATER:
1138 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1139 TGSI_FS_DEPTH_LAYOUT_GREATER);
1140 break;
1141 case FRAG_DEPTH_LAYOUT_LESS:
1142 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1143 TGSI_FS_DEPTH_LAYOUT_LESS);
1144 break;
1145 case FRAG_DEPTH_LAYOUT_UNCHANGED:
1146 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1147 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
1148 break;
1149 default:
1150 assert(0);
1151 }
1152 }
1153
1154 if (stfp->glsl_to_tgsi) {
1155 st_translate_program(st->ctx,
1156 PIPE_SHADER_FRAGMENT,
1157 ureg,
1158 stfp->glsl_to_tgsi,
1159 &stfp->Base,
1160 /* inputs */
1161 fs_num_inputs,
1162 inputMapping,
1163 inputSlotToAttr,
1164 input_semantic_name,
1165 input_semantic_index,
1166 interpMode,
1167 /* outputs */
1168 fs_num_outputs,
1169 outputMapping,
1170 fs_output_semantic_name,
1171 fs_output_semantic_index);
1172
1173 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
1174 } else if (stfp->ati_fs)
1175 st_translate_atifs_program(ureg,
1176 stfp->ati_fs,
1177 &stfp->Base,
1178 /* inputs */
1179 fs_num_inputs,
1180 inputMapping,
1181 input_semantic_name,
1182 input_semantic_index,
1183 interpMode,
1184 /* outputs */
1185 fs_num_outputs,
1186 outputMapping,
1187 fs_output_semantic_name,
1188 fs_output_semantic_index);
1189 else
1190 st_translate_mesa_program(st->ctx,
1191 PIPE_SHADER_FRAGMENT,
1192 ureg,
1193 &stfp->Base,
1194 /* inputs */
1195 fs_num_inputs,
1196 inputMapping,
1197 input_semantic_name,
1198 input_semantic_index,
1199 interpMode,
1200 /* outputs */
1201 fs_num_outputs,
1202 outputMapping,
1203 fs_output_semantic_name,
1204 fs_output_semantic_index);
1205
1206 stfp->state.tokens = ureg_get_tokens(ureg, NULL);
1207 ureg_destroy(ureg);
1208
1209 if (stfp->glsl_to_tgsi) {
1210 stfp->glsl_to_tgsi = NULL;
1211 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1212 }
1213
1214 return stfp->state.tokens != NULL;
1215 }
1216
1217 static struct st_fp_variant *
1218 st_create_fp_variant(struct st_context *st,
1219 struct st_common_program *stfp,
1220 const struct st_fp_variant_key *key)
1221 {
1222 struct pipe_context *pipe = st->pipe;
1223 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1224 struct pipe_shader_state state = {0};
1225 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1226 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1227 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1228 static const gl_state_index16 scale_state[STATE_LENGTH] =
1229 { STATE_INTERNAL, STATE_PT_SCALE };
1230 static const gl_state_index16 bias_state[STATE_LENGTH] =
1231 { STATE_INTERNAL, STATE_PT_BIAS };
1232 static const gl_state_index16 alpha_ref_state[STATE_LENGTH] =
1233 { STATE_INTERNAL, STATE_ALPHA_REF };
1234
1235 if (!variant)
1236 return NULL;
1237
1238 if (stfp->state.type == PIPE_SHADER_IR_NIR) {
1239 bool finalize = false;
1240
1241 state.type = PIPE_SHADER_IR_NIR;
1242 state.ir.nir = nir_shader_clone(NULL, stfp->state.ir.nir);
1243
1244 if (key->clamp_color) {
1245 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1246 finalize = true;
1247 }
1248
1249 if (key->lower_flatshade) {
1250 NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
1251 finalize = true;
1252 }
1253
1254 if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
1255 _mesa_add_state_reference(params, alpha_ref_state);
1256 NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
1257 false, alpha_ref_state);
1258 finalize = true;
1259 }
1260
1261 if (key->lower_two_sided_color) {
1262 NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
1263 finalize = true;
1264 }
1265
1266 if (key->persample_shading) {
1267 nir_shader *shader = state.ir.nir;
1268 nir_foreach_variable(var, &shader->inputs)
1269 var->data.sample = true;
1270 finalize = true;
1271 }
1272
1273 assert(!(key->bitmap && key->drawpixels));
1274
1275 /* glBitmap */
1276 if (key->bitmap) {
1277 nir_lower_bitmap_options options = {0};
1278
1279 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1280 options.sampler = variant->bitmap_sampler;
1281 options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
1282
1283 NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
1284 finalize = true;
1285 }
1286
1287 /* glDrawPixels (color only) */
1288 if (key->drawpixels) {
1289 nir_lower_drawpixels_options options = {{0}};
1290 unsigned samplers_used = stfp->Base.SamplersUsed;
1291
1292 /* Find the first unused slot. */
1293 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1294 options.drawpix_sampler = variant->drawpix_sampler;
1295 samplers_used |= (1 << variant->drawpix_sampler);
1296
1297 options.pixel_maps = key->pixelMaps;
1298 if (key->pixelMaps) {
1299 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1300 options.pixelmap_sampler = variant->pixelmap_sampler;
1301 }
1302
1303 options.scale_and_bias = key->scaleAndBias;
1304 if (key->scaleAndBias) {
1305 _mesa_add_state_reference(params, scale_state);
1306 memcpy(options.scale_state_tokens, scale_state,
1307 sizeof(options.scale_state_tokens));
1308 _mesa_add_state_reference(params, bias_state);
1309 memcpy(options.bias_state_tokens, bias_state,
1310 sizeof(options.bias_state_tokens));
1311 }
1312
1313 _mesa_add_state_reference(params, texcoord_state);
1314 memcpy(options.texcoord_state_tokens, texcoord_state,
1315 sizeof(options.texcoord_state_tokens));
1316
1317 NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
1318 finalize = true;
1319 }
1320
1321 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1322 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
1323 key->external.lower_ayuv || key->external.lower_xyuv)) {
1324 nir_lower_tex_options options = {0};
1325 options.lower_y_uv_external = key->external.lower_nv12;
1326 options.lower_y_u_v_external = key->external.lower_iyuv;
1327 options.lower_xy_uxvx_external = key->external.lower_xy_uxvx;
1328 options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
1329 options.lower_ayuv_external = key->external.lower_ayuv;
1330 options.lower_xyuv_external = key->external.lower_xyuv;
1331 NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
1332 finalize = true;
1333 }
1334
1335 if (finalize || !st->allow_st_finalize_nir_twice) {
1336 st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
1337 false);
1338 }
1339
1340 /* This pass needs to happen *after* nir_lower_sampler */
1341 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1342 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1343 NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
1344 ~stfp->Base.SamplersUsed,
1345 key->external.lower_nv12 || key->external.lower_xy_uxvx ||
1346 key->external.lower_yx_xuxv,
1347 key->external.lower_iyuv);
1348 finalize = true;
1349 }
1350
1351 if (finalize || !st->allow_st_finalize_nir_twice) {
1352 /* Some of the lowering above may have introduced new varyings */
1353 nir_shader_gather_info(state.ir.nir,
1354 nir_shader_get_entrypoint(state.ir.nir));
1355
1356 struct pipe_screen *screen = pipe->screen;
1357 if (screen->finalize_nir)
1358 screen->finalize_nir(screen, state.ir.nir, false);
1359 }
1360
1361 variant->driver_shader = pipe->create_fs_state(pipe, &state);
1362 variant->key = *key;
1363
1364 return variant;
1365 }
1366
1367 state.tokens = stfp->state.tokens;
1368
1369 assert(!(key->bitmap && key->drawpixels));
1370
1371 /* Fix texture targets and add fog for ATI_fs */
1372 if (stfp->ati_fs) {
1373 const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);
1374
1375 if (tokens)
1376 state.tokens = tokens;
1377 else
1378 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1379 }
1380
1381 /* Emulate features. */
1382 if (key->clamp_color || key->persample_shading) {
1383 const struct tgsi_token *tokens;
1384 unsigned flags =
1385 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1386 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1387
1388 tokens = tgsi_emulate(state.tokens, flags);
1389
1390 if (tokens) {
1391 if (state.tokens != stfp->state.tokens)
1392 tgsi_free_tokens(state.tokens);
1393 state.tokens = tokens;
1394 } else
1395 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1396 }
1397
1398 /* glBitmap */
1399 if (key->bitmap) {
1400 const struct tgsi_token *tokens;
1401
1402 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1403
1404 tokens = st_get_bitmap_shader(state.tokens,
1405 st->internal_target,
1406 variant->bitmap_sampler,
1407 st->needs_texcoord_semantic,
1408 st->bitmap.tex_format ==
1409 PIPE_FORMAT_R8_UNORM);
1410
1411 if (tokens) {
1412 if (state.tokens != stfp->state.tokens)
1413 tgsi_free_tokens(state.tokens);
1414 state.tokens = tokens;
1415 } else
1416 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1417 }
1418
1419 /* glDrawPixels (color only) */
1420 if (key->drawpixels) {
1421 const struct tgsi_token *tokens;
1422 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1423
1424 /* Find the first unused slot. */
1425 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1426
1427 if (key->pixelMaps) {
1428 unsigned samplers_used = stfp->Base.SamplersUsed |
1429 (1 << variant->drawpix_sampler);
1430
1431 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1432 }
1433
1434 if (key->scaleAndBias) {
1435 scale_const = _mesa_add_state_reference(params, scale_state);
1436 bias_const = _mesa_add_state_reference(params, bias_state);
1437 }
1438
1439 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1440
1441 tokens = st_get_drawpix_shader(state.tokens,
1442 st->needs_texcoord_semantic,
1443 key->scaleAndBias, scale_const,
1444 bias_const, key->pixelMaps,
1445 variant->drawpix_sampler,
1446 variant->pixelmap_sampler,
1447 texcoord_const, st->internal_target);
1448
1449 if (tokens) {
1450 if (state.tokens != stfp->state.tokens)
1451 tgsi_free_tokens(state.tokens);
1452 state.tokens = tokens;
1453 } else
1454 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1455 }
1456
1457 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1458 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1459 const struct tgsi_token *tokens;
1460
1461 /* samplers inserted would conflict, but this should be unpossible: */
1462 assert(!(key->bitmap || key->drawpixels));
1463
1464 tokens = st_tgsi_lower_yuv(state.tokens,
1465 ~stfp->Base.SamplersUsed,
1466 key->external.lower_nv12 ||
1467 key->external.lower_xy_uxvx ||
1468 key->external.lower_yx_xuxv,
1469 key->external.lower_iyuv);
1470 if (tokens) {
1471 if (state.tokens != stfp->state.tokens)
1472 tgsi_free_tokens(state.tokens);
1473 state.tokens = tokens;
1474 } else {
1475 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1476 }
1477 }
1478
1479 if (key->lower_depth_clamp) {
1480 unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
1481
1482 const struct tgsi_token *tokens;
1483 tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
1484 if (state.tokens != stfp->state.tokens)
1485 tgsi_free_tokens(state.tokens);
1486 state.tokens = tokens;
1487 }
1488
1489 if (ST_DEBUG & DEBUG_TGSI) {
1490 tgsi_dump(state.tokens, 0);
1491 debug_printf("\n");
1492 }
1493
1494 /* fill in variant */
1495 variant->driver_shader = pipe->create_fs_state(pipe, &state);
1496 variant->key = *key;
1497
1498 if (state.tokens != stfp->state.tokens)
1499 tgsi_free_tokens(state.tokens);
1500 return variant;
1501 }
1502
1503 /**
1504 * Translate fragment program if needed.
1505 */
1506 struct st_fp_variant *
1507 st_get_fp_variant(struct st_context *st,
1508 struct st_common_program *stfp,
1509 const struct st_fp_variant_key *key)
1510 {
1511 struct st_fp_variant *fpv;
1512
1513 /* Search for existing variant */
1514 for (fpv = stfp->fp_variants; fpv; fpv = fpv->next) {
1515 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1516 break;
1517 }
1518 }
1519
1520 if (!fpv) {
1521 /* create new */
1522 fpv = st_create_fp_variant(st, stfp, key);
1523 if (fpv) {
1524 if (key->bitmap || key->drawpixels) {
1525 /* Regular variants should always come before the
1526 * bitmap & drawpixels variants, (unless there
1527 * are no regular variants) so that
1528 * st_update_fp can take a fast path when
1529 * shader_has_one_variant is set.
1530 */
1531 if (!stfp->fp_variants) {
1532 stfp->fp_variants = fpv;
1533 } else {
1534 /* insert into list after the first one */
1535 fpv->next = stfp->fp_variants->next;
1536 stfp->fp_variants->next = fpv;
1537 }
1538 } else {
1539 /* insert into list */
1540 fpv->next = stfp->fp_variants;
1541 stfp->fp_variants = fpv;
1542 }
1543 }
1544 }
1545
1546 return fpv;
1547 }
1548
1549 /**
1550 * Translate a program. This is common code for geometry and tessellation
1551 * shaders.
1552 */
1553 bool
1554 st_translate_common_program(struct st_context *st,
1555 struct st_common_program *stcp)
1556 {
1557 struct gl_program *prog = &stcp->Base;
1558 enum pipe_shader_type stage =
1559 pipe_shader_type_from_mesa(stcp->Base.info.stage);
1560 struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
1561
1562 if (ureg == NULL)
1563 return false;
1564
1565 switch (stage) {
1566 case PIPE_SHADER_TESS_CTRL:
1567 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1568 stcp->Base.info.tess.tcs_vertices_out);
1569 break;
1570
1571 case PIPE_SHADER_TESS_EVAL:
1572 if (stcp->Base.info.tess.primitive_mode == GL_ISOLINES)
1573 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1574 else
1575 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1576 stcp->Base.info.tess.primitive_mode);
1577
1578 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1579 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1580 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1581 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1582 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1583
1584 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1585 (stcp->Base.info.tess.spacing + 1) % 3);
1586
1587 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1588 !stcp->Base.info.tess.ccw);
1589 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1590 stcp->Base.info.tess.point_mode);
1591 break;
1592
1593 case PIPE_SHADER_GEOMETRY:
1594 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1595 stcp->Base.info.gs.input_primitive);
1596 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1597 stcp->Base.info.gs.output_primitive);
1598 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1599 stcp->Base.info.gs.vertices_out);
1600 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1601 stcp->Base.info.gs.invocations);
1602 break;
1603
1604 default:
1605 break;
1606 }
1607
1608 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1609 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1610 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1611 GLuint attr;
1612
1613 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1614 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1615 uint num_inputs = 0;
1616
1617 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1618 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1619 uint num_outputs = 0;
1620
1621 GLint i;
1622
1623 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1624 memset(inputMapping, 0, sizeof(inputMapping));
1625 memset(outputMapping, 0, sizeof(outputMapping));
1626 memset(&stcp->state, 0, sizeof(stcp->state));
1627
1628 if (prog->info.clip_distance_array_size)
1629 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1630 prog->info.clip_distance_array_size);
1631 if (prog->info.cull_distance_array_size)
1632 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1633 prog->info.cull_distance_array_size);
1634
1635 /*
1636 * Convert Mesa program inputs to TGSI input register semantics.
1637 */
1638 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1639 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1640 continue;
1641
1642 unsigned slot = num_inputs++;
1643
1644 inputMapping[attr] = slot;
1645 inputSlotToAttr[slot] = attr;
1646
1647 unsigned semantic_name, semantic_index;
1648 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1649 &semantic_name, &semantic_index);
1650 input_semantic_name[slot] = semantic_name;
1651 input_semantic_index[slot] = semantic_index;
1652 }
1653
1654 /* Also add patch inputs. */
1655 for (attr = 0; attr < 32; attr++) {
1656 if (prog->info.patch_inputs_read & (1u << attr)) {
1657 GLuint slot = num_inputs++;
1658 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1659
1660 inputMapping[patch_attr] = slot;
1661 inputSlotToAttr[slot] = patch_attr;
1662 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1663 input_semantic_index[slot] = attr;
1664 }
1665 }
1666
1667 /* initialize output semantics to defaults */
1668 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1669 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1670 output_semantic_index[i] = 0;
1671 }
1672
1673 /*
1674 * Determine number of outputs, the (default) output register
1675 * mapping and the semantic information for each output.
1676 */
1677 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1678 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1679 GLuint slot = num_outputs++;
1680
1681 outputMapping[attr] = slot;
1682
1683 unsigned semantic_name, semantic_index;
1684 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1685 &semantic_name, &semantic_index);
1686 output_semantic_name[slot] = semantic_name;
1687 output_semantic_index[slot] = semantic_index;
1688 }
1689 }
1690
1691 /* Also add patch outputs. */
1692 for (attr = 0; attr < 32; attr++) {
1693 if (prog->info.patch_outputs_written & (1u << attr)) {
1694 GLuint slot = num_outputs++;
1695 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1696
1697 outputMapping[patch_attr] = slot;
1698 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1699 output_semantic_index[slot] = attr;
1700 }
1701 }
1702
1703 st_translate_program(st->ctx,
1704 stage,
1705 ureg,
1706 stcp->glsl_to_tgsi,
1707 prog,
1708 /* inputs */
1709 num_inputs,
1710 inputMapping,
1711 inputSlotToAttr,
1712 input_semantic_name,
1713 input_semantic_index,
1714 NULL,
1715 /* outputs */
1716 num_outputs,
1717 outputMapping,
1718 output_semantic_name,
1719 output_semantic_index);
1720
1721 stcp->state.tokens = ureg_get_tokens(ureg, NULL);
1722
1723 ureg_destroy(ureg);
1724
1725 st_translate_stream_output_info(prog);
1726
1727 st_store_ir_in_disk_cache(st, prog, false);
1728
1729 if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
1730 _mesa_print_program(prog);
1731 debug_printf("\n");
1732 }
1733
1734 if (ST_DEBUG & DEBUG_TGSI) {
1735 tgsi_dump(stcp->state.tokens, 0);
1736 debug_printf("\n");
1737 }
1738
1739 free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
1740 stcp->glsl_to_tgsi = NULL;
1741 return true;
1742 }
1743
1744
1745 /**
1746 * Get/create a basic program variant.
1747 */
1748 struct st_common_variant *
1749 st_get_common_variant(struct st_context *st,
1750 struct st_common_program *prog,
1751 const struct st_common_variant_key *key)
1752 {
1753 struct pipe_context *pipe = st->pipe;
1754 struct st_common_variant *v;
1755 struct pipe_shader_state state = {0};
1756
1757 /* Search for existing variant */
1758 for (v = prog->variants; v; v = v->next) {
1759 if (memcmp(&v->key, key, sizeof(*key)) == 0) {
1760 break;
1761 }
1762 }
1763
1764 if (!v) {
1765 /* create new */
1766 v = CALLOC_STRUCT(st_common_variant);
1767 if (v) {
1768 if (prog->state.type == PIPE_SHADER_IR_NIR) {
1769 bool finalize = false;
1770
1771 state.type = PIPE_SHADER_IR_NIR;
1772 state.ir.nir = nir_shader_clone(NULL, prog->state.ir.nir);
1773
1774 if (key->clamp_color) {
1775 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1776 finalize = true;
1777 }
1778
1779 state.stream_output = prog->state.stream_output;
1780
1781 if (finalize || !st->allow_st_finalize_nir_twice) {
1782 st_finalize_nir(st, &prog->Base, prog->shader_program,
1783 state.ir.nir, true);
1784 }
1785 } else {
1786 if (key->lower_depth_clamp) {
1787 struct gl_program_parameter_list *params = prog->Base.Parameters;
1788
1789 unsigned depth_range_const =
1790 _mesa_add_state_reference(params, depth_range_state);
1791
1792 const struct tgsi_token *tokens;
1793 tokens =
1794 st_tgsi_lower_depth_clamp(prog->state.tokens,
1795 depth_range_const,
1796 key->clip_negative_one_to_one);
1797
1798 if (tokens != prog->state.tokens)
1799 tgsi_free_tokens(prog->state.tokens);
1800
1801 prog->state.tokens = tokens;
1802 }
1803 state = prog->state;
1804 }
1805 /* fill in new variant */
1806 switch (prog->Base.info.stage) {
1807 case MESA_SHADER_TESS_CTRL:
1808 v->driver_shader = pipe->create_tcs_state(pipe, &state);
1809 break;
1810 case MESA_SHADER_TESS_EVAL:
1811 v->driver_shader = pipe->create_tes_state(pipe, &state);
1812 break;
1813 case MESA_SHADER_GEOMETRY:
1814 v->driver_shader = pipe->create_gs_state(pipe, &state);
1815 break;
1816 case MESA_SHADER_COMPUTE: {
1817 struct pipe_compute_state cs = {0};
1818 cs.ir_type = state.type;
1819 cs.req_local_mem = prog->Base.info.cs.shared_size;
1820
1821 if (state.type == PIPE_SHADER_IR_NIR)
1822 cs.prog = state.ir.nir;
1823 else
1824 cs.prog = state.tokens;
1825
1826 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1827 break;
1828 }
1829 default:
1830 assert(!"unhandled shader type");
1831 free(v);
1832 return NULL;
1833 }
1834
1835 v->key = *key;
1836
1837 /* insert into list */
1838 v->next = prog->variants;
1839 prog->variants = v;
1840 }
1841 }
1842
1843 return v;
1844 }
1845
1846
1847 /**
1848 * Vert/Geom/Frag programs have per-context variants. Free all the
1849 * variants attached to the given program which match the given context.
1850 */
1851 static void
1852 destroy_program_variants(struct st_context *st, struct gl_program *target)
1853 {
1854 if (!target || target == &_mesa_DummyProgram)
1855 return;
1856
1857 switch (target->Target) {
1858 case GL_VERTEX_PROGRAM_ARB:
1859 {
1860 struct st_vertex_program *stvp = (struct st_vertex_program *) target;
1861 struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
1862
1863 for (vpv = stvp->variants; vpv; ) {
1864 struct st_vp_variant *next = vpv->next;
1865 if (vpv->key.st == st) {
1866 /* unlink from list */
1867 *prevPtr = next;
1868 /* destroy this variant */
1869 delete_vp_variant(st, vpv);
1870 }
1871 else {
1872 prevPtr = &vpv->next;
1873 }
1874 vpv = next;
1875 }
1876 }
1877 break;
1878 case GL_FRAGMENT_PROGRAM_ARB:
1879 {
1880 struct st_common_program *stfp =
1881 (struct st_common_program *) target;
1882 struct st_fp_variant *fpv, **prevPtr = &stfp->fp_variants;
1883
1884 for (fpv = stfp->fp_variants; fpv; ) {
1885 struct st_fp_variant *next = fpv->next;
1886 if (fpv->key.st == st) {
1887 /* unlink from list */
1888 *prevPtr = next;
1889 /* destroy this variant */
1890 delete_fp_variant(st, fpv);
1891 }
1892 else {
1893 prevPtr = &fpv->next;
1894 }
1895 fpv = next;
1896 }
1897 }
1898 break;
1899 case GL_GEOMETRY_PROGRAM_NV:
1900 case GL_TESS_CONTROL_PROGRAM_NV:
1901 case GL_TESS_EVALUATION_PROGRAM_NV:
1902 case GL_COMPUTE_PROGRAM_NV:
1903 {
1904 struct st_common_program *p = st_common_program(target);
1905 struct st_common_variant *v, **prevPtr = &p->variants;
1906
1907 for (v = p->variants; v; ) {
1908 struct st_common_variant *next = v->next;
1909 if (v->key.st == st) {
1910 /* unlink from list */
1911 *prevPtr = next;
1912 /* destroy this variant */
1913 delete_basic_variant(st, v, target->Target);
1914 }
1915 else {
1916 prevPtr = &v->next;
1917 }
1918 v = next;
1919 }
1920 }
1921 break;
1922 default:
1923 _mesa_problem(NULL, "Unexpected program target 0x%x in "
1924 "destroy_program_variants_cb()", target->Target);
1925 }
1926 }
1927
1928
1929 /**
1930 * Callback for _mesa_HashWalk. Free all the shader's program variants
1931 * which match the given context.
1932 */
1933 static void
1934 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1935 {
1936 struct st_context *st = (struct st_context *) userData;
1937 struct gl_shader *shader = (struct gl_shader *) data;
1938
1939 switch (shader->Type) {
1940 case GL_SHADER_PROGRAM_MESA:
1941 {
1942 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1943 GLuint i;
1944
1945 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1946 if (shProg->_LinkedShaders[i])
1947 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1948 }
1949 }
1950 break;
1951 case GL_VERTEX_SHADER:
1952 case GL_FRAGMENT_SHADER:
1953 case GL_GEOMETRY_SHADER:
1954 case GL_TESS_CONTROL_SHADER:
1955 case GL_TESS_EVALUATION_SHADER:
1956 case GL_COMPUTE_SHADER:
1957 break;
1958 default:
1959 assert(0);
1960 }
1961 }
1962
1963
1964 /**
1965 * Callback for _mesa_HashWalk. Free all the program variants which match
1966 * the given context.
1967 */
1968 static void
1969 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1970 {
1971 struct st_context *st = (struct st_context *) userData;
1972 struct gl_program *program = (struct gl_program *) data;
1973 destroy_program_variants(st, program);
1974 }
1975
1976
1977 /**
1978 * Walk over all shaders and programs to delete any variants which
1979 * belong to the given context.
1980 * This is called during context tear-down.
1981 */
1982 void
1983 st_destroy_program_variants(struct st_context *st)
1984 {
1985 /* If shaders can be shared with other contexts, the last context will
1986 * call DeleteProgram on all shaders, releasing everything.
1987 */
1988 if (st->has_shareable_shaders)
1989 return;
1990
1991 /* ARB vert/frag program */
1992 _mesa_HashWalk(st->ctx->Shared->Programs,
1993 destroy_program_variants_cb, st);
1994
1995 /* GLSL vert/frag/geom shaders */
1996 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1997 destroy_shader_program_variants_cb, st);
1998 }
1999
2000
2001 /**
2002 * For debugging, print/dump the current vertex program.
2003 */
2004 void
2005 st_print_current_vertex_program(void)
2006 {
2007 GET_CURRENT_CONTEXT(ctx);
2008
2009 if (ctx->VertexProgram._Current) {
2010 struct st_vertex_program *stvp =
2011 (struct st_vertex_program *) ctx->VertexProgram._Current;
2012 struct st_vp_variant *stv;
2013
2014 debug_printf("Vertex program %u\n", stvp->Base.Id);
2015
2016 for (stv = stvp->variants; stv; stv = stv->next) {
2017 debug_printf("variant %p\n", stv);
2018 tgsi_dump(stv->tokens, 0);
2019 }
2020 }
2021 }
2022
2023
2024 /**
2025 * Compile one shader variant.
2026 */
2027 void
2028 st_precompile_shader_variant(struct st_context *st,
2029 struct gl_program *prog)
2030 {
2031 switch (prog->Target) {
2032 case GL_VERTEX_PROGRAM_ARB: {
2033 struct st_vertex_program *p = (struct st_vertex_program *)prog;
2034 struct st_common_variant_key key;
2035
2036 memset(&key, 0, sizeof(key));
2037
2038 key.st = st->has_shareable_shaders ? NULL : st;
2039 st_get_vp_variant(st, p, &key);
2040 break;
2041 }
2042
2043 case GL_FRAGMENT_PROGRAM_ARB: {
2044 struct st_common_program *p = (struct st_common_program *)prog;
2045 struct st_fp_variant_key key;
2046
2047 memset(&key, 0, sizeof(key));
2048
2049 key.st = st->has_shareable_shaders ? NULL : st;
2050 st_get_fp_variant(st, p, &key);
2051 break;
2052 }
2053
2054 case GL_TESS_CONTROL_PROGRAM_NV:
2055 case GL_TESS_EVALUATION_PROGRAM_NV:
2056 case GL_GEOMETRY_PROGRAM_NV:
2057 case GL_COMPUTE_PROGRAM_NV: {
2058 struct st_common_program *p = st_common_program(prog);
2059 struct st_common_variant_key key;
2060
2061 memset(&key, 0, sizeof(key));
2062
2063 key.st = st->has_shareable_shaders ? NULL : st;
2064 st_get_common_variant(st, p, &key);
2065 break;
2066 }
2067
2068 default:
2069 assert(0);
2070 }
2071 }