st/mesa: keep serialized NIR instead of nir_shader in st_program
src/mesa/state_tracker/st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35 #include "main/imports.h"
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/prog_to_nir.h"
41 #include "program/programopt.h"
42
43 #include "compiler/nir/nir_serialize.h"
44
45 #include "pipe/p_context.h"
46 #include "pipe/p_defines.h"
47 #include "pipe/p_shader_tokens.h"
48 #include "draw/draw_context.h"
49 #include "tgsi/tgsi_dump.h"
50 #include "tgsi/tgsi_emulate.h"
51 #include "tgsi/tgsi_parse.h"
52 #include "tgsi/tgsi_ureg.h"
53
54 #include "st_debug.h"
55 #include "st_cb_bitmap.h"
56 #include "st_cb_drawpixels.h"
57 #include "st_context.h"
58 #include "st_tgsi_lower_depth_clamp.h"
59 #include "st_tgsi_lower_yuv.h"
60 #include "st_program.h"
61 #include "st_mesa_to_tgsi.h"
62 #include "st_atifs_to_tgsi.h"
63 #include "st_nir.h"
64 #include "st_shader_cache.h"
65 #include "st_util.h"
66 #include "cso_cache/cso_context.h"
67
68
69
70 static void
71 set_affected_state_flags(uint64_t *states,
72 struct gl_program *prog,
73 uint64_t new_constants,
74 uint64_t new_sampler_views,
75 uint64_t new_samplers,
76 uint64_t new_images,
77 uint64_t new_ubos,
78 uint64_t new_ssbos,
79 uint64_t new_atomics)
80 {
81 if (prog->Parameters->NumParameters)
82 *states |= new_constants;
83
84 if (prog->info.num_textures)
85 *states |= new_sampler_views | new_samplers;
86
87 if (prog->info.num_images)
88 *states |= new_images;
89
90 if (prog->info.num_ubos)
91 *states |= new_ubos;
92
93 if (prog->info.num_ssbos)
94 *states |= new_ssbos;
95
96 if (prog->info.num_abos)
97 *states |= new_atomics;
98 }
99
100 /**
101 * This determines which states will be updated when the shader is bound.
102 */
103 void
104 st_set_prog_affected_state_flags(struct gl_program *prog)
105 {
106 uint64_t *states;
107
108 switch (prog->info.stage) {
109 case MESA_SHADER_VERTEX:
110 states = &((struct st_program*)prog)->affected_states;
111
112 *states = ST_NEW_VS_STATE |
113 ST_NEW_RASTERIZER |
114 ST_NEW_VERTEX_ARRAYS;
115
116 set_affected_state_flags(states, prog,
117 ST_NEW_VS_CONSTANTS,
118 ST_NEW_VS_SAMPLER_VIEWS,
119 ST_NEW_VS_SAMPLERS,
120 ST_NEW_VS_IMAGES,
121 ST_NEW_VS_UBOS,
122 ST_NEW_VS_SSBOS,
123 ST_NEW_VS_ATOMICS);
124 break;
125
126 case MESA_SHADER_TESS_CTRL:
127 states = &(st_program(prog))->affected_states;
128
129 *states = ST_NEW_TCS_STATE;
130
131 set_affected_state_flags(states, prog,
132 ST_NEW_TCS_CONSTANTS,
133 ST_NEW_TCS_SAMPLER_VIEWS,
134 ST_NEW_TCS_SAMPLERS,
135 ST_NEW_TCS_IMAGES,
136 ST_NEW_TCS_UBOS,
137 ST_NEW_TCS_SSBOS,
138 ST_NEW_TCS_ATOMICS);
139 break;
140
141 case MESA_SHADER_TESS_EVAL:
142 states = &(st_program(prog))->affected_states;
143
144 *states = ST_NEW_TES_STATE |
145 ST_NEW_RASTERIZER;
146
147 set_affected_state_flags(states, prog,
148 ST_NEW_TES_CONSTANTS,
149 ST_NEW_TES_SAMPLER_VIEWS,
150 ST_NEW_TES_SAMPLERS,
151 ST_NEW_TES_IMAGES,
152 ST_NEW_TES_UBOS,
153 ST_NEW_TES_SSBOS,
154 ST_NEW_TES_ATOMICS);
155 break;
156
157 case MESA_SHADER_GEOMETRY:
158 states = &(st_program(prog))->affected_states;
159
160 *states = ST_NEW_GS_STATE |
161 ST_NEW_RASTERIZER;
162
163 set_affected_state_flags(states, prog,
164 ST_NEW_GS_CONSTANTS,
165 ST_NEW_GS_SAMPLER_VIEWS,
166 ST_NEW_GS_SAMPLERS,
167 ST_NEW_GS_IMAGES,
168 ST_NEW_GS_UBOS,
169 ST_NEW_GS_SSBOS,
170 ST_NEW_GS_ATOMICS);
171 break;
172
173 case MESA_SHADER_FRAGMENT:
174 states = &((struct st_program*)prog)->affected_states;
175
176 /* gl_FragCoord and glDrawPixels always use constants. */
177 *states = ST_NEW_FS_STATE |
178 ST_NEW_SAMPLE_SHADING |
179 ST_NEW_FS_CONSTANTS;
180
181 set_affected_state_flags(states, prog,
182 ST_NEW_FS_CONSTANTS,
183 ST_NEW_FS_SAMPLER_VIEWS,
184 ST_NEW_FS_SAMPLERS,
185 ST_NEW_FS_IMAGES,
186 ST_NEW_FS_UBOS,
187 ST_NEW_FS_SSBOS,
188 ST_NEW_FS_ATOMICS);
189 break;
190
191 case MESA_SHADER_COMPUTE:
192 states = &((struct st_program*)prog)->affected_states;
193
194 *states = ST_NEW_CS_STATE;
195
196 set_affected_state_flags(states, prog,
197 ST_NEW_CS_CONSTANTS,
198 ST_NEW_CS_SAMPLER_VIEWS,
199 ST_NEW_CS_SAMPLERS,
200 ST_NEW_CS_IMAGES,
201 ST_NEW_CS_UBOS,
202 ST_NEW_CS_SSBOS,
203 ST_NEW_CS_ATOMICS);
204 break;
205
206 default:
207 unreachable("unhandled shader stage");
208 }
209 }
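/* Illustrative use (a sketch, not part of the original file): at bind
 * time the context only needs to OR the precomputed mask into its dirty
 * bits, e.g.
 *
 *    st->dirty |= st_program(prog)->affected_states;
 *
 * which is essentially what st_finalize_program() at the end of this
 * file does for the currently bound program.
 */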
210
211 static void
212 delete_ir(struct pipe_shader_state *ir)
213 {
214 if (ir->tokens) {
215 ureg_free_tokens(ir->tokens);
216 ir->tokens = NULL;
217 }
218
219 /* Note: Any setup of ->ir.nir that has had pipe->create_*_state called on
220 * it has resulted in the driver taking ownership of the NIR. Those
221 * callers should be NULLing out the nir field in any pipe_shader_state
222 * that might have this called in order to indicate that.
223 *
224 * GLSL IR and ARB programs will have set gl_program->nir to the same
225 * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
226 */
227 }
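/* Hypothetical caller pattern (a sketch of the ownership rule described
 * above): once the NIR has been handed to the driver, the reference is
 * cleared so delete_ir() only ever frees the TGSI tokens:
 *
 *    shader = pipe->create_vs_state(pipe, &state);
 *    state.ir.nir = NULL;   /* the driver owns the NIR now */
 */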
228
229 /**
230 * Delete a vertex program variant. Note the caller must unlink
231 * the variant from the linked list.
232 */
233 static void
234 delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
235 {
236 if (vpv->driver_shader) {
237 if (st->has_shareable_shaders || vpv->key.st == st) {
238 cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
239 } else {
240 st_save_zombie_shader(vpv->key.st, PIPE_SHADER_VERTEX,
241 vpv->driver_shader);
242 }
243 }
244
245 if (vpv->draw_shader)
246 draw_delete_vertex_shader( st->draw, vpv->draw_shader );
247
248 if (vpv->tokens)
249 ureg_free_tokens(vpv->tokens);
250
251 free( vpv );
252 }
253
254
255
256 /**
257 * Free all variants of a vertex program.
258 */
259 void
260 st_release_vp_variants( struct st_context *st,
261 struct st_program *stvp )
262 {
263 struct st_vp_variant *vpv;
264
265 for (vpv = stvp->vp_variants; vpv; ) {
266 struct st_vp_variant *next = vpv->next;
267 delete_vp_variant(st, vpv);
268 vpv = next;
269 }
270
271 stvp->vp_variants = NULL;
272
273 delete_ir(&stvp->state);
274 }
275
276
277
278 /**
279 * Delete a fragment program variant. Note the caller must unlink
280 * the variant from the linked list.
281 */
282 static void
283 delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
284 {
285 if (fpv->driver_shader) {
286 if (st->has_shareable_shaders || fpv->key.st == st) {
287 cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
288 } else {
289 st_save_zombie_shader(fpv->key.st, PIPE_SHADER_FRAGMENT,
290 fpv->driver_shader);
291 }
292 }
293
294 free(fpv);
295 }
296
297
298 /**
299 * Free all variants of a fragment program.
300 */
301 void
302 st_release_fp_variants(struct st_context *st, struct st_program *stfp)
303 {
304 struct st_fp_variant *fpv;
305
306 for (fpv = stfp->fp_variants; fpv; ) {
307 struct st_fp_variant *next = fpv->next;
308 delete_fp_variant(st, fpv);
309 fpv = next;
310 }
311
312 stfp->fp_variants = NULL;
313
314 delete_ir(&stfp->state);
315 }
316
317
318 /**
319 * Delete a common program variant. Note the caller must unlink
320 * the variant from the linked list.
321 */
322 static void
323 delete_common_variant(struct st_context *st, struct st_common_variant *v,
324 GLenum target)
325 {
326 if (v->driver_shader) {
327 if (st->has_shareable_shaders || v->key.st == st) {
328 /* The shader's context matches the calling context, or we
329 * don't care.
330 */
331 switch (target) {
332 case GL_TESS_CONTROL_PROGRAM_NV:
333 cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
334 break;
335 case GL_TESS_EVALUATION_PROGRAM_NV:
336 cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
337 break;
338 case GL_GEOMETRY_PROGRAM_NV:
339 cso_delete_geometry_shader(st->cso_context, v->driver_shader);
340 break;
341 case GL_COMPUTE_PROGRAM_NV:
342 cso_delete_compute_shader(st->cso_context, v->driver_shader);
343 break;
344 default:
345 unreachable("bad shader type in delete_basic_variant");
346 }
347 } else {
348 /* We can't delete a shader with a context different from the one
349 * that created it. Add it to the creating context's zombie list.
350 */
351 enum pipe_shader_type type;
352 switch (target) {
353 case GL_TESS_CONTROL_PROGRAM_NV:
354 type = PIPE_SHADER_TESS_CTRL;
355 break;
356 case GL_TESS_EVALUATION_PROGRAM_NV:
357 type = PIPE_SHADER_TESS_EVAL;
358 break;
359 case GL_GEOMETRY_PROGRAM_NV:
360 type = PIPE_SHADER_GEOMETRY;
361 break;
362 default:
363 unreachable("");
364 }
365 st_save_zombie_shader(v->key.st, type, v->driver_shader);
366 }
367 }
368
369 free(v);
370 }
371
372
373 /**
374 * Free all common program variants.
375 */
376 void
377 st_release_common_variants(struct st_context *st, struct st_program *p)
378 {
379 struct st_common_variant *v;
380
381 for (v = p->variants; v; ) {
382 struct st_common_variant *next = v->next;
383 delete_common_variant(st, v, p->Base.Target);
384 v = next;
385 }
386
387 p->variants = NULL;
388 delete_ir(&p->state);
389 }
390
391 void
392 st_finalize_nir_before_variants(struct nir_shader *nir)
393 {
394 NIR_PASS_V(nir, nir_opt_access);
395
396 NIR_PASS_V(nir, nir_split_var_copies);
397 NIR_PASS_V(nir, nir_lower_var_copies);
398 if (nir->options->lower_all_io_to_temps ||
399 nir->options->lower_all_io_to_elements ||
400 nir->info.stage == MESA_SHADER_VERTEX ||
401 nir->info.stage == MESA_SHADER_GEOMETRY) {
402 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
403 } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
404 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
405 }
406
407 st_nir_assign_vs_in_locations(nir);
408 }
409
410 /**
411 * Translate ARB (asm) program to NIR
412 */
413 static nir_shader *
414 st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
415 gl_shader_stage stage)
416 {
417 struct pipe_screen *screen = st->pipe->screen;
418 const struct gl_shader_compiler_options *options =
419 &st->ctx->Const.ShaderCompilerOptions[stage];
420
421 /* Translate to NIR */
422 nir_shader *nir = prog_to_nir(prog, options->NirOptions);
423 NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
424 nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
425
426 NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
427 NIR_PASS_V(nir, nir_lower_system_values);
428
429 /* Optimise NIR */
430 NIR_PASS_V(nir, nir_opt_constant_folding);
431 st_nir_opts(nir);
432 st_finalize_nir_before_variants(nir);
433
434 if (st->allow_st_finalize_nir_twice)
435 st_finalize_nir(st, prog, NULL, nir, true);
436
437 nir_validate_shader(nir, "after st/glsl finalize_nir");
438
439 return nir;
440 }
441
442 void
443 st_prepare_vertex_program(struct st_program *stp)
444 {
445 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
446
447 stvp->num_inputs = 0;
448 memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
449 memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));
450
451 /* Determine number of inputs, the mappings between VERT_ATTRIB_x
452 * and TGSI generic input indexes, plus input attrib semantic info.
453 */
454 for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
455 if ((stp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
456 stvp->input_to_index[attr] = stvp->num_inputs;
457 stvp->index_to_input[stvp->num_inputs] = attr;
458 stvp->num_inputs++;
459
460 if ((stp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
461 /* add placeholder for second part of a double attribute */
462 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
463 stvp->num_inputs++;
464 }
465 }
466 }
467 /* pre-setup potentially unused edgeflag input */
468 stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
469 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
470
471 /* Compute mapping of vertex program outputs to slots. */
472 unsigned num_outputs = 0;
473 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
474 if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr))
475 stvp->result_to_output[attr] = num_outputs++;
476 }
477 /* pre-setup potentially unused edgeflag output */
478 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
479 }
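/* Worked example (illustrative): for a vertex shader whose inputs_read
 * contains only VERT_ATTRIB_POS and VERT_ATTRIB_TEX0, the loop above
 * produces
 *
 *    input_to_index[VERT_ATTRIB_POS]  = 0
 *    input_to_index[VERT_ATTRIB_TEX0] = 1
 *    num_inputs = 2
 *
 * and the edgeflag input is pre-assigned index 2 in case a variant
 * needs it later.
 */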
480
481 void
482 st_translate_stream_output_info(struct gl_program *prog)
483 {
484 struct gl_transform_feedback_info *info = prog->sh.LinkedTransformFeedback;
485 if (!info)
486 return;
487
488 /* Determine the (default) output register mapping for each output. */
489 unsigned num_outputs = 0;
490 ubyte output_mapping[VARYING_SLOT_TESS_MAX];
491 memset(output_mapping, 0, sizeof(output_mapping));
492
493 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
494 if (prog->info.outputs_written & BITFIELD64_BIT(attr))
495 output_mapping[attr] = num_outputs++;
496 }
497
498 /* Translate stream output info. */
499 struct pipe_stream_output_info *so_info =
500 &((struct st_program*)prog)->state.stream_output;
501
502 for (unsigned i = 0; i < info->NumOutputs; i++) {
503 so_info->output[i].register_index =
504 output_mapping[info->Outputs[i].OutputRegister];
505 so_info->output[i].start_component = info->Outputs[i].ComponentOffset;
506 so_info->output[i].num_components = info->Outputs[i].NumComponents;
507 so_info->output[i].output_buffer = info->Outputs[i].OutputBuffer;
508 so_info->output[i].dst_offset = info->Outputs[i].DstOffset;
509 so_info->output[i].stream = info->Outputs[i].StreamId;
510 }
511
512 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
513 so_info->stride[i] = info->Buffers[i].Stride;
514 }
515 so_info->num_outputs = info->NumOutputs;
516 }
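/* Worked example (illustrative): a shader writing only VARYING_SLOT_POS
 * and VARYING_SLOT_VAR0 gets output_mapping[VARYING_SLOT_POS] = 0 and
 * output_mapping[VARYING_SLOT_VAR0] = 1, so a transform-feedback output
 * declared on VAR0 lands in pipe_stream_output_info with
 * register_index = 1.
 */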
517
518 /**
519 * Translate a vertex program.
520 */
521 bool
522 st_translate_vertex_program(struct st_context *st,
523 struct st_program *stp)
524 {
525 struct ureg_program *ureg;
526 enum pipe_error error;
527 unsigned num_outputs = 0;
528 unsigned attr;
529 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
530 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
531
532 if (stp->Base.arb.IsPositionInvariant)
533 _mesa_insert_mvp_code(st->ctx, &stp->Base);
534
535 st_prepare_vertex_program(stp);
536
537 /* ARB_vp: */
538 if (!stp->glsl_to_tgsi) {
539 _mesa_remove_output_reads(&stp->Base, PROGRAM_OUTPUT);
540
541 /* This determines which states will be updated when the assembly
542 * shader is bound.
543 */
544 stp->affected_states = ST_NEW_VS_STATE |
545 ST_NEW_RASTERIZER |
546 ST_NEW_VERTEX_ARRAYS;
547
548 if (stp->Base.Parameters->NumParameters)
549 stp->affected_states |= ST_NEW_VS_CONSTANTS;
550
551 /* No samplers are allowed in ARB_vp. */
552 }
553
554 /* Get semantic names and indices. */
555 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
556 if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
557 unsigned slot = num_outputs++;
558 unsigned semantic_name, semantic_index;
559 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
560 &semantic_name, &semantic_index);
561 output_semantic_name[slot] = semantic_name;
562 output_semantic_index[slot] = semantic_index;
563 }
564 }
565 /* pre-setup potentially unused edgeflag output */
566 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
567 output_semantic_index[num_outputs] = 0;
568
569 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
570 if (ureg == NULL)
571 return false;
572
573 if (stp->Base.info.clip_distance_array_size)
574 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
575 stp->Base.info.clip_distance_array_size);
576 if (stp->Base.info.cull_distance_array_size)
577 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
578 stp->Base.info.cull_distance_array_size);
579
580 if (ST_DEBUG & DEBUG_MESA) {
581 _mesa_print_program(&stp->Base);
582 _mesa_print_program_parameters(st->ctx, &stp->Base);
583 debug_printf("\n");
584 }
585
586 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
587
588 if (stp->glsl_to_tgsi) {
589 error = st_translate_program(st->ctx,
590 PIPE_SHADER_VERTEX,
591 ureg,
592 stp->glsl_to_tgsi,
593 &stp->Base,
594 /* inputs */
595 stvp->num_inputs,
596 stvp->input_to_index,
597 NULL, /* inputSlotToAttr */
598 NULL, /* input semantic name */
599 NULL, /* input semantic index */
600 NULL, /* interp mode */
601 /* outputs */
602 num_outputs,
603 stvp->result_to_output,
604 output_semantic_name,
605 output_semantic_index);
606
607 st_translate_stream_output_info(&stp->Base);
608
609 free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
610 } else
611 error = st_translate_mesa_program(st->ctx,
612 PIPE_SHADER_VERTEX,
613 ureg,
614 &stp->Base,
615 /* inputs */
616 stvp->num_inputs,
617 stvp->input_to_index,
618 NULL, /* input semantic name */
619 NULL, /* input semantic index */
620 NULL,
621 /* outputs */
622 num_outputs,
623 stvp->result_to_output,
624 output_semantic_name,
625 output_semantic_index);
626
627 if (error) {
628 debug_printf("%s: failed to translate Mesa program:\n", __func__);
629 _mesa_print_program(&stp->Base);
630 debug_assert(0);
631 return false;
632 }
633
634 stp->state.tokens = ureg_get_tokens(ureg, NULL);
635 ureg_destroy(ureg);
636
637 if (stp->glsl_to_tgsi) {
638 stp->glsl_to_tgsi = NULL;
639 st_store_ir_in_disk_cache(st, &stp->Base, false);
640 }
641
642 /* Translate to NIR.
643 *
644 * This must be done after the TGSI translation, because
645 * we'll pass the NIR shader to the driver and the TGSI version to
646 * the draw module for the select/feedback/rasterpos code.
647 */
648 if (st->pipe->screen->get_shader_param(st->pipe->screen,
649 PIPE_SHADER_VERTEX,
650 PIPE_SHADER_CAP_PREFERRED_IR)) {
651 assert(!stp->glsl_to_tgsi);
652
653 nir_shader *nir =
654 st_translate_prog_to_nir(st, &stp->Base, MESA_SHADER_VERTEX);
655
656 if (stp->state.ir.nir)
657 ralloc_free(stp->state.ir.nir);
658 stp->state.type = PIPE_SHADER_IR_NIR;
659 stp->state.ir.nir = nir;
660 stp->Base.nir = nir;
661 return true;
662 }
663
664 return stp->state.tokens != NULL;
665 }
666
667 static struct nir_shader *
668 get_nir_shader(struct st_context *st, struct st_program *stp)
669 {
670 if (stp->state.ir.nir)
671 return nir_shader_clone(NULL, stp->state.ir.nir);
672
673 struct blob_reader blob_reader;
674 const struct nir_shader_compiler_options *options =
675 st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;
676
677 blob_reader_init(&blob_reader, stp->nir_binary, stp->nir_size);
678 return nir_deserialize(NULL, options, &blob_reader);
679 }
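/* The matching serialize side lives in st_finalize_program() at the end
 * of this file; a condensed sketch of the round trip (names as in this
 * file):
 *
 *    struct blob blob;
 *    blob_init(&blob);
 *    nir_serialize(&blob, prog->nir, false);
 *    stp->nir_binary = malloc(blob.size);
 *    memcpy(stp->nir_binary, blob.data, blob.size);
 *    stp->nir_size = blob.size;
 *    blob_finish(&blob);
 *
 * get_nir_shader() then gives each variant a fresh nir_shader, either by
 * cloning the still-live IR or by deserializing that binary.
 */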
680
681 static const gl_state_index16 depth_range_state[STATE_LENGTH] =
682 { STATE_DEPTH_RANGE };
683
684 static struct st_vp_variant *
685 st_create_vp_variant(struct st_context *st,
686 struct st_program *stvp,
687 const struct st_common_variant_key *key)
688 {
689 struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
690 struct pipe_context *pipe = st->pipe;
691 struct pipe_screen *screen = pipe->screen;
692 struct pipe_shader_state state = {0};
693
694 static const gl_state_index16 point_size_state[STATE_LENGTH] =
695 { STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
696 struct gl_program_parameter_list *params = stvp->Base.Parameters;
697
698 vpv->key = *key;
699 vpv->num_inputs = ((struct st_vertex_program*)stvp)->num_inputs;
700
701 state.stream_output = stvp->state.stream_output;
702
703 if (stvp->state.type == PIPE_SHADER_IR_NIR) {
704 bool finalize = false;
705
706 state.type = PIPE_SHADER_IR_NIR;
707 state.ir.nir = get_nir_shader(st, stvp);
708 if (key->clamp_color) {
709 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
710 finalize = true;
711 }
712 if (key->passthrough_edgeflags) {
713 NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
714 vpv->num_inputs++;
715 finalize = true;
716 }
717
718 if (key->lower_point_size) {
719 _mesa_add_state_reference(params, point_size_state);
720 NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
721 point_size_state);
722 finalize = true;
723 }
724
725 if (key->lower_ucp) {
726 bool can_compact = screen->get_param(screen,
727 PIPE_CAP_NIR_COMPACT_ARRAYS);
728
729 bool use_eye = st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] != NULL;
730 gl_state_index16 clipplane_state[MAX_CLIP_PLANES][STATE_LENGTH];
731 for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
732 if (use_eye) {
733 clipplane_state[i][0] = STATE_CLIPPLANE;
734 clipplane_state[i][1] = i;
735 } else {
736 clipplane_state[i][0] = STATE_INTERNAL;
737 clipplane_state[i][1] = STATE_CLIP_INTERNAL;
738 clipplane_state[i][2] = i;
739 }
740 _mesa_add_state_reference(params, clipplane_state[i]);
741 }
742
743 NIR_PASS_V(state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
744 true, can_compact, clipplane_state);
745 NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
746 nir_shader_get_entrypoint(state.ir.nir), true, false);
747 NIR_PASS_V(state.ir.nir, nir_lower_global_vars_to_local);
748 finalize = true;
749 }
750
751 if (finalize || !st->allow_st_finalize_nir_twice) {
752 st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
753 true);
754
755 /* Some of the lowering above may have introduced new varyings */
756 nir_shader_gather_info(state.ir.nir,
757 nir_shader_get_entrypoint(state.ir.nir));
758 }
759
760 if (ST_DEBUG & DEBUG_PRINT_IR)
761 nir_print_shader(state.ir.nir, stderr);
762
763 vpv->driver_shader = pipe->create_vs_state(pipe, &state);
764
765 /* When generating a NIR program, we usually don't have TGSI tokens.
766 * However, we do create them for ARB_vertex_program / fixed-function VS
767 * programs which we may need to use with the draw module for legacy
768 * feedback/select emulation. If they exist, copy them.
769 *
770 * TODO: Lowering for shader variants is not applied to TGSI when
771 * generating a NIR shader.
772 */
773 if (stvp->state.tokens)
774 vpv->tokens = tgsi_dup_tokens(stvp->state.tokens);
775
776 return vpv;
777 }
778
779 state.type = PIPE_SHADER_IR_TGSI;
780 state.tokens = tgsi_dup_tokens(stvp->state.tokens);
781
782 /* Emulate features. */
783 if (key->clamp_color || key->passthrough_edgeflags) {
784 const struct tgsi_token *tokens;
785 unsigned flags =
786 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
787 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
788
789 tokens = tgsi_emulate(state.tokens, flags);
790
791 if (tokens) {
792 tgsi_free_tokens(state.tokens);
793 state.tokens = tokens;
794
795 if (key->passthrough_edgeflags)
796 vpv->num_inputs++;
797 } else
798 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
799 }
800
801 if (key->lower_depth_clamp) {
802 unsigned depth_range_const =
803 _mesa_add_state_reference(params, depth_range_state);
804
805 const struct tgsi_token *tokens;
806 tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
807 key->clip_negative_one_to_one);
808 if (tokens != state.tokens)
809 tgsi_free_tokens(state.tokens);
810 state.tokens = tokens;
811 }
812
813 if (ST_DEBUG & DEBUG_PRINT_IR)
814 tgsi_dump(state.tokens, 0);
815
816 vpv->driver_shader = pipe->create_vs_state(pipe, &state);
817 /* Save this for selection/feedback/rasterpos. */
818 vpv->tokens = state.tokens;
819 return vpv;
820 }
821
822
823 /**
824 * Find/create a vertex program variant.
825 */
826 struct st_vp_variant *
827 st_get_vp_variant(struct st_context *st,
828 struct st_program *stp,
829 const struct st_common_variant_key *key)
830 {
831 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
832 struct st_vp_variant *vpv;
833
834 /* Search for existing variant */
835 for (vpv = stp->vp_variants; vpv; vpv = vpv->next) {
836 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
837 break;
838 }
839 }
840
841 if (!vpv) {
842 /* create now */
843 vpv = st_create_vp_variant(st, stp, key);
844 if (vpv) {
845 for (unsigned index = 0; index < vpv->num_inputs; ++index) {
846 unsigned attr = stvp->index_to_input[index];
847 if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
848 continue;
849 vpv->vert_attrib_mask |= 1u << attr;
850 }
851
852 /* insert into list */
853 vpv->next = stp->vp_variants;
854 stp->vp_variants = vpv;
855 }
856 }
857
858 return vpv;
859 }
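/* Typical lookup (illustrative, mirroring st_precompile_shader_variant
 * below):
 *
 *    struct st_common_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    st_get_vp_variant(st, stp, &key);
 *
 * Keys are compared with memcmp(), which is why every creation site
 * zeroes the whole struct before filling it in.
 */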
860
861
862 /**
863 * Translate a Mesa fragment program into a TGSI or NIR shader.
864 */
865 bool
866 st_translate_fragment_program(struct st_context *st,
867 struct st_program *stfp)
868 {
869 /* Non-GLSL programs: */
870 if (!stfp->glsl_to_tgsi) {
871 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
872 if (st->ctx->Const.GLSLFragCoordIsSysVal)
873 _mesa_program_fragment_position_to_sysval(&stfp->Base);
874
875 /* This determines which states will be updated when the assembly
876 * shader is bound.
877 *
878 * fragment.position and glDrawPixels always use constants.
879 */
880 stfp->affected_states = ST_NEW_FS_STATE |
881 ST_NEW_SAMPLE_SHADING |
882 ST_NEW_FS_CONSTANTS;
883
884 if (stfp->ati_fs) {
885 /* Just set them for ATI_fs unconditionally. */
886 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
887 ST_NEW_FS_SAMPLERS;
888 } else {
889 /* ARB_fp */
890 if (stfp->Base.SamplersUsed)
891 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
892 ST_NEW_FS_SAMPLERS;
893 }
894
895 /* Translate to NIR. */
896 if (!stfp->ati_fs &&
897 st->pipe->screen->get_shader_param(st->pipe->screen,
898 PIPE_SHADER_FRAGMENT,
899 PIPE_SHADER_CAP_PREFERRED_IR)) {
900 nir_shader *nir =
901 st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
902
903 if (stfp->state.ir.nir)
904 ralloc_free(stfp->state.ir.nir);
905 stfp->state.type = PIPE_SHADER_IR_NIR;
906 stfp->state.ir.nir = nir;
907 stfp->Base.nir = nir;
908 return true;
909 }
910 }
911
912 ubyte outputMapping[2 * FRAG_RESULT_MAX];
913 ubyte inputMapping[VARYING_SLOT_MAX];
914 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
915 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
916 GLuint attr;
917 GLbitfield64 inputsRead;
918 struct ureg_program *ureg;
919
920 GLboolean write_all = GL_FALSE;
921
922 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
923 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
924 uint fs_num_inputs = 0;
925
926 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
927 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
928 uint fs_num_outputs = 0;
929
930 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
931
932 /*
933 * Convert Mesa program inputs to TGSI input register semantics.
934 */
935 inputsRead = stfp->Base.info.inputs_read;
936 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
937 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
938 const GLuint slot = fs_num_inputs++;
939
940 inputMapping[attr] = slot;
941 inputSlotToAttr[slot] = attr;
942
943 switch (attr) {
944 case VARYING_SLOT_POS:
945 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
946 input_semantic_index[slot] = 0;
947 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
948 break;
949 case VARYING_SLOT_COL0:
950 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
951 input_semantic_index[slot] = 0;
952 interpMode[slot] = stfp->glsl_to_tgsi ?
953 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
954 break;
955 case VARYING_SLOT_COL1:
956 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
957 input_semantic_index[slot] = 1;
958 interpMode[slot] = stfp->glsl_to_tgsi ?
959 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
960 break;
961 case VARYING_SLOT_FOGC:
962 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
963 input_semantic_index[slot] = 0;
964 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
965 break;
966 case VARYING_SLOT_FACE:
967 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
968 input_semantic_index[slot] = 0;
969 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
970 break;
971 case VARYING_SLOT_PRIMITIVE_ID:
972 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
973 input_semantic_index[slot] = 0;
974 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
975 break;
976 case VARYING_SLOT_LAYER:
977 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
978 input_semantic_index[slot] = 0;
979 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
980 break;
981 case VARYING_SLOT_VIEWPORT:
982 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
983 input_semantic_index[slot] = 0;
984 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
985 break;
986 case VARYING_SLOT_CLIP_DIST0:
987 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
988 input_semantic_index[slot] = 0;
989 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
990 break;
991 case VARYING_SLOT_CLIP_DIST1:
992 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
993 input_semantic_index[slot] = 1;
994 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
995 break;
996 case VARYING_SLOT_CULL_DIST0:
997 case VARYING_SLOT_CULL_DIST1:
998 /* these should have been lowered by GLSL */
999 assert(0);
1000 break;
1001 /* In most cases, there is nothing special about these
1002 * inputs, so adopt a convention to use the generic
1003 * semantic name and the mesa VARYING_SLOT_ number as the
1004 * index.
1005 *
1006 * All that is required is that the vertex shader labels
1007 * its own outputs similarly, and that the vertex shader
1008 * generates at least every output required by the
1009 * fragment shader plus fixed-function hardware (such as
1010 * BFC).
1011 *
1012 * However, some drivers may need us to identify the PNTC and TEXi
1013 * varyings if, for example, their capability to replace them with
1014 * sprite coordinates is limited.
1015 */
1016 case VARYING_SLOT_PNTC:
1017 if (st->needs_texcoord_semantic) {
1018 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
1019 input_semantic_index[slot] = 0;
1020 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1021 break;
1022 }
1023 /* fall through */
1024 case VARYING_SLOT_TEX0:
1025 case VARYING_SLOT_TEX1:
1026 case VARYING_SLOT_TEX2:
1027 case VARYING_SLOT_TEX3:
1028 case VARYING_SLOT_TEX4:
1029 case VARYING_SLOT_TEX5:
1030 case VARYING_SLOT_TEX6:
1031 case VARYING_SLOT_TEX7:
1032 if (st->needs_texcoord_semantic) {
1033 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
1034 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
1035 interpMode[slot] = stfp->glsl_to_tgsi ?
1036 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1037 break;
1038 }
1039 /* fall through */
1040 case VARYING_SLOT_VAR0:
1041 default:
1042 /* Semantic indices should be zero-based because drivers may choose
1043 * to assign a fixed slot determined by that index.
1044 * This is useful because ARB_separate_shader_objects uses location
1045 * qualifiers for linkage, and if the semantic index corresponds to
1046 * these locations, linkage passes in the driver become unnecessary.
1047 *
1048 * If needs_texcoord_semantic is true, no semantic indices will be
1049 * consumed for the TEXi varyings, and we can base the locations of
1050 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
1051 */
1052 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
1053 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
1054 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
1055 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
1056 if (attr == VARYING_SLOT_PNTC)
1057 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1058 else {
1059 interpMode[slot] = stfp->glsl_to_tgsi ?
1060 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1061 }
1062 break;
1063 }
1064 }
1065 else {
1066 inputMapping[attr] = -1;
1067 }
1068 }
1069
1070 /*
1071 * Semantics and mapping for outputs
1072 */
1073 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
1074
1075 /* if z is written, emit that first */
1076 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
1077 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
1078 fs_output_semantic_index[fs_num_outputs] = 0;
1079 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
1080 fs_num_outputs++;
1081 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_DEPTH);
1082 }
1083
1084 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
1085 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
1086 fs_output_semantic_index[fs_num_outputs] = 0;
1087 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
1088 fs_num_outputs++;
1089 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_STENCIL);
1090 }
1091
1092 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
1093 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
1094 fs_output_semantic_index[fs_num_outputs] = 0;
1095 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
1096 fs_num_outputs++;
1097 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
1098 }
1099
1100 /* handle remaining outputs (color) */
1101 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
1102 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
1103 stfp->Base.SecondaryOutputsWritten;
1104 const unsigned loc = attr % FRAG_RESULT_MAX;
1105
1106 if (written & BITFIELD64_BIT(loc)) {
1107 switch (loc) {
1108 case FRAG_RESULT_DEPTH:
1109 case FRAG_RESULT_STENCIL:
1110 case FRAG_RESULT_SAMPLE_MASK:
1111 /* handled above */
1112 assert(0);
1113 break;
1114 case FRAG_RESULT_COLOR:
1115 write_all = GL_TRUE; /* fallthrough */
1116 default: {
1117 int index;
1118 assert(loc == FRAG_RESULT_COLOR ||
1119 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
1120
1121 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
1122
1123 if (attr >= FRAG_RESULT_MAX) {
1124 /* Secondary color for dual source blending. */
1125 assert(index == 0);
1126 index++;
1127 }
1128
1129 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
1130 fs_output_semantic_index[fs_num_outputs] = index;
1131 outputMapping[attr] = fs_num_outputs;
1132 break;
1133 }
1134 }
1135
1136 fs_num_outputs++;
1137 }
1138 }
1139
1140 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
1141 if (ureg == NULL)
1142 return false;
1143
1144 if (ST_DEBUG & DEBUG_MESA) {
1145 _mesa_print_program(&stfp->Base);
1146 _mesa_print_program_parameters(st->ctx, &stfp->Base);
1147 debug_printf("\n");
1148 }
1149 if (write_all == GL_TRUE)
1150 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
1151
1152 if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
1153 switch (stfp->Base.info.fs.depth_layout) {
1154 case FRAG_DEPTH_LAYOUT_ANY:
1155 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1156 TGSI_FS_DEPTH_LAYOUT_ANY);
1157 break;
1158 case FRAG_DEPTH_LAYOUT_GREATER:
1159 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1160 TGSI_FS_DEPTH_LAYOUT_GREATER);
1161 break;
1162 case FRAG_DEPTH_LAYOUT_LESS:
1163 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1164 TGSI_FS_DEPTH_LAYOUT_LESS);
1165 break;
1166 case FRAG_DEPTH_LAYOUT_UNCHANGED:
1167 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
1168 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
1169 break;
1170 default:
1171 assert(0);
1172 }
1173 }
1174
1175 if (stfp->glsl_to_tgsi) {
1176 st_translate_program(st->ctx,
1177 PIPE_SHADER_FRAGMENT,
1178 ureg,
1179 stfp->glsl_to_tgsi,
1180 &stfp->Base,
1181 /* inputs */
1182 fs_num_inputs,
1183 inputMapping,
1184 inputSlotToAttr,
1185 input_semantic_name,
1186 input_semantic_index,
1187 interpMode,
1188 /* outputs */
1189 fs_num_outputs,
1190 outputMapping,
1191 fs_output_semantic_name,
1192 fs_output_semantic_index);
1193
1194 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
1195 } else if (stfp->ati_fs)
1196 st_translate_atifs_program(ureg,
1197 stfp->ati_fs,
1198 &stfp->Base,
1199 /* inputs */
1200 fs_num_inputs,
1201 inputMapping,
1202 input_semantic_name,
1203 input_semantic_index,
1204 interpMode,
1205 /* outputs */
1206 fs_num_outputs,
1207 outputMapping,
1208 fs_output_semantic_name,
1209 fs_output_semantic_index);
1210 else
1211 st_translate_mesa_program(st->ctx,
1212 PIPE_SHADER_FRAGMENT,
1213 ureg,
1214 &stfp->Base,
1215 /* inputs */
1216 fs_num_inputs,
1217 inputMapping,
1218 input_semantic_name,
1219 input_semantic_index,
1220 interpMode,
1221 /* outputs */
1222 fs_num_outputs,
1223 outputMapping,
1224 fs_output_semantic_name,
1225 fs_output_semantic_index);
1226
1227 stfp->state.tokens = ureg_get_tokens(ureg, NULL);
1228 ureg_destroy(ureg);
1229
1230 if (stfp->glsl_to_tgsi) {
1231 stfp->glsl_to_tgsi = NULL;
1232 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1233 }
1234
1235 return stfp->state.tokens != NULL;
1236 }
1237
1238 static struct st_fp_variant *
1239 st_create_fp_variant(struct st_context *st,
1240 struct st_program *stfp,
1241 const struct st_fp_variant_key *key)
1242 {
1243 struct pipe_context *pipe = st->pipe;
1244 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1245 struct pipe_shader_state state = {0};
1246 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1247 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1248 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1249 static const gl_state_index16 scale_state[STATE_LENGTH] =
1250 { STATE_INTERNAL, STATE_PT_SCALE };
1251 static const gl_state_index16 bias_state[STATE_LENGTH] =
1252 { STATE_INTERNAL, STATE_PT_BIAS };
1253 static const gl_state_index16 alpha_ref_state[STATE_LENGTH] =
1254 { STATE_INTERNAL, STATE_ALPHA_REF };
1255
1256 if (!variant)
1257 return NULL;
1258
1259 if (stfp->state.type == PIPE_SHADER_IR_NIR) {
1260 bool finalize = false;
1261
1262 state.type = PIPE_SHADER_IR_NIR;
1263 state.ir.nir = get_nir_shader(st, stfp);
1264
1265 if (key->clamp_color) {
1266 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1267 finalize = true;
1268 }
1269
1270 if (key->lower_flatshade) {
1271 NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
1272 finalize = true;
1273 }
1274
1275 if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
1276 _mesa_add_state_reference(params, alpha_ref_state);
1277 NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
1278 false, alpha_ref_state);
1279 finalize = true;
1280 }
1281
1282 if (key->lower_two_sided_color) {
1283 NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
1284 finalize = true;
1285 }
1286
1287 if (key->persample_shading) {
1288 nir_shader *shader = state.ir.nir;
1289 nir_foreach_variable(var, &shader->inputs)
1290 var->data.sample = true;
1291 finalize = true;
1292 }
1293
1294 assert(!(key->bitmap && key->drawpixels));
1295
1296 /* glBitmap */
1297 if (key->bitmap) {
1298 nir_lower_bitmap_options options = {0};
1299
1300 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1301 options.sampler = variant->bitmap_sampler;
1302 options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
1303
1304 NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
1305 finalize = true;
1306 }
1307
1308 /* glDrawPixels (color only) */
1309 if (key->drawpixels) {
1310 nir_lower_drawpixels_options options = {{0}};
1311 unsigned samplers_used = stfp->Base.SamplersUsed;
1312
1313 /* Find the first unused slot. */
1314 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1315 options.drawpix_sampler = variant->drawpix_sampler;
1316 samplers_used |= (1 << variant->drawpix_sampler);
1317
1318 options.pixel_maps = key->pixelMaps;
1319 if (key->pixelMaps) {
1320 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1321 options.pixelmap_sampler = variant->pixelmap_sampler;
1322 }
1323
1324 options.scale_and_bias = key->scaleAndBias;
1325 if (key->scaleAndBias) {
1326 _mesa_add_state_reference(params, scale_state);
1327 memcpy(options.scale_state_tokens, scale_state,
1328 sizeof(options.scale_state_tokens));
1329 _mesa_add_state_reference(params, bias_state);
1330 memcpy(options.bias_state_tokens, bias_state,
1331 sizeof(options.bias_state_tokens));
1332 }
1333
1334 _mesa_add_state_reference(params, texcoord_state);
1335 memcpy(options.texcoord_state_tokens, texcoord_state,
1336 sizeof(options.texcoord_state_tokens));
1337
1338 NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
1339 finalize = true;
1340 }
1341
1342 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1343 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
1344 key->external.lower_ayuv || key->external.lower_xyuv)) {
1345 nir_lower_tex_options options = {0};
1346 options.lower_y_uv_external = key->external.lower_nv12;
1347 options.lower_y_u_v_external = key->external.lower_iyuv;
1348 options.lower_xy_uxvx_external = key->external.lower_xy_uxvx;
1349 options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
1350 options.lower_ayuv_external = key->external.lower_ayuv;
1351 options.lower_xyuv_external = key->external.lower_xyuv;
1352 NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
1353 finalize = true;
1354 }
1355
1356 if (finalize || !st->allow_st_finalize_nir_twice) {
1357 st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
1358 false);
1359 }
1360
1361 /* This pass needs to happen *after* nir_lower_sampler */
1362 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1363 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1364 NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
1365 ~stfp->Base.SamplersUsed,
1366 key->external.lower_nv12 || key->external.lower_xy_uxvx ||
1367 key->external.lower_yx_xuxv,
1368 key->external.lower_iyuv);
1369 finalize = true;
1370 }
1371
1372 if (finalize || !st->allow_st_finalize_nir_twice) {
1373 /* Some of the lowering above may have introduced new varyings */
1374 nir_shader_gather_info(state.ir.nir,
1375 nir_shader_get_entrypoint(state.ir.nir));
1376
1377 struct pipe_screen *screen = pipe->screen;
1378 if (screen->finalize_nir)
1379 screen->finalize_nir(screen, state.ir.nir, false);
1380 }
1381
1382 if (ST_DEBUG & DEBUG_PRINT_IR)
1383 nir_print_shader(state.ir.nir, stderr);
1384
1385 variant->driver_shader = pipe->create_fs_state(pipe, &state);
1386 variant->key = *key;
1387
1388 return variant;
1389 }
1390
1391 state.tokens = stfp->state.tokens;
1392
1393 assert(!(key->bitmap && key->drawpixels));
1394
1395 /* Fix texture targets and add fog for ATI_fs */
1396 if (stfp->ati_fs) {
1397 const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);
1398
1399 if (tokens)
1400 state.tokens = tokens;
1401 else
1402 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1403 }
1404
1405 /* Emulate features. */
1406 if (key->clamp_color || key->persample_shading) {
1407 const struct tgsi_token *tokens;
1408 unsigned flags =
1409 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1410 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1411
1412 tokens = tgsi_emulate(state.tokens, flags);
1413
1414 if (tokens) {
1415 if (state.tokens != stfp->state.tokens)
1416 tgsi_free_tokens(state.tokens);
1417 state.tokens = tokens;
1418 } else
1419 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1420 }
1421
1422 /* glBitmap */
1423 if (key->bitmap) {
1424 const struct tgsi_token *tokens;
1425
1426 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1427
1428 tokens = st_get_bitmap_shader(state.tokens,
1429 st->internal_target,
1430 variant->bitmap_sampler,
1431 st->needs_texcoord_semantic,
1432 st->bitmap.tex_format ==
1433 PIPE_FORMAT_R8_UNORM);
1434
1435 if (tokens) {
1436 if (state.tokens != stfp->state.tokens)
1437 tgsi_free_tokens(state.tokens);
1438 state.tokens = tokens;
1439 } else
1440 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1441 }
1442
1443 /* glDrawPixels (color only) */
1444 if (key->drawpixels) {
1445 const struct tgsi_token *tokens;
1446 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1447
1448 /* Find the first unused slot. */
1449 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1450
1451 if (key->pixelMaps) {
1452 unsigned samplers_used = stfp->Base.SamplersUsed |
1453 (1 << variant->drawpix_sampler);
1454
1455 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1456 }
1457
1458 if (key->scaleAndBias) {
1459 scale_const = _mesa_add_state_reference(params, scale_state);
1460 bias_const = _mesa_add_state_reference(params, bias_state);
1461 }
1462
1463 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1464
1465 tokens = st_get_drawpix_shader(state.tokens,
1466 st->needs_texcoord_semantic,
1467 key->scaleAndBias, scale_const,
1468 bias_const, key->pixelMaps,
1469 variant->drawpix_sampler,
1470 variant->pixelmap_sampler,
1471 texcoord_const, st->internal_target);
1472
1473 if (tokens) {
1474 if (state.tokens != stfp->state.tokens)
1475 tgsi_free_tokens(state.tokens);
1476 state.tokens = tokens;
1477 } else
1478 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1479 }
1480
1481 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1482 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1483 const struct tgsi_token *tokens;
1484
1485 /* The samplers we insert would conflict, but this should be impossible: */
1486 assert(!(key->bitmap || key->drawpixels));
1487
1488 tokens = st_tgsi_lower_yuv(state.tokens,
1489 ~stfp->Base.SamplersUsed,
1490 key->external.lower_nv12 ||
1491 key->external.lower_xy_uxvx ||
1492 key->external.lower_yx_xuxv,
1493 key->external.lower_iyuv);
1494 if (tokens) {
1495 if (state.tokens != stfp->state.tokens)
1496 tgsi_free_tokens(state.tokens);
1497 state.tokens = tokens;
1498 } else {
1499 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1500 }
1501 }
1502
1503 if (key->lower_depth_clamp) {
1504 unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
1505
1506 const struct tgsi_token *tokens;
1507 tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
1508 if (state.tokens != stfp->state.tokens)
1509 tgsi_free_tokens(state.tokens);
1510 state.tokens = tokens;
1511 }
1512
1513 if (ST_DEBUG & DEBUG_PRINT_IR)
1514 tgsi_dump(state.tokens, 0);
1515
1516 /* fill in variant */
1517 variant->driver_shader = pipe->create_fs_state(pipe, &state);
1518 variant->key = *key;
1519
1520 if (state.tokens != stfp->state.tokens)
1521 tgsi_free_tokens(state.tokens);
1522 return variant;
1523 }
1524
1525 /**
1526 * Find/create a fragment program variant.
1527 */
1528 struct st_fp_variant *
1529 st_get_fp_variant(struct st_context *st,
1530 struct st_program *stfp,
1531 const struct st_fp_variant_key *key)
1532 {
1533 struct st_fp_variant *fpv;
1534
1535 /* Search for existing variant */
1536 for (fpv = stfp->fp_variants; fpv; fpv = fpv->next) {
1537 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1538 break;
1539 }
1540 }
1541
1542 if (!fpv) {
1543 /* create new */
1544 fpv = st_create_fp_variant(st, stfp, key);
1545 if (fpv) {
1546 if (key->bitmap || key->drawpixels) {
1547 /* Regular variants should always come before the
1548 * bitmap & drawpixels variants (unless there are
1549 * no regular variants), so that st_update_fp can
1550 * take a fast path when shader_has_one_variant
1551 * is set.
1552 */
1553 if (!stfp->fp_variants) {
1554 stfp->fp_variants = fpv;
1555 } else {
1556 /* insert into list after the first one */
1557 fpv->next = stfp->fp_variants->next;
1558 stfp->fp_variants->next = fpv;
1559 }
1560 } else {
1561 /* insert into list */
1562 fpv->next = stfp->fp_variants;
1563 stfp->fp_variants = fpv;
1564 }
1565 }
1566 }
1567
1568 return fpv;
1569 }
1570
1571 /**
1572 * Translate a program. This is common code for geometry and tessellation
1573 * shaders.
1574 */
1575 bool
1576 st_translate_common_program(struct st_context *st,
1577 struct st_program *stp)
1578 {
1579 struct gl_program *prog = &stp->Base;
1580 enum pipe_shader_type stage =
1581 pipe_shader_type_from_mesa(stp->Base.info.stage);
1582 struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
1583
1584 if (ureg == NULL)
1585 return false;
1586
1587 switch (stage) {
1588 case PIPE_SHADER_TESS_CTRL:
1589 ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
1590 stp->Base.info.tess.tcs_vertices_out);
1591 break;
1592
1593 case PIPE_SHADER_TESS_EVAL:
1594 if (stp->Base.info.tess.primitive_mode == GL_ISOLINES)
1595 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
1596 else
1597 ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
1598 stp->Base.info.tess.primitive_mode);
1599
1600 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
1601 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
1602 PIPE_TESS_SPACING_FRACTIONAL_ODD);
1603 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
1604 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
1605
1606 ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
1607 (stp->Base.info.tess.spacing + 1) % 3);
1608
1609 ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
1610 !stp->Base.info.tess.ccw);
1611 ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
1612 stp->Base.info.tess.point_mode);
1613 break;
1614
1615 case PIPE_SHADER_GEOMETRY:
1616 ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
1617 stp->Base.info.gs.input_primitive);
1618 ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
1619 stp->Base.info.gs.output_primitive);
1620 ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
1621 stp->Base.info.gs.vertices_out);
1622 ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
1623 stp->Base.info.gs.invocations);
1624 break;
1625
1626 default:
1627 break;
1628 }
1629
1630 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1631 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1632 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1633 GLuint attr;
1634
1635 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1636 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1637 uint num_inputs = 0;
1638
1639 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1640 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1641 uint num_outputs = 0;
1642
1643 GLint i;
1644
1645 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1646 memset(inputMapping, 0, sizeof(inputMapping));
1647 memset(outputMapping, 0, sizeof(outputMapping));
1648 memset(&stp->state, 0, sizeof(stp->state));
1649
1650 if (prog->info.clip_distance_array_size)
1651 ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
1652 prog->info.clip_distance_array_size);
1653 if (prog->info.cull_distance_array_size)
1654 ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
1655 prog->info.cull_distance_array_size);
1656
1657 /*
1658 * Convert Mesa program inputs to TGSI input register semantics.
1659 */
1660 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1661 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1662 continue;
1663
1664 unsigned slot = num_inputs++;
1665
1666 inputMapping[attr] = slot;
1667 inputSlotToAttr[slot] = attr;
1668
1669 unsigned semantic_name, semantic_index;
1670 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1671 &semantic_name, &semantic_index);
1672 input_semantic_name[slot] = semantic_name;
1673 input_semantic_index[slot] = semantic_index;
1674 }
1675
1676 /* Also add patch inputs. */
1677 for (attr = 0; attr < 32; attr++) {
1678 if (prog->info.patch_inputs_read & (1u << attr)) {
1679 GLuint slot = num_inputs++;
1680 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1681
1682 inputMapping[patch_attr] = slot;
1683 inputSlotToAttr[slot] = patch_attr;
1684 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1685 input_semantic_index[slot] = attr;
1686 }
1687 }
1688
1689 /* initialize output semantics to defaults */
1690 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1691 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1692 output_semantic_index[i] = 0;
1693 }
1694
1695 /*
1696 * Determine number of outputs, the (default) output register
1697 * mapping and the semantic information for each output.
1698 */
1699 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1700 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1701 GLuint slot = num_outputs++;
1702
1703 outputMapping[attr] = slot;
1704
1705 unsigned semantic_name, semantic_index;
1706 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1707 &semantic_name, &semantic_index);
1708 output_semantic_name[slot] = semantic_name;
1709 output_semantic_index[slot] = semantic_index;
1710 }
1711 }
1712
1713 /* Also add patch outputs. */
1714 for (attr = 0; attr < 32; attr++) {
1715 if (prog->info.patch_outputs_written & (1u << attr)) {
1716 GLuint slot = num_outputs++;
1717 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1718
1719 outputMapping[patch_attr] = slot;
1720 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1721 output_semantic_index[slot] = attr;
1722 }
1723 }
1724
1725 st_translate_program(st->ctx,
1726 stage,
1727 ureg,
1728 stp->glsl_to_tgsi,
1729 prog,
1730 /* inputs */
1731 num_inputs,
1732 inputMapping,
1733 inputSlotToAttr,
1734 input_semantic_name,
1735 input_semantic_index,
1736 NULL,
1737 /* outputs */
1738 num_outputs,
1739 outputMapping,
1740 output_semantic_name,
1741 output_semantic_index);
1742
1743 stp->state.tokens = ureg_get_tokens(ureg, NULL);
1744
1745 ureg_destroy(ureg);
1746
1747 st_translate_stream_output_info(prog);
1748
1749 st_store_ir_in_disk_cache(st, prog, false);
1750
1751 if ((ST_DEBUG & DEBUG_PRINT_IR) && (ST_DEBUG & DEBUG_MESA))
1752 _mesa_print_program(prog);
1753
1754 free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
1755 stp->glsl_to_tgsi = NULL;
1756 return true;
1757 }
1758
1759
1760 /**
1761 * Get/create a common (tess/geom/compute) program variant.
1762 */
1763 struct st_common_variant *
1764 st_get_common_variant(struct st_context *st,
1765 struct st_program *prog,
1766 const struct st_common_variant_key *key)
1767 {
1768 struct pipe_context *pipe = st->pipe;
1769 struct st_common_variant *v;
1770 struct pipe_shader_state state = {0};
1771
1772 /* Search for existing variant */
1773 for (v = prog->variants; v; v = v->next) {
1774 if (memcmp(&v->key, key, sizeof(*key)) == 0) {
1775 break;
1776 }
1777 }
1778
1779 if (!v) {
1780 /* create new */
1781 v = CALLOC_STRUCT(st_common_variant);
1782 if (v) {
1783 if (prog->state.type == PIPE_SHADER_IR_NIR) {
1784 bool finalize = false;
1785
1786 state.type = PIPE_SHADER_IR_NIR;
1787 state.ir.nir = get_nir_shader(st, prog);
1788
1789 if (key->clamp_color) {
1790 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1791 finalize = true;
1792 }
1793
1794 state.stream_output = prog->state.stream_output;
1795
1796 if (finalize || !st->allow_st_finalize_nir_twice) {
1797 st_finalize_nir(st, &prog->Base, prog->shader_program,
1798 state.ir.nir, true);
1799 }
1800
1801 if (ST_DEBUG & DEBUG_PRINT_IR)
1802 nir_print_shader(state.ir.nir, stderr);
1803 } else {
1804 if (key->lower_depth_clamp) {
1805 struct gl_program_parameter_list *params = prog->Base.Parameters;
1806
1807 unsigned depth_range_const =
1808 _mesa_add_state_reference(params, depth_range_state);
1809
1810 const struct tgsi_token *tokens;
1811 tokens =
1812 st_tgsi_lower_depth_clamp(prog->state.tokens,
1813 depth_range_const,
1814 key->clip_negative_one_to_one);
1815
1816 if (tokens != prog->state.tokens)
1817 tgsi_free_tokens(prog->state.tokens);
1818
1819 prog->state.tokens = tokens;
1820 }
1821 state = prog->state;
1822
1823 if (ST_DEBUG & DEBUG_PRINT_IR)
1824 tgsi_dump(state.tokens, 0);
1825 }
1826 /* fill in new variant */
1827 switch (prog->Base.info.stage) {
1828 case MESA_SHADER_TESS_CTRL:
1829 v->driver_shader = pipe->create_tcs_state(pipe, &state);
1830 break;
1831 case MESA_SHADER_TESS_EVAL:
1832 v->driver_shader = pipe->create_tes_state(pipe, &state);
1833 break;
1834 case MESA_SHADER_GEOMETRY:
1835 v->driver_shader = pipe->create_gs_state(pipe, &state);
1836 break;
1837 case MESA_SHADER_COMPUTE: {
1838 struct pipe_compute_state cs = {0};
1839 cs.ir_type = state.type;
1840 cs.req_local_mem = prog->Base.info.cs.shared_size;
1841
1842 if (state.type == PIPE_SHADER_IR_NIR)
1843 cs.prog = state.ir.nir;
1844 else
1845 cs.prog = state.tokens;
1846
1847 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1848 break;
1849 }
1850 default:
1851 assert(!"unhandled shader type");
1852 free(v);
1853 return NULL;
1854 }
1855
1856 v->key = *key;
1857
1858 /* insert into list */
1859 v->next = prog->variants;
1860 prog->variants = v;
1861 }
1862 }
1863
1864 return v;
1865 }
1866
1867
1868 /**
1869 * Program variants are per-context. Free all the
1870 * variants attached to the given program which match the given context.
1871 */
1872 static void
1873 destroy_program_variants(struct st_context *st, struct gl_program *target)
1874 {
1875 if (!target || target == &_mesa_DummyProgram)
1876 return;
1877
1878 switch (target->Target) {
1879 case GL_VERTEX_PROGRAM_ARB:
1880 {
1881 struct st_program *stvp = (struct st_program *) target;
1882 struct st_vp_variant *vpv, **prevPtr = &stvp->vp_variants;
1883
1884 for (vpv = stvp->vp_variants; vpv; ) {
1885 struct st_vp_variant *next = vpv->next;
1886 if (vpv->key.st == st) {
1887 /* unlink from list */
1888 *prevPtr = next;
1889 /* destroy this variant */
1890 delete_vp_variant(st, vpv);
1891 }
1892 else {
1893 prevPtr = &vpv->next;
1894 }
1895 vpv = next;
1896 }
1897 }
1898 break;
1899 case GL_FRAGMENT_PROGRAM_ARB:
1900 {
1901 struct st_program *stfp =
1902 (struct st_program *) target;
1903 struct st_fp_variant *fpv, **prevPtr = &stfp->fp_variants;
1904
1905 for (fpv = stfp->fp_variants; fpv; ) {
1906 struct st_fp_variant *next = fpv->next;
1907 if (fpv->key.st == st) {
1908 /* unlink from list */
1909 *prevPtr = next;
1910 /* destroy this variant */
1911 delete_fp_variant(st, fpv);
1912 }
1913 else {
1914 prevPtr = &fpv->next;
1915 }
1916 fpv = next;
1917 }
1918 }
1919 break;
1920 case GL_GEOMETRY_PROGRAM_NV:
1921 case GL_TESS_CONTROL_PROGRAM_NV:
1922 case GL_TESS_EVALUATION_PROGRAM_NV:
1923 case GL_COMPUTE_PROGRAM_NV:
1924 {
1925 struct st_program *p = st_program(target);
1926 struct st_common_variant *v, **prevPtr = &p->variants;
1927
1928 for (v = p->variants; v; ) {
1929 struct st_common_variant *next = v->next;
1930 if (v->key.st == st) {
1931 /* unlink from list */
1932 *prevPtr = next;
1933 /* destroy this variant */
1934 delete_common_variant(st, v, target->Target);
1935 }
1936 else {
1937 prevPtr = &v->next;
1938 }
1939 v = next;
1940 }
1941 }
1942 break;
1943 default:
1944 _mesa_problem(NULL, "Unexpected program target 0x%x in "
1945 "destroy_program_variants_cb()", target->Target);
1946 }
1947 }
1948
1949
1950 /**
1951 * Callback for _mesa_HashWalk. Free all the shader's program variants
1952 * which match the given context.
1953 */
1954 static void
1955 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1956 {
1957 struct st_context *st = (struct st_context *) userData;
1958 struct gl_shader *shader = (struct gl_shader *) data;
1959
1960 switch (shader->Type) {
1961 case GL_SHADER_PROGRAM_MESA:
1962 {
1963 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1964 GLuint i;
1965
1966 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1967 if (shProg->_LinkedShaders[i])
1968 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1969 }
1970 }
1971 break;
1972 case GL_VERTEX_SHADER:
1973 case GL_FRAGMENT_SHADER:
1974 case GL_GEOMETRY_SHADER:
1975 case GL_TESS_CONTROL_SHADER:
1976 case GL_TESS_EVALUATION_SHADER:
1977 case GL_COMPUTE_SHADER:
1978 break;
1979 default:
1980 assert(0);
1981 }
1982 }
1983
1984
1985 /**
1986 * Callback for _mesa_HashWalk. Free all the program variants which match
1987 * the given context.
1988 */
1989 static void
1990 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1991 {
1992 struct st_context *st = (struct st_context *) userData;
1993 struct gl_program *program = (struct gl_program *) data;
1994 destroy_program_variants(st, program);
1995 }
1996
1997
1998 /**
1999 * Walk over all shaders and programs to delete any variants which
2000 * belong to the given context.
2001 * This is called during context tear-down.
2002 */
2003 void
2004 st_destroy_program_variants(struct st_context *st)
2005 {
2006 /* If shaders can be shared with other contexts, the last context will
2007 * call DeleteProgram on all shaders, releasing everything.
2008 */
2009 if (st->has_shareable_shaders)
2010 return;
2011
2012 /* ARB vert/frag program */
2013 _mesa_HashWalk(st->ctx->Shared->Programs,
2014 destroy_program_variants_cb, st);
2015
2016 /* GLSL vert/frag/geom shaders */
2017 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
2018 destroy_shader_program_variants_cb, st);
2019 }
2020
2021
2022 /**
2023 * Compile one shader variant.
2024 */
2025 static void
2026 st_precompile_shader_variant(struct st_context *st,
2027 struct gl_program *prog)
2028 {
2029 switch (prog->Target) {
2030 case GL_VERTEX_PROGRAM_ARB: {
2031 struct st_program *p = (struct st_program *)prog;
2032 struct st_common_variant_key key;
2033
2034 memset(&key, 0, sizeof(key));
2035
2036 key.st = st->has_shareable_shaders ? NULL : st;
2037 st_get_vp_variant(st, p, &key);
2038 break;
2039 }
2040
2041 case GL_FRAGMENT_PROGRAM_ARB: {
2042 struct st_program *p = (struct st_program *)prog;
2043 struct st_fp_variant_key key;
2044
2045 memset(&key, 0, sizeof(key));
2046
2047 key.st = st->has_shareable_shaders ? NULL : st;
2048 st_get_fp_variant(st, p, &key);
2049 break;
2050 }
2051
2052 case GL_TESS_CONTROL_PROGRAM_NV:
2053 case GL_TESS_EVALUATION_PROGRAM_NV:
2054 case GL_GEOMETRY_PROGRAM_NV:
2055 case GL_COMPUTE_PROGRAM_NV: {
2056 struct st_program *p = st_program(prog);
2057 struct st_common_variant_key key;
2058
2059 memset(&key, 0, sizeof(key));
2060
2061 key.st = st->has_shareable_shaders ? NULL : st;
2062 st_get_common_variant(st, p, &key);
2063 break;
2064 }
2065
2066 default:
2067 assert(0);
2068 }
2069 }
2070
2071 void
2072 st_finalize_program(struct st_context *st, struct gl_program *prog)
2073 {
2074 struct st_program *stp = (struct st_program *)prog;
2075
2076 if (st->current_program[prog->info.stage] == prog) {
2077 if (prog->info.stage == MESA_SHADER_VERTEX)
2078 st->dirty |= ST_NEW_VERTEX_PROGRAM(st, stp);
2079 else
2080 st->dirty |= stp->affected_states;
2081 }
2082
2083 if (prog->nir)
2084 nir_sweep(prog->nir);
2085
2086 /* Create Gallium shaders now instead of on demand. */
2087 if (ST_DEBUG & DEBUG_PRECOMPILE ||
2088 st->shader_has_one_variant[prog->info.stage])
2089 st_precompile_shader_variant(st, prog);
2090
2091 /* Additional shader variants are always generated from serialized NIR
2092 * to save memory.
2093 */
2094 if (prog->nir) {
2095 /* Serialize NIR. */
2096 struct blob blob;
2097 blob_init(&blob);
2098 nir_serialize(&blob, prog->nir, false);
2099 stp->nir_binary = malloc(blob.size);
2100 memcpy(stp->nir_binary, blob.data, blob.size);
2101 stp->nir_size = blob.size;
2102 blob_finish(&blob);
2103
2104 /* Free NIR. */
2105 assert(stp->state.ir.nir == prog->nir);
2106 ralloc_free(prog->nir);
2107 prog->nir = NULL;
2108 stp->state.ir.nir = NULL;
2109 }
2110 }
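/* From this point on, any request for an additional variant rebuilds its
 * NIR through get_nir_shader() above, deserializing stp->nir_binary with
 * nir_deserialize() instead of keeping a live nir_shader in st_program.
 * That is the memory saving this change is about.
 */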