st/mesa: fix lowered IO - don't call st_nir_assign_vs_in_locations twice
[mesa.git] src/mesa/state_tracker/st_program.c
1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 * Brian Paul
31 */
32
33
34 #include "main/errors.h"
35
36 #include "main/hash.h"
37 #include "main/mtypes.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/prog_to_nir.h"
41 #include "program/programopt.h"
42
43 #include "compiler/nir/nir.h"
44 #include "compiler/nir/nir_serialize.h"
45 #include "draw/draw_context.h"
46
47 #include "pipe/p_context.h"
48 #include "pipe/p_defines.h"
49 #include "pipe/p_shader_tokens.h"
51 #include "tgsi/tgsi_dump.h"
52 #include "tgsi/tgsi_emulate.h"
53 #include "tgsi/tgsi_parse.h"
54 #include "tgsi/tgsi_ureg.h"
55
56 #include "util/u_memory.h"
57
58 #include "st_debug.h"
59 #include "st_cb_bitmap.h"
60 #include "st_cb_drawpixels.h"
61 #include "st_context.h"
62 #include "st_tgsi_lower_depth_clamp.h"
63 #include "st_tgsi_lower_yuv.h"
64 #include "st_program.h"
65 #include "st_mesa_to_tgsi.h"
66 #include "st_atifs_to_tgsi.h"
67 #include "st_nir.h"
68 #include "st_shader_cache.h"
69 #include "st_util.h"
70 #include "cso_cache/cso_context.h"
71
72
73 static void
74 destroy_program_variants(struct st_context *st, struct gl_program *target);
75
76 static void
77 set_affected_state_flags(uint64_t *states,
78 struct gl_program *prog,
79 uint64_t new_constants,
80 uint64_t new_sampler_views,
81 uint64_t new_samplers,
82 uint64_t new_images,
83 uint64_t new_ubos,
84 uint64_t new_ssbos,
85 uint64_t new_atomics)
86 {
87 if (prog->Parameters->NumParameters)
88 *states |= new_constants;
89
90 if (prog->info.num_textures)
91 *states |= new_sampler_views | new_samplers;
92
93 if (prog->info.num_images)
94 *states |= new_images;
95
96 if (prog->info.num_ubos)
97 *states |= new_ubos;
98
99 if (prog->info.num_ssbos)
100 *states |= new_ssbos;
101
102 if (prog->info.num_abos)
103 *states |= new_atomics;
104 }
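/* A hedged illustration of how the helper above composes dirty bits:
 * a vertex shader that samples a texture and declares uniforms would,
 * per the checks above, end up with
 *
 *    affected_states = ST_NEW_VS_STATE | ST_NEW_RASTERIZER |
 *                      ST_NEW_VERTEX_ARRAYS | ST_NEW_VS_CONSTANTS |
 *                      ST_NEW_VS_SAMPLER_VIEWS | ST_NEW_VS_SAMPLERS;
 *
 * st_finalize_program() later ORs these bits into st->dirty when the
 * program is current, so only state the shader actually references is
 * revalidated.
 */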
105
106 /**
107 * This determines which states will be updated when the shader is bound.
108 */
109 void
110 st_set_prog_affected_state_flags(struct gl_program *prog)
111 {
112 uint64_t *states;
113
114 switch (prog->info.stage) {
115 case MESA_SHADER_VERTEX:
116 states = &((struct st_program*)prog)->affected_states;
117
118 *states = ST_NEW_VS_STATE |
119 ST_NEW_RASTERIZER |
120 ST_NEW_VERTEX_ARRAYS;
121
122 set_affected_state_flags(states, prog,
123 ST_NEW_VS_CONSTANTS,
124 ST_NEW_VS_SAMPLER_VIEWS,
125 ST_NEW_VS_SAMPLERS,
126 ST_NEW_VS_IMAGES,
127 ST_NEW_VS_UBOS,
128 ST_NEW_VS_SSBOS,
129 ST_NEW_VS_ATOMICS);
130 break;
131
132 case MESA_SHADER_TESS_CTRL:
133 states = &(st_program(prog))->affected_states;
134
135 *states = ST_NEW_TCS_STATE;
136
137 set_affected_state_flags(states, prog,
138 ST_NEW_TCS_CONSTANTS,
139 ST_NEW_TCS_SAMPLER_VIEWS,
140 ST_NEW_TCS_SAMPLERS,
141 ST_NEW_TCS_IMAGES,
142 ST_NEW_TCS_UBOS,
143 ST_NEW_TCS_SSBOS,
144 ST_NEW_TCS_ATOMICS);
145 break;
146
147 case MESA_SHADER_TESS_EVAL:
148 states = &(st_program(prog))->affected_states;
149
150 *states = ST_NEW_TES_STATE |
151 ST_NEW_RASTERIZER;
152
153 set_affected_state_flags(states, prog,
154 ST_NEW_TES_CONSTANTS,
155 ST_NEW_TES_SAMPLER_VIEWS,
156 ST_NEW_TES_SAMPLERS,
157 ST_NEW_TES_IMAGES,
158 ST_NEW_TES_UBOS,
159 ST_NEW_TES_SSBOS,
160 ST_NEW_TES_ATOMICS);
161 break;
162
163 case MESA_SHADER_GEOMETRY:
164 states = &(st_program(prog))->affected_states;
165
166 *states = ST_NEW_GS_STATE |
167 ST_NEW_RASTERIZER;
168
169 set_affected_state_flags(states, prog,
170 ST_NEW_GS_CONSTANTS,
171 ST_NEW_GS_SAMPLER_VIEWS,
172 ST_NEW_GS_SAMPLERS,
173 ST_NEW_GS_IMAGES,
174 ST_NEW_GS_UBOS,
175 ST_NEW_GS_SSBOS,
176 ST_NEW_GS_ATOMICS);
177 break;
178
179 case MESA_SHADER_FRAGMENT:
180 states = &((struct st_program*)prog)->affected_states;
181
182 /* gl_FragCoord and glDrawPixels always use constants. */
183 *states = ST_NEW_FS_STATE |
184 ST_NEW_SAMPLE_SHADING |
185 ST_NEW_FS_CONSTANTS;
186
187 set_affected_state_flags(states, prog,
188 ST_NEW_FS_CONSTANTS,
189 ST_NEW_FS_SAMPLER_VIEWS,
190 ST_NEW_FS_SAMPLERS,
191 ST_NEW_FS_IMAGES,
192 ST_NEW_FS_UBOS,
193 ST_NEW_FS_SSBOS,
194 ST_NEW_FS_ATOMICS);
195 break;
196
197 case MESA_SHADER_COMPUTE:
198 states = &((struct st_program*)prog)->affected_states;
199
200 *states = ST_NEW_CS_STATE;
201
202 set_affected_state_flags(states, prog,
203 ST_NEW_CS_CONSTANTS,
204 ST_NEW_CS_SAMPLER_VIEWS,
205 ST_NEW_CS_SAMPLERS,
206 ST_NEW_CS_IMAGES,
207 ST_NEW_CS_UBOS,
208 ST_NEW_CS_SSBOS,
209 ST_NEW_CS_ATOMICS);
210 break;
211
212 default:
213 unreachable("unhandled shader stage");
214 }
215 }
216
217
218 /**
219 * Delete a shader variant. Note the caller must unlink the variant from
220 * the linked list.
221 */
222 static void
223 delete_variant(struct st_context *st, struct st_variant *v, GLenum target)
224 {
225 if (v->driver_shader) {
226 if (target == GL_VERTEX_PROGRAM_ARB &&
227 ((struct st_common_variant*)v)->key.is_draw_shader) {
228 /* Draw shader. */
229 draw_delete_vertex_shader(st->draw, v->driver_shader);
230 } else if (st->has_shareable_shaders || v->st == st) {
231 /* The shader's context matches the calling context, or we
232 * don't care.
233 */
234 switch (target) {
235 case GL_VERTEX_PROGRAM_ARB:
236 st->pipe->delete_vs_state(st->pipe, v->driver_shader);
237 break;
238 case GL_TESS_CONTROL_PROGRAM_NV:
239 st->pipe->delete_tcs_state(st->pipe, v->driver_shader);
240 break;
241 case GL_TESS_EVALUATION_PROGRAM_NV:
242 st->pipe->delete_tes_state(st->pipe, v->driver_shader);
243 break;
244 case GL_GEOMETRY_PROGRAM_NV:
245 st->pipe->delete_gs_state(st->pipe, v->driver_shader);
246 break;
247 case GL_FRAGMENT_PROGRAM_ARB:
248 st->pipe->delete_fs_state(st->pipe, v->driver_shader);
249 break;
250 case GL_COMPUTE_PROGRAM_NV:
251 st->pipe->delete_compute_state(st->pipe, v->driver_shader);
252 break;
253 default:
254 unreachable("bad shader type in delete_variant");
255 }
256 } else {
257 /* We can't delete a shader with a context different from the one
258 * that created it. Add it to the creating context's zombie list.
259 */
260 enum pipe_shader_type type =
261 pipe_shader_type_from_mesa(_mesa_program_enum_to_shader_stage(target));
262
263 st_save_zombie_shader(v->st, type, v->driver_shader);
264 }
265 }
266
267 free(v);
268 }
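/* Note on the zombie path above: a gallium shader CSO must be
 * destroyed by the context that created it unless the driver sets
 * has_shareable_shaders, so variants deleted from a different context
 * are queued on the creating context's zombie list via
 * st_save_zombie_shader() and are presumably freed the next time that
 * context runs.
 */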
269
270 static void
271 st_unbind_program(struct st_context *st, struct st_program *p)
272 {
273 /* Unbind the shader in cso_context and re-bind in st/mesa. */
274 switch (p->Base.info.stage) {
275 case MESA_SHADER_VERTEX:
276 cso_set_vertex_shader_handle(st->cso_context, NULL);
277 st->dirty |= ST_NEW_VS_STATE;
278 break;
279 case MESA_SHADER_TESS_CTRL:
280 cso_set_tessctrl_shader_handle(st->cso_context, NULL);
281 st->dirty |= ST_NEW_TCS_STATE;
282 break;
283 case MESA_SHADER_TESS_EVAL:
284 cso_set_tesseval_shader_handle(st->cso_context, NULL);
285 st->dirty |= ST_NEW_TES_STATE;
286 break;
287 case MESA_SHADER_GEOMETRY:
288 cso_set_geometry_shader_handle(st->cso_context, NULL);
289 st->dirty |= ST_NEW_GS_STATE;
290 break;
291 case MESA_SHADER_FRAGMENT:
292 cso_set_fragment_shader_handle(st->cso_context, NULL);
293 st->dirty |= ST_NEW_FS_STATE;
294 break;
295 case MESA_SHADER_COMPUTE:
296 cso_set_compute_shader_handle(st->cso_context, NULL);
297 st->dirty |= ST_NEW_CS_STATE;
298 break;
299 default:
300 unreachable("invalid shader type");
301 }
302 }
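/* Unbinding through cso_context drops the driver's binding of the
 * shader handle, and the ST_NEW_* dirty bit makes st/mesa re-bind the
 * current program on the next state validation, so callers may then
 * safely delete the underlying variants.
 */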
303
304 /**
305 * Free all basic program variants.
306 */
307 void
308 st_release_variants(struct st_context *st, struct st_program *p)
309 {
310 struct st_variant *v;
311
312 /* Unbind the program first, because we don't know which of its
313 * variants is bound in the driver; st/mesa re-binds it later.
314 */
315 if (p->variants)
316 st_unbind_program(st, p);
317
318 for (v = p->variants; v; ) {
319 struct st_variant *next = v->next;
320 delete_variant(st, v, p->Base.Target);
321 v = next;
322 }
323
324 p->variants = NULL;
325
326 if (p->state.tokens) {
327 ureg_free_tokens(p->state.tokens);
328 p->state.tokens = NULL;
329 }
330
331 /* Note: once pipe->create_*_state has been called on a
332 * pipe_shader_state whose ir.nir is set, the driver has taken
333 * ownership of that NIR. Such callers should NULL out the nir field
334 * in the pipe_shader_state to indicate the transfer.
335 *
336 * GLSL IR and ARB programs set gl_program->nir to the same shader,
337 * so it will be freed by _mesa_delete_program().
338 */
339 }
340
341 /**
342 * Free all basic program variants and unref program.
343 */
344 void
345 st_release_program(struct st_context *st, struct st_program **p)
346 {
347 if (!*p)
348 return;
349
350 destroy_program_variants(st, &((*p)->Base));
351 st_reference_prog(st, p, NULL);
352 }
353
354 void
355 st_finalize_nir_before_variants(struct nir_shader *nir)
356 {
357 NIR_PASS_V(nir, nir_opt_access);
358
359 NIR_PASS_V(nir, nir_split_var_copies);
360 NIR_PASS_V(nir, nir_lower_var_copies);
361 if (nir->options->lower_all_io_to_temps ||
362 nir->options->lower_all_io_to_elements ||
363 nir->info.stage == MESA_SHADER_VERTEX ||
364 nir->info.stage == MESA_SHADER_GEOMETRY) {
365 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
366 } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
367 NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
368 }
369
370 /* st_nir_assign_vs_in_locations requires correct shader info. */
371 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
372
373 st_nir_assign_vs_in_locations(nir);
374 }
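/* Per the commit subject ("don't call st_nir_assign_vs_in_locations
 * twice"), this helper is intended to be the single place where VS
 * input locations are assigned; callers should not repeat that
 * assignment after further IO lowering.
 */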
375
376 /**
377 * Translate ARB (asm) program to NIR
378 */
379 static nir_shader *
380 st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
381 gl_shader_stage stage)
382 {
383 struct pipe_screen *screen = st->pipe->screen;
384 const struct gl_shader_compiler_options *options =
385 &st->ctx->Const.ShaderCompilerOptions[stage];
386
387 /* Translate to NIR */
388 nir_shader *nir = prog_to_nir(prog, options->NirOptions);
389 NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
390 nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
391
392 NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
393 NIR_PASS_V(nir, nir_lower_system_values);
394 NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
395
396 /* Optimise NIR */
397 NIR_PASS_V(nir, nir_opt_constant_folding);
398 st_nir_opts(nir);
399 st_finalize_nir_before_variants(nir);
400
401 if (st->allow_st_finalize_nir_twice)
402 st_finalize_nir(st, prog, NULL, nir, true);
403
404 nir_validate_shader(nir, "after st/glsl finalize_nir");
405
406 return nir;
407 }
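/* When the screen allows finalizing NIR twice
 * (st->allow_st_finalize_nir_twice), st_finalize_nir runs once here at
 * translation time; variant creation then repeats it only if key-based
 * lowering actually modified the shader (see the "finalize" flag in
 * st_create_vp_variant and st_create_fp_variant below).
 */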
408
409 void
410 st_prepare_vertex_program(struct st_program *stp)
411 {
412 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
413
414 stvp->num_inputs = 0;
415 memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
416 memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));
417
418 /* Determine number of inputs, the mappings between VERT_ATTRIB_x
419 * and TGSI generic input indexes, plus input attrib semantic info.
420 */
421 for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
422 if ((stp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
423 stvp->input_to_index[attr] = stvp->num_inputs;
424 stvp->index_to_input[stvp->num_inputs] = attr;
425 stvp->num_inputs++;
426
427 if ((stp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
428 /* add placeholder for second part of a double attribute */
429 stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
430 stvp->num_inputs++;
431 }
432 }
433 }
434 /* pre-setup potentially unused edgeflag input */
435 stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
436 stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;
437
438 /* Compute mapping of vertex program outputs to slots. */
439 unsigned num_outputs = 0;
440 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
441 if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr))
442 stvp->result_to_output[attr] = num_outputs++;
443 }
444 /* pre-setup potentially unused edgeflag output */
445 stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
446 }
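/* A small worked example of the mapping built above, assuming a
 * shader that reads only VERT_ATTRIB_POS and VERT_ATTRIB_TEX0:
 *
 *    input_to_index[VERT_ATTRIB_POS]  == 0
 *    input_to_index[VERT_ATTRIB_TEX0] == 1
 *    index_to_input[0] == VERT_ATTRIB_POS
 *    index_to_input[1] == VERT_ATTRIB_TEX0
 *    num_inputs == 2
 *
 * A dual-slot (64-bit) attribute would additionally consume one
 * ST_DOUBLE_ATTRIB_PLACEHOLDER entry in index_to_input.
 */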
447
448 void
449 st_translate_stream_output_info(struct gl_program *prog)
450 {
451 struct gl_transform_feedback_info *info = prog->sh.LinkedTransformFeedback;
452 if (!info)
453 return;
454
455 /* Determine the (default) output register mapping for each output. */
456 unsigned num_outputs = 0;
457 ubyte output_mapping[VARYING_SLOT_TESS_MAX];
458 memset(output_mapping, 0, sizeof(output_mapping));
459
460 for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
461 if (prog->info.outputs_written & BITFIELD64_BIT(attr))
462 output_mapping[attr] = num_outputs++;
463 }
464
465 /* Translate stream output info. */
466 struct pipe_stream_output_info *so_info =
467 &((struct st_program*)prog)->state.stream_output;
468
469 for (unsigned i = 0; i < info->NumOutputs; i++) {
470 so_info->output[i].register_index =
471 output_mapping[info->Outputs[i].OutputRegister];
472 so_info->output[i].start_component = info->Outputs[i].ComponentOffset;
473 so_info->output[i].num_components = info->Outputs[i].NumComponents;
474 so_info->output[i].output_buffer = info->Outputs[i].OutputBuffer;
475 so_info->output[i].dst_offset = info->Outputs[i].DstOffset;
476 so_info->output[i].stream = info->Outputs[i].StreamId;
477 }
478
479 for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
480 so_info->stride[i] = info->Buffers[i].Stride;
481 }
482 so_info->num_outputs = info->NumOutputs;
483 }
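/* The output_mapping pass above exists because
 * gl_transform_feedback_info records outputs by VARYING_SLOT_*, while
 * pipe_stream_output_info wants packed output-register indices; the
 * remap of OutputRegister through output_mapping[] reconciles the two
 * numbering schemes.
 */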
484
485 /**
486 * Translate a vertex program.
487 */
488 bool
489 st_translate_vertex_program(struct st_context *st,
490 struct st_program *stp)
491 {
492 struct ureg_program *ureg;
493 enum pipe_error error;
494 unsigned num_outputs = 0;
495 unsigned attr;
496 ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
497 ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
498
499 if (stp->Base.arb.IsPositionInvariant)
500 _mesa_insert_mvp_code(st->ctx, &stp->Base);
501
502 /* ARB_vp: */
503 if (!stp->glsl_to_tgsi) {
504 _mesa_remove_output_reads(&stp->Base, PROGRAM_OUTPUT);
505
506 /* This determines which states will be updated when the assembly
507 * shader is bound.
508 */
509 stp->affected_states = ST_NEW_VS_STATE |
510 ST_NEW_RASTERIZER |
511 ST_NEW_VERTEX_ARRAYS;
512
513 if (stp->Base.Parameters->NumParameters)
514 stp->affected_states |= ST_NEW_VS_CONSTANTS;
515
516 /* Translate to NIR if preferred. */
517 if (PIPE_SHADER_IR_NIR ==
518 st->pipe->screen->get_shader_param(st->pipe->screen,
519 PIPE_SHADER_VERTEX,
520 PIPE_SHADER_CAP_PREFERRED_IR)) {
521 assert(!stp->glsl_to_tgsi);
522
523 if (stp->Base.nir)
524 ralloc_free(stp->Base.nir);
525
526 if (stp->serialized_nir) {
527 free(stp->serialized_nir);
528 stp->serialized_nir = NULL;
529 }
530
531 stp->state.type = PIPE_SHADER_IR_NIR;
532 stp->Base.nir = st_translate_prog_to_nir(st, &stp->Base,
533 MESA_SHADER_VERTEX);
534 stp->Base.info = stp->Base.nir->info;
535
536 /* For st_draw_feedback, we need to generate TGSI too if draw doesn't
537 * use LLVM.
538 */
539 /* TODO: Draw can't handle lowered IO. */
540 if (draw_has_llvm() && !stp->Base.info.io_lowered) {
541 st_prepare_vertex_program(stp);
542 return true;
543 }
544 }
545 }
546
547 st_prepare_vertex_program(stp);
548
549 /* Get semantic names and indices. */
550 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
551 if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
552 unsigned slot = num_outputs++;
553 unsigned semantic_name, semantic_index;
554 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
555 &semantic_name, &semantic_index);
556 output_semantic_name[slot] = semantic_name;
557 output_semantic_index[slot] = semantic_index;
558 }
559 }
560 /* pre-setup potentially unused edgeflag output */
561 output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
562 output_semantic_index[num_outputs] = 0;
563
564 ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
565 if (ureg == NULL)
566 return false;
567
568 ureg_setup_shader_info(ureg, &stp->Base.info);
569
570 if (ST_DEBUG & DEBUG_MESA) {
571 _mesa_print_program(&stp->Base);
572 _mesa_print_program_parameters(st->ctx, &stp->Base);
573 debug_printf("\n");
574 }
575
576 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
577
578 if (stp->glsl_to_tgsi) {
579 error = st_translate_program(st->ctx,
580 PIPE_SHADER_VERTEX,
581 ureg,
582 stp->glsl_to_tgsi,
583 &stp->Base,
584 /* inputs */
585 stvp->num_inputs,
586 stvp->input_to_index,
587 NULL, /* inputSlotToAttr */
588 NULL, /* input semantic name */
589 NULL, /* input semantic index */
590 NULL, /* interp mode */
591 /* outputs */
592 num_outputs,
593 stvp->result_to_output,
594 output_semantic_name,
595 output_semantic_index);
596
597 st_translate_stream_output_info(&stp->Base);
598
599 free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
600 } else
601 error = st_translate_mesa_program(st->ctx,
602 PIPE_SHADER_VERTEX,
603 ureg,
604 &stp->Base,
605 /* inputs */
606 stvp->num_inputs,
607 stvp->input_to_index,
608 NULL, /* input semantic name */
609 NULL, /* input semantic index */
610 NULL,
611 /* outputs */
612 num_outputs,
613 stvp->result_to_output,
614 output_semantic_name,
615 output_semantic_index);
616
617 if (error) {
618 debug_printf("%s: failed to translate Mesa program:\n", __func__);
619 _mesa_print_program(&stp->Base);
620 debug_assert(0);
621 return false;
622 }
623
624 stp->state.tokens = ureg_get_tokens(ureg, NULL);
625 ureg_destroy(ureg);
626
627 if (stp->glsl_to_tgsi) {
628 stp->glsl_to_tgsi = NULL;
629 st_store_ir_in_disk_cache(st, &stp->Base, false);
630 }
631
632 return stp->state.tokens != NULL;
633 }
634
635 static struct nir_shader *
636 get_nir_shader(struct st_context *st, struct st_program *stp)
637 {
638 if (stp->Base.nir) {
639 nir_shader *nir = stp->Base.nir;
640
641 /* The first shader variant takes ownership of NIR, so that there is
642 * no cloning. Additional shader variants are always generated from
643 * serialized NIR to save memory.
644 */
645 stp->Base.nir = NULL;
646 assert(stp->serialized_nir && stp->serialized_nir_size);
647 return nir;
648 }
649
650 struct blob_reader blob_reader;
651 const struct nir_shader_compiler_options *options =
652 st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;
653
654 blob_reader_init(&blob_reader, stp->serialized_nir, stp->serialized_nir_size);
655 return nir_deserialize(NULL, options, &blob_reader);
656 }
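/* Memory strategy: the gl_program keeps a live nir_shader only until
 * the first variant consumes it; every later variant is rebuilt by
 * deserializing stp->serialized_nir (produced by st_serialize_nir()
 * below), which avoids keeping a NIR clone per program.
 */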
657
658 static void
659 lower_ucp(struct st_context *st,
660 struct nir_shader *nir,
661 unsigned ucp_enables,
662 struct gl_program_parameter_list *params)
663 {
664 if (nir->info.outputs_written & VARYING_BIT_CLIP_DIST0)
665 NIR_PASS_V(nir, nir_lower_clip_disable, ucp_enables);
666 else {
667 struct pipe_screen *screen = st->pipe->screen;
668 bool can_compact = screen->get_param(screen,
669 PIPE_CAP_NIR_COMPACT_ARRAYS);
670 bool use_eye = st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] != NULL;
671
672 gl_state_index16 clipplane_state[MAX_CLIP_PLANES][STATE_LENGTH];
673 for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
674 if (use_eye) {
675 clipplane_state[i][0] = STATE_CLIPPLANE;
676 clipplane_state[i][1] = i;
677 } else {
678 clipplane_state[i][0] = STATE_INTERNAL;
679 clipplane_state[i][1] = STATE_CLIP_INTERNAL;
680 clipplane_state[i][2] = i;
681 }
682 _mesa_add_state_reference(params, clipplane_state[i]);
683 }
684
685 if (nir->info.stage == MESA_SHADER_VERTEX) {
686 NIR_PASS_V(nir, nir_lower_clip_vs, ucp_enables,
687 true, can_compact, clipplane_state);
688 } else if (nir->info.stage == MESA_SHADER_GEOMETRY) {
689 NIR_PASS_V(nir, nir_lower_clip_gs, ucp_enables,
690 can_compact, clipplane_state);
691 }
692
693 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
694 nir_shader_get_entrypoint(nir), true, false);
695 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
696 }
697 }
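/* Summary of the lowering above: if the shader already writes
 * gl_ClipDistance, nir_lower_clip_disable only adjusts the written
 * distances to honor ucp_enables. Otherwise each enabled plane becomes
 * a state uniform (STATE_CLIPPLANE in eye space when a GLSL vertex
 * shader is bound, STATE_CLIP_INTERNAL otherwise) and
 * nir_lower_clip_vs/gs emits the per-plane distance computations.
 */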
698
699 static const gl_state_index16 depth_range_state[STATE_LENGTH] =
700 { STATE_DEPTH_RANGE };
701
702 static struct st_common_variant *
703 st_create_vp_variant(struct st_context *st,
704 struct st_program *stvp,
705 const struct st_common_variant_key *key)
706 {
707 struct st_common_variant *vpv = CALLOC_STRUCT(st_common_variant);
708 struct pipe_context *pipe = st->pipe;
709 struct pipe_shader_state state = {0};
710
711 static const gl_state_index16 point_size_state[STATE_LENGTH] =
712 { STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
713 struct gl_program_parameter_list *params = stvp->Base.Parameters;
714
715 vpv->key = *key;
716
717 state.stream_output = stvp->state.stream_output;
718
719 if (stvp->state.type == PIPE_SHADER_IR_NIR &&
720 (!key->is_draw_shader ||
721 /* TODO: Draw can't handle lowered IO. */
722 (draw_has_llvm() && !stvp->Base.info.io_lowered))) {
723 bool finalize = false;
724
725 state.type = PIPE_SHADER_IR_NIR;
726 state.ir.nir = get_nir_shader(st, stvp);
727 if (key->clamp_color) {
728 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
729 finalize = true;
730 }
731 if (key->passthrough_edgeflags) {
732 NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
733 finalize = true;
734 }
735
736 if (key->lower_point_size) {
737 _mesa_add_state_reference(params, point_size_state);
738 NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
739 point_size_state);
740 finalize = true;
741 }
742
743 if (key->lower_ucp) {
744 lower_ucp(st, state.ir.nir, key->lower_ucp, params);
745 finalize = true;
746 }
747
748 if (finalize || !st->allow_st_finalize_nir_twice) {
749 st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
750 true);
751
752 /* Some of the lowering above may have introduced new varyings */
753 nir_shader_gather_info(state.ir.nir,
754 nir_shader_get_entrypoint(state.ir.nir));
755 }
756
757 if (ST_DEBUG & DEBUG_PRINT_IR)
758 nir_print_shader(state.ir.nir, stderr);
759
760 if (key->is_draw_shader)
761 vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
762 else
763 vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);
764
765 return vpv;
766 }
767
768 state.type = PIPE_SHADER_IR_TGSI;
769 state.tokens = tgsi_dup_tokens(stvp->state.tokens);
770
771 /* Emulate features. */
772 if (key->clamp_color || key->passthrough_edgeflags) {
773 const struct tgsi_token *tokens;
774 unsigned flags =
775 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
776 (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
777
778 tokens = tgsi_emulate(state.tokens, flags);
779
780 if (tokens) {
781 tgsi_free_tokens(state.tokens);
782 state.tokens = tokens;
783 } else {
784 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
785 }
786 }
787
788 if (key->lower_depth_clamp) {
789 unsigned depth_range_const =
790 _mesa_add_state_reference(params, depth_range_state);
791
792 const struct tgsi_token *tokens;
793 tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
794 key->clip_negative_one_to_one);
795 if (tokens != state.tokens)
796 tgsi_free_tokens(state.tokens);
797 state.tokens = tokens;
798 }
799
800 if (ST_DEBUG & DEBUG_PRINT_IR)
801 tgsi_dump(state.tokens, 0);
802
803 if (key->is_draw_shader)
804 vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
805 else
806 vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);
807
808 if (state.tokens) {
809 tgsi_free_tokens(state.tokens);
810 }
811
812 return vpv;
813 }
814
815
816 /**
817 * Find/create a vertex program variant.
818 */
819 struct st_common_variant *
820 st_get_vp_variant(struct st_context *st,
821 struct st_program *stp,
822 const struct st_common_variant_key *key)
823 {
824 struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
825 struct st_common_variant *vpv;
826
827 /* Search for existing variant */
828 for (vpv = st_common_variant(stp->variants); vpv;
829 vpv = st_common_variant(vpv->base.next)) {
830 if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
831 break;
832 }
833 }
834
835 if (!vpv) {
836 /* create now */
837 vpv = st_create_vp_variant(st, stp, key);
838 if (vpv) {
839 vpv->base.st = key->st;
840
841 unsigned num_inputs = stvp->num_inputs + key->passthrough_edgeflags;
842 for (unsigned index = 0; index < num_inputs; ++index) {
843 unsigned attr = stvp->index_to_input[index];
844 if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
845 continue;
846 vpv->vert_attrib_mask |= 1u << attr;
847 }
848
849 /* insert into list */
850 vpv->base.next = stp->variants;
851 stp->variants = &vpv->base;
852 }
853 }
854
855 return vpv;
856 }
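/* vert_attrib_mask records which VERT_ATTRIB_* slots the variant
 * consumes; key->passthrough_edgeflags extends the walk by one entry
 * so the pre-reserved edgeflag input (set up in
 * st_prepare_vertex_program) is included when that lowering is used.
 */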
857
858
859 /**
860 * Translate a Mesa fragment shader into a TGSI shader.
861 */
862 bool
863 st_translate_fragment_program(struct st_context *st,
864 struct st_program *stfp)
865 {
866 /* Non-GLSL programs: */
867 if (!stfp->glsl_to_tgsi) {
868 _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
869 if (st->ctx->Const.GLSLFragCoordIsSysVal)
870 _mesa_program_fragment_position_to_sysval(&stfp->Base);
871
872 /* This determines which states will be updated when the assembly
873 * shader is bound.
874 *
875 * fragment.position and glDrawPixels always use constants.
876 */
877 stfp->affected_states = ST_NEW_FS_STATE |
878 ST_NEW_SAMPLE_SHADING |
879 ST_NEW_FS_CONSTANTS;
880
881 if (stfp->ati_fs) {
882 /* Just set them for ATI_fs unconditionally. */
883 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
884 ST_NEW_FS_SAMPLERS;
885 } else {
886 /* ARB_fp */
887 if (stfp->Base.SamplersUsed)
888 stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
889 ST_NEW_FS_SAMPLERS;
890 }
891
892 /* Translate to NIR. */
893 if (!stfp->ati_fs &&
894 PIPE_SHADER_IR_NIR ==
895 st->pipe->screen->get_shader_param(st->pipe->screen,
896 PIPE_SHADER_FRAGMENT,
897 PIPE_SHADER_CAP_PREFERRED_IR)) {
898 nir_shader *nir =
899 st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
900
901 if (stfp->Base.nir)
902 ralloc_free(stfp->Base.nir);
903 if (stfp->serialized_nir) {
904 free(stfp->serialized_nir);
905 stfp->serialized_nir = NULL;
906 }
907 stfp->state.type = PIPE_SHADER_IR_NIR;
908 stfp->Base.nir = nir;
909 return true;
910 }
911 }
912
913 ubyte outputMapping[2 * FRAG_RESULT_MAX];
914 ubyte inputMapping[VARYING_SLOT_MAX];
915 ubyte inputSlotToAttr[VARYING_SLOT_MAX];
916 ubyte interpMode[PIPE_MAX_SHADER_INPUTS]; /* XXX size? */
917 GLuint attr;
918 GLbitfield64 inputsRead;
919 struct ureg_program *ureg;
920
921 GLboolean write_all = GL_FALSE;
922
923 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
924 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
925 uint fs_num_inputs = 0;
926
927 ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
928 ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
929 uint fs_num_outputs = 0;
930
931 memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));
932
933 /*
934 * Convert Mesa program inputs to TGSI input register semantics.
935 */
936 inputsRead = stfp->Base.info.inputs_read;
937 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
938 if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
939 const GLuint slot = fs_num_inputs++;
940
941 inputMapping[attr] = slot;
942 inputSlotToAttr[slot] = attr;
943
944 switch (attr) {
945 case VARYING_SLOT_POS:
946 input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
947 input_semantic_index[slot] = 0;
948 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
949 break;
950 case VARYING_SLOT_COL0:
951 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
952 input_semantic_index[slot] = 0;
953 interpMode[slot] = stfp->glsl_to_tgsi ?
954 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
955 break;
956 case VARYING_SLOT_COL1:
957 input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
958 input_semantic_index[slot] = 1;
959 interpMode[slot] = stfp->glsl_to_tgsi ?
960 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
961 break;
962 case VARYING_SLOT_FOGC:
963 input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
964 input_semantic_index[slot] = 0;
965 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
966 break;
967 case VARYING_SLOT_FACE:
968 input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
969 input_semantic_index[slot] = 0;
970 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
971 break;
972 case VARYING_SLOT_PRIMITIVE_ID:
973 input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
974 input_semantic_index[slot] = 0;
975 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
976 break;
977 case VARYING_SLOT_LAYER:
978 input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
979 input_semantic_index[slot] = 0;
980 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
981 break;
982 case VARYING_SLOT_VIEWPORT:
983 input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
984 input_semantic_index[slot] = 0;
985 interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
986 break;
987 case VARYING_SLOT_CLIP_DIST0:
988 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
989 input_semantic_index[slot] = 0;
990 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
991 break;
992 case VARYING_SLOT_CLIP_DIST1:
993 input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
994 input_semantic_index[slot] = 1;
995 interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
996 break;
997 case VARYING_SLOT_CULL_DIST0:
998 case VARYING_SLOT_CULL_DIST1:
999 /* these should have been lowered by GLSL */
1000 assert(0);
1001 break;
1002 /* In most cases, there is nothing special about these
1003 * inputs, so adopt a convention to use the generic
1004 * semantic name and the mesa VARYING_SLOT_ number as the
1005 * index.
1006 *
1007 * All that is required is that the vertex shader labels
1008 * its own outputs similarly, and that the vertex shader
1009 * generates at least every output required by the
1010 * fragment shader plus fixed-function hardware (such as
1011 * BFC).
1012 *
1013 * However, some drivers may need us to identify the PNTC and TEXi
1014 * varyings if, for example, their capability to replace them with
1015 * sprite coordinates is limited.
1016 */
1017 case VARYING_SLOT_PNTC:
1018 if (st->needs_texcoord_semantic) {
1019 input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
1020 input_semantic_index[slot] = 0;
1021 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1022 break;
1023 }
1024 /* fall through */
1025 case VARYING_SLOT_TEX0:
1026 case VARYING_SLOT_TEX1:
1027 case VARYING_SLOT_TEX2:
1028 case VARYING_SLOT_TEX3:
1029 case VARYING_SLOT_TEX4:
1030 case VARYING_SLOT_TEX5:
1031 case VARYING_SLOT_TEX6:
1032 case VARYING_SLOT_TEX7:
1033 if (st->needs_texcoord_semantic) {
1034 input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
1035 input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
1036 interpMode[slot] = stfp->glsl_to_tgsi ?
1037 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1038 break;
1039 }
1040 /* fall through */
1041 case VARYING_SLOT_VAR0:
1042 default:
1043 /* Semantic indices should be zero-based because drivers may choose
1044 * to assign a fixed slot determined by that index.
1045 * This is useful because ARB_separate_shader_objects uses location
1046 * qualifiers for linkage, and if the semantic index corresponds to
1047 * these locations, linkage passes in the driver become unnecessary.
1048 *
1049 * If needs_texcoord_semantic is true, no semantic indices will be
1050 * consumed for the TEXi varyings, and we can base the locations of
1051 * the user varyings on VAR0. Otherwise, we use TEX0 as base index.
1052 */
1053 assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
1054 (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
1055 input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
1056 input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
1057 if (attr == VARYING_SLOT_PNTC)
1058 interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
1059 else {
1060 interpMode[slot] = stfp->glsl_to_tgsi ?
1061 TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
1062 }
1063 break;
1064 }
1065 }
1066 else {
1067 inputMapping[attr] = -1;
1068 }
1069 }
1070
1071 /*
1072 * Semantics and mapping for outputs
1073 */
1074 GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;
1075
1076 /* if z is written, emit that first */
1077 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
1078 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
1079 fs_output_semantic_index[fs_num_outputs] = 0;
1080 outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
1081 fs_num_outputs++;
1082 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_DEPTH);
1083 }
1084
1085 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
1086 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
1087 fs_output_semantic_index[fs_num_outputs] = 0;
1088 outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
1089 fs_num_outputs++;
1090 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_STENCIL);
1091 }
1092
1093 if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
1094 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
1095 fs_output_semantic_index[fs_num_outputs] = 0;
1096 outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
1097 fs_num_outputs++;
1098 outputsWritten &= ~BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
1099 }
1100
1101 /* handle remaining outputs (color) */
1102 for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
1103 const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
1104 stfp->Base.SecondaryOutputsWritten;
1105 const unsigned loc = attr % FRAG_RESULT_MAX;
1106
1107 if (written & BITFIELD64_BIT(loc)) {
1108 switch (loc) {
1109 case FRAG_RESULT_DEPTH:
1110 case FRAG_RESULT_STENCIL:
1111 case FRAG_RESULT_SAMPLE_MASK:
1112 /* handled above */
1113 assert(0);
1114 break;
1115 case FRAG_RESULT_COLOR:
1116 write_all = GL_TRUE; /* fallthrough */
1117 default: {
1118 int index;
1119 assert(loc == FRAG_RESULT_COLOR ||
1120 (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));
1121
1122 index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);
1123
1124 if (attr >= FRAG_RESULT_MAX) {
1125 /* Secondary color for dual source blending. */
1126 assert(index == 0);
1127 index++;
1128 }
1129
1130 fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
1131 fs_output_semantic_index[fs_num_outputs] = index;
1132 outputMapping[attr] = fs_num_outputs;
1133 break;
1134 }
1135 }
1136
1137 fs_num_outputs++;
1138 }
1139 }
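/* Attributes at FRAG_RESULT_MAX and above come from
 * SecondaryOutputsWritten (attr % FRAG_RESULT_MAX recovers the
 * location), i.e. the dual-source blending outputs of
 * GL_ARB_blend_func_extended; they are emitted as TGSI_SEMANTIC_COLOR
 * with semantic index 1.
 */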
1140
1141 ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
1142 if (ureg == NULL)
1143 return false;
1144
1145 ureg_setup_shader_info(ureg, &stfp->Base.info);
1146
1147 if (ST_DEBUG & DEBUG_MESA) {
1148 _mesa_print_program(&stfp->Base);
1149 _mesa_print_program_parameters(st->ctx, &stfp->Base);
1150 debug_printf("\n");
1151 }
1152 if (write_all == GL_TRUE)
1153 ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);
1154
1155 if (stfp->glsl_to_tgsi) {
1156 st_translate_program(st->ctx,
1157 PIPE_SHADER_FRAGMENT,
1158 ureg,
1159 stfp->glsl_to_tgsi,
1160 &stfp->Base,
1161 /* inputs */
1162 fs_num_inputs,
1163 inputMapping,
1164 inputSlotToAttr,
1165 input_semantic_name,
1166 input_semantic_index,
1167 interpMode,
1168 /* outputs */
1169 fs_num_outputs,
1170 outputMapping,
1171 fs_output_semantic_name,
1172 fs_output_semantic_index);
1173
1174 free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
1175 } else if (stfp->ati_fs)
1176 st_translate_atifs_program(ureg,
1177 stfp->ati_fs,
1178 &stfp->Base,
1179 /* inputs */
1180 fs_num_inputs,
1181 inputMapping,
1182 input_semantic_name,
1183 input_semantic_index,
1184 interpMode,
1185 /* outputs */
1186 fs_num_outputs,
1187 outputMapping,
1188 fs_output_semantic_name,
1189 fs_output_semantic_index);
1190 else
1191 st_translate_mesa_program(st->ctx,
1192 PIPE_SHADER_FRAGMENT,
1193 ureg,
1194 &stfp->Base,
1195 /* inputs */
1196 fs_num_inputs,
1197 inputMapping,
1198 input_semantic_name,
1199 input_semantic_index,
1200 interpMode,
1201 /* outputs */
1202 fs_num_outputs,
1203 outputMapping,
1204 fs_output_semantic_name,
1205 fs_output_semantic_index);
1206
1207 stfp->state.tokens = ureg_get_tokens(ureg, NULL);
1208 ureg_destroy(ureg);
1209
1210 if (stfp->glsl_to_tgsi) {
1211 stfp->glsl_to_tgsi = NULL;
1212 st_store_ir_in_disk_cache(st, &stfp->Base, false);
1213 }
1214
1215 return stfp->state.tokens != NULL;
1216 }
1217
1218 static struct st_fp_variant *
1219 st_create_fp_variant(struct st_context *st,
1220 struct st_program *stfp,
1221 const struct st_fp_variant_key *key)
1222 {
1223 struct pipe_context *pipe = st->pipe;
1224 struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
1225 struct pipe_shader_state state = {0};
1226 struct gl_program_parameter_list *params = stfp->Base.Parameters;
1227 static const gl_state_index16 texcoord_state[STATE_LENGTH] =
1228 { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
1229 static const gl_state_index16 scale_state[STATE_LENGTH] =
1230 { STATE_INTERNAL, STATE_PT_SCALE };
1231 static const gl_state_index16 bias_state[STATE_LENGTH] =
1232 { STATE_INTERNAL, STATE_PT_BIAS };
1233 static const gl_state_index16 alpha_ref_state[STATE_LENGTH] =
1234 { STATE_INTERNAL, STATE_ALPHA_REF };
1235
1236 if (!variant)
1237 return NULL;
1238
1239 if (stfp->state.type == PIPE_SHADER_IR_NIR) {
1240 bool finalize = false;
1241
1242 state.type = PIPE_SHADER_IR_NIR;
1243 state.ir.nir = get_nir_shader(st, stfp);
1244
1245 if (key->clamp_color) {
1246 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1247 finalize = true;
1248 }
1249
1250 if (key->lower_flatshade) {
1251 NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
1252 finalize = true;
1253 }
1254
1255 if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
1256 _mesa_add_state_reference(params, alpha_ref_state);
1257 NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
1258 false, alpha_ref_state);
1259 finalize = true;
1260 }
1261
1262 if (key->lower_two_sided_color) {
1263 bool face_sysval = st->ctx->Const.GLSLFrontFacingIsSysVal;
1264 NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color, face_sysval);
1265 finalize = true;
1266 }
1267
1268 if (key->persample_shading) {
1269 nir_shader *shader = state.ir.nir;
1270 nir_foreach_shader_in_variable(var, shader)
1271 var->data.sample = true;
1272 finalize = true;
1273 }
1274
1275 assert(!(key->bitmap && key->drawpixels));
1276
1277 /* glBitmap */
1278 if (key->bitmap) {
1279 nir_lower_bitmap_options options = {0};
1280
1281 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1282 options.sampler = variant->bitmap_sampler;
1283 options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
1284
1285 NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
1286 finalize = true;
1287 }
1288
1289 /* glDrawPixels (color only) */
1290 if (key->drawpixels) {
1291 nir_lower_drawpixels_options options = {{0}};
1292 unsigned samplers_used = stfp->Base.SamplersUsed;
1293
1294 /* Find the first unused slot. */
1295 variant->drawpix_sampler = ffs(~samplers_used) - 1;
1296 options.drawpix_sampler = variant->drawpix_sampler;
1297 samplers_used |= (1 << variant->drawpix_sampler);
1298
1299 options.pixel_maps = key->pixelMaps;
1300 if (key->pixelMaps) {
1301 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1302 options.pixelmap_sampler = variant->pixelmap_sampler;
1303 }
1304
1305 options.scale_and_bias = key->scaleAndBias;
1306 if (key->scaleAndBias) {
1307 _mesa_add_state_reference(params, scale_state);
1308 memcpy(options.scale_state_tokens, scale_state,
1309 sizeof(options.scale_state_tokens));
1310 _mesa_add_state_reference(params, bias_state);
1311 memcpy(options.bias_state_tokens, bias_state,
1312 sizeof(options.bias_state_tokens));
1313 }
1314
1315 _mesa_add_state_reference(params, texcoord_state);
1316 memcpy(options.texcoord_state_tokens, texcoord_state,
1317 sizeof(options.texcoord_state_tokens));
1318
1319 NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
1320 finalize = true;
1321 }
1322
1323 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1324 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
1325 key->external.lower_ayuv || key->external.lower_xyuv)) {
1326
1327 st_nir_lower_samplers(pipe->screen, state.ir.nir,
1328 stfp->shader_program, &stfp->Base);
1329
1330 nir_lower_tex_options options = {0};
1331 options.lower_y_uv_external = key->external.lower_nv12;
1332 options.lower_y_u_v_external = key->external.lower_iyuv;
1333 options.lower_xy_uxvx_external = key->external.lower_xy_uxvx;
1334 options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
1335 options.lower_ayuv_external = key->external.lower_ayuv;
1336 options.lower_xyuv_external = key->external.lower_xyuv;
1337 NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
1338 finalize = true;
1339 }
1340
1341 if (finalize || !st->allow_st_finalize_nir_twice) {
1342 st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
1343 false);
1344 }
1345
1346 /* This pass needs to happen *after* st_nir_lower_samplers */
1347 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1348 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
1349 key->external.lower_ayuv || key->external.lower_xyuv)) {
1350 NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
1351 ~stfp->Base.SamplersUsed,
1352 key->external.lower_nv12 || key->external.lower_xy_uxvx ||
1353 key->external.lower_yx_xuxv,
1354 key->external.lower_iyuv);
1355 finalize = true;
1356 }
1357
1358 if (finalize || !st->allow_st_finalize_nir_twice) {
1359 /* Some of the lowering above may have introduced new varyings */
1360 nir_shader_gather_info(state.ir.nir,
1361 nir_shader_get_entrypoint(state.ir.nir));
1362
1363 struct pipe_screen *screen = pipe->screen;
1364 if (screen->finalize_nir)
1365 screen->finalize_nir(screen, state.ir.nir, false);
1366 }
1367
1368 if (ST_DEBUG & DEBUG_PRINT_IR)
1369 nir_print_shader(state.ir.nir, stderr);
1370
1371 variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
1372 variant->key = *key;
1373
1374 return variant;
1375 }
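/* What follows is the TGSI fallback for the same variant keys: the
 * lowerings keyed on clamp_color, persample_shading, bitmap,
 * drawpixels and the external YUV formats are redone as
 * token-to-token transforms on state.tokens, plus the TGSI-only
 * depth-clamp lowering.
 */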
1376
1377 state.tokens = stfp->state.tokens;
1378
1379 assert(!(key->bitmap && key->drawpixels));
1380
1381 /* Fix texture targets and add fog for ATI_fs */
1382 if (stfp->ati_fs) {
1383 const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);
1384
1385 if (tokens)
1386 state.tokens = tokens;
1387 else
1388 fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
1389 }
1390
1391 /* Emulate features. */
1392 if (key->clamp_color || key->persample_shading) {
1393 const struct tgsi_token *tokens;
1394 unsigned flags =
1395 (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
1396 (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
1397
1398 tokens = tgsi_emulate(state.tokens, flags);
1399
1400 if (tokens) {
1401 if (state.tokens != stfp->state.tokens)
1402 tgsi_free_tokens(state.tokens);
1403 state.tokens = tokens;
1404 } else
1405 fprintf(stderr, "mesa: cannot emulate deprecated features\n");
1406 }
1407
1408 /* glBitmap */
1409 if (key->bitmap) {
1410 const struct tgsi_token *tokens;
1411
1412 variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1413
1414 tokens = st_get_bitmap_shader(state.tokens,
1415 st->internal_target,
1416 variant->bitmap_sampler,
1417 st->needs_texcoord_semantic,
1418 st->bitmap.tex_format ==
1419 PIPE_FORMAT_R8_UNORM);
1420
1421 if (tokens) {
1422 if (state.tokens != stfp->state.tokens)
1423 tgsi_free_tokens(state.tokens);
1424 state.tokens = tokens;
1425 } else
1426 fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
1427 }
1428
1429 /* glDrawPixels (color only) */
1430 if (key->drawpixels) {
1431 const struct tgsi_token *tokens;
1432 unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;
1433
1434 /* Find the first unused slot. */
1435 variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
1436
1437 if (key->pixelMaps) {
1438 unsigned samplers_used = stfp->Base.SamplersUsed |
1439 (1 << variant->drawpix_sampler);
1440
1441 variant->pixelmap_sampler = ffs(~samplers_used) - 1;
1442 }
1443
1444 if (key->scaleAndBias) {
1445 scale_const = _mesa_add_state_reference(params, scale_state);
1446 bias_const = _mesa_add_state_reference(params, bias_state);
1447 }
1448
1449 texcoord_const = _mesa_add_state_reference(params, texcoord_state);
1450
1451 tokens = st_get_drawpix_shader(state.tokens,
1452 st->needs_texcoord_semantic,
1453 key->scaleAndBias, scale_const,
1454 bias_const, key->pixelMaps,
1455 variant->drawpix_sampler,
1456 variant->pixelmap_sampler,
1457 texcoord_const, st->internal_target);
1458
1459 if (tokens) {
1460 if (state.tokens != stfp->state.tokens)
1461 tgsi_free_tokens(state.tokens);
1462 state.tokens = tokens;
1463 } else
1464 fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
1465 }
1466
1467 if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
1468 key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
1469 const struct tgsi_token *tokens;
1470
1471 /* samplers inserted would conflict, but this should be impossible: */
1472 assert(!(key->bitmap || key->drawpixels));
1473
1474 tokens = st_tgsi_lower_yuv(state.tokens,
1475 ~stfp->Base.SamplersUsed,
1476 key->external.lower_nv12 ||
1477 key->external.lower_xy_uxvx ||
1478 key->external.lower_yx_xuxv,
1479 key->external.lower_iyuv);
1480 if (tokens) {
1481 if (state.tokens != stfp->state.tokens)
1482 tgsi_free_tokens(state.tokens);
1483 state.tokens = tokens;
1484 } else {
1485 fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
1486 }
1487 }
1488
1489 if (key->lower_depth_clamp) {
1490 unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
1491
1492 const struct tgsi_token *tokens;
1493 tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
1494 if (state.tokens != stfp->state.tokens)
1495 tgsi_free_tokens(state.tokens);
1496 state.tokens = tokens;
1497 }
1498
1499 if (ST_DEBUG & DEBUG_PRINT_IR)
1500 tgsi_dump(state.tokens, 0);
1501
1502 /* fill in variant */
1503 variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
1504 variant->key = *key;
1505
1506 if (state.tokens != stfp->state.tokens)
1507 tgsi_free_tokens(state.tokens);
1508 return variant;
1509 }
1510
1511 /**
1512 * Translate fragment program if needed.
1513 */
1514 struct st_fp_variant *
1515 st_get_fp_variant(struct st_context *st,
1516 struct st_program *stfp,
1517 const struct st_fp_variant_key *key)
1518 {
1519 struct st_fp_variant *fpv;
1520
1521 /* Search for existing variant */
1522 for (fpv = st_fp_variant(stfp->variants); fpv;
1523 fpv = st_fp_variant(fpv->base.next)) {
1524 if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
1525 break;
1526 }
1527 }
1528
1529 if (!fpv) {
1530 /* create new */
1531 fpv = st_create_fp_variant(st, stfp, key);
1532 if (fpv) {
1533 fpv->base.st = key->st;
1534
1535 if (key->bitmap || key->drawpixels) {
1536 /* Regular variants should always come before the
1537 * bitmap & drawpixels variants (unless there are
1538 * no regular variants), so that st_update_fp
1539 * can take a fast path when
1540 * shader_has_one_variant is set.
1541 */
1542 if (!stfp->variants) {
1543 stfp->variants = &fpv->base;
1544 } else {
1545 /* insert into list after the first one */
1546 fpv->base.next = stfp->variants->next;
1547 stfp->variants->next = &fpv->base;
1548 }
1549 } else {
1550 /* insert into list */
1551 fpv->base.next = stfp->variants;
1552 stfp->variants = &fpv->base;
1553 }
1554 }
1555 }
1556
1557 return fpv;
1558 }
1559
1560 /**
1561 * Translate a program. This is common code for geometry and tessellation
1562 * shaders.
1563 */
1564 bool
1565 st_translate_common_program(struct st_context *st,
1566 struct st_program *stp)
1567 {
1568 struct gl_program *prog = &stp->Base;
1569 enum pipe_shader_type stage =
1570 pipe_shader_type_from_mesa(stp->Base.info.stage);
1571 struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
1572
1573 if (ureg == NULL)
1574 return false;
1575
1576 ureg_setup_shader_info(ureg, &stp->Base.info);
1577
1578 ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
1579 ubyte inputMapping[VARYING_SLOT_TESS_MAX];
1580 ubyte outputMapping[VARYING_SLOT_TESS_MAX];
1581 GLuint attr;
1582
1583 ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
1584 ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
1585 uint num_inputs = 0;
1586
1587 ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
1588 ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
1589 uint num_outputs = 0;
1590
1591 GLint i;
1592
1593 memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
1594 memset(inputMapping, 0, sizeof(inputMapping));
1595 memset(outputMapping, 0, sizeof(outputMapping));
1596 memset(&stp->state, 0, sizeof(stp->state));
1597
1598 /*
1599 * Convert Mesa program inputs to TGSI input register semantics.
1600 */
1601 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1602 if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
1603 continue;
1604
1605 unsigned slot = num_inputs++;
1606
1607 inputMapping[attr] = slot;
1608 inputSlotToAttr[slot] = attr;
1609
1610 unsigned semantic_name, semantic_index;
1611 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1612 &semantic_name, &semantic_index);
1613 input_semantic_name[slot] = semantic_name;
1614 input_semantic_index[slot] = semantic_index;
1615 }
1616
1617 /* Also add patch inputs. */
1618 for (attr = 0; attr < 32; attr++) {
1619 if (prog->info.patch_inputs_read & (1u << attr)) {
1620 GLuint slot = num_inputs++;
1621 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1622
1623 inputMapping[patch_attr] = slot;
1624 inputSlotToAttr[slot] = patch_attr;
1625 input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1626 input_semantic_index[slot] = attr;
1627 }
1628 }
1629
1630 /* initialize output semantics to defaults */
1631 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1632 output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
1633 output_semantic_index[i] = 0;
1634 }
1635
1636 /*
1637 * Determine number of outputs, the (default) output register
1638 * mapping and the semantic information for each output.
1639 */
1640 for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
1641 if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
1642 GLuint slot = num_outputs++;
1643
1644 outputMapping[attr] = slot;
1645
1646 unsigned semantic_name, semantic_index;
1647 tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
1648 &semantic_name, &semantic_index);
1649 output_semantic_name[slot] = semantic_name;
1650 output_semantic_index[slot] = semantic_index;
1651 }
1652 }
1653
1654 /* Also add patch outputs. */
1655 for (attr = 0; attr < 32; attr++) {
1656 if (prog->info.patch_outputs_written & (1u << attr)) {
1657 GLuint slot = num_outputs++;
1658 GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;
1659
1660 outputMapping[patch_attr] = slot;
1661 output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
1662 output_semantic_index[slot] = attr;
1663 }
1664 }
1665
1666 st_translate_program(st->ctx,
1667 stage,
1668 ureg,
1669 stp->glsl_to_tgsi,
1670 prog,
1671 /* inputs */
1672 num_inputs,
1673 inputMapping,
1674 inputSlotToAttr,
1675 input_semantic_name,
1676 input_semantic_index,
1677 NULL,
1678 /* outputs */
1679 num_outputs,
1680 outputMapping,
1681 output_semantic_name,
1682 output_semantic_index);
1683
1684 stp->state.tokens = ureg_get_tokens(ureg, NULL);
1685
1686 ureg_destroy(ureg);
1687
1688 st_translate_stream_output_info(prog);
1689
1690 st_store_ir_in_disk_cache(st, prog, false);
1691
1692 if ((ST_DEBUG & DEBUG_PRINT_IR) && (ST_DEBUG & DEBUG_MESA))
1693 _mesa_print_program(prog);
1694
1695 free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
1696 stp->glsl_to_tgsi = NULL;
1697 return true;
1698 }
1699
1700
1701 /**
1702 * Get/create a basic program variant.
1703 */
1704 struct st_variant *
1705 st_get_common_variant(struct st_context *st,
1706 struct st_program *prog,
1707 const struct st_common_variant_key *key)
1708 {
1709 struct pipe_context *pipe = st->pipe;
1710 struct st_variant *v;
1711 struct pipe_shader_state state = {0};
1712 struct gl_program_parameter_list *params = prog->Base.Parameters;
1713
1714 /* Search for existing variant */
1715 for (v = prog->variants; v; v = v->next) {
1716 if (memcmp(&st_common_variant(v)->key, key, sizeof(*key)) == 0)
1717 break;
1718 }
1719
1720 if (!v) {
1721 /* create new */
1722 v = (struct st_variant*)CALLOC_STRUCT(st_common_variant);
1723 if (v) {
1724 if (prog->state.type == PIPE_SHADER_IR_NIR) {
1725 bool finalize = false;
1726
1727 state.type = PIPE_SHADER_IR_NIR;
1728 state.ir.nir = get_nir_shader(st, prog);
1729
1730 if (key->clamp_color) {
1731 NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
1732 finalize = true;
1733 }
1734
1735 if (key->lower_ucp) {
1736 lower_ucp(st, state.ir.nir, key->lower_ucp, params);
1737 finalize = true;
1738 }
1739
1740 state.stream_output = prog->state.stream_output;
1741
1742 if (finalize || !st->allow_st_finalize_nir_twice) {
1743 st_finalize_nir(st, &prog->Base, prog->shader_program,
1744 state.ir.nir, true);
1745 }
1746
1747 if (ST_DEBUG & DEBUG_PRINT_IR)
1748 nir_print_shader(state.ir.nir, stderr);
1749 } else {
1750 if (key->lower_depth_clamp) {
1752
1753 unsigned depth_range_const =
1754 _mesa_add_state_reference(params, depth_range_state);
1755
1756 const struct tgsi_token *tokens;
1757 tokens =
1758 st_tgsi_lower_depth_clamp(prog->state.tokens,
1759 depth_range_const,
1760 key->clip_negative_one_to_one);
1761
1762 if (tokens != prog->state.tokens)
1763 tgsi_free_tokens(prog->state.tokens);
1764
1765 prog->state.tokens = tokens;
1766 }
1767 state = prog->state;
1768
1769 if (ST_DEBUG & DEBUG_PRINT_IR)
1770 tgsi_dump(state.tokens, 0);
1771 }
1772 /* fill in new variant */
1773 switch (prog->Base.info.stage) {
1774 case MESA_SHADER_TESS_CTRL:
1775 v->driver_shader = pipe->create_tcs_state(pipe, &state);
1776 break;
1777 case MESA_SHADER_TESS_EVAL:
1778 v->driver_shader = pipe->create_tes_state(pipe, &state);
1779 break;
1780 case MESA_SHADER_GEOMETRY:
1781 v->driver_shader = pipe->create_gs_state(pipe, &state);
1782 break;
1783 case MESA_SHADER_COMPUTE: {
1784 struct pipe_compute_state cs = {0};
1785 cs.ir_type = state.type;
1786 cs.req_local_mem = prog->Base.info.cs.shared_size;
1787
1788 if (state.type == PIPE_SHADER_IR_NIR)
1789 cs.prog = state.ir.nir;
1790 else
1791 cs.prog = state.tokens;
1792
1793 v->driver_shader = pipe->create_compute_state(pipe, &cs);
1794 break;
1795 }
1796 default:
1797 assert(!"unhandled shader type");
1798 free(v);
1799 return NULL;
1800 }
1801
1802 st_common_variant(v)->key = *key;
1803 v->st = key->st;
1804
1805 /* insert into list */
1806 v->next = prog->variants;
1807 prog->variants = v;
1808 }
1809 }
1810
1811 return v;
1812 }
1813
1814
1815 /**
1816 * Vert/Geom/Frag programs have per-context variants. Free all the
1817 * variants attached to the given program which match the given context.
1818 */
1819 static void
1820 destroy_program_variants(struct st_context *st, struct gl_program *target)
1821 {
1822 if (!target || target == &_mesa_DummyProgram)
1823 return;
1824
1825 struct st_program *p = st_program(target);
1826 struct st_variant *v, **prevPtr = &p->variants;
1827 bool unbound = false;
1828
1829 for (v = p->variants; v; ) {
1830 struct st_variant *next = v->next;
1831 if (v->st == st) {
1832 if (!unbound) {
1833 st_unbind_program(st, p);
1834 unbound = true;
1835 }
1836
1837 /* unlink from list */
1838 *prevPtr = next;
1839 /* destroy this variant */
1840 delete_variant(st, v, target->Target);
1841 }
1842 else {
1843 prevPtr = &v->next;
1844 }
1845 v = next;
1846 }
1847 }
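/* The prevPtr walk above is the usual singly-linked-list filter: a
 * pointer-to-pointer tracks the link that must be rewritten, so a
 * matching variant can be unlinked without a special case for the
 * list head.
 */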
1848
1849
1850 /**
1851 * Callback for _mesa_HashWalk. Free all the shader's program variants
1852 * which match the given context.
1853 */
1854 static void
1855 destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
1856 {
1857 struct st_context *st = (struct st_context *) userData;
1858 struct gl_shader *shader = (struct gl_shader *) data;
1859
1860 switch (shader->Type) {
1861 case GL_SHADER_PROGRAM_MESA:
1862 {
1863 struct gl_shader_program *shProg = (struct gl_shader_program *) data;
1864 GLuint i;
1865
1866 for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
1867 if (shProg->_LinkedShaders[i])
1868 destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
1869 }
1870 }
1871 break;
1872 case GL_VERTEX_SHADER:
1873 case GL_FRAGMENT_SHADER:
1874 case GL_GEOMETRY_SHADER:
1875 case GL_TESS_CONTROL_SHADER:
1876 case GL_TESS_EVALUATION_SHADER:
1877 case GL_COMPUTE_SHADER:
1878 break;
1879 default:
1880 assert(0);
1881 }
1882 }
1883
1884
1885 /**
1886 * Callback for _mesa_HashWalk. Free all the program variants which match
1887 * the given context.
1888 */
1889 static void
1890 destroy_program_variants_cb(GLuint key, void *data, void *userData)
1891 {
1892 struct st_context *st = (struct st_context *) userData;
1893 struct gl_program *program = (struct gl_program *) data;
1894 destroy_program_variants(st, program);
1895 }
1896
1897
1898 /**
1899 * Walk over all shaders and programs to delete any variants which
1900 * belong to the given context.
1901 * This is called during context tear-down.
1902 */
1903 void
1904 st_destroy_program_variants(struct st_context *st)
1905 {
1906 /* If shaders can be shared with other contexts, the last context will
1907 * call DeleteProgram on all shaders, releasing everything.
1908 */
1909 if (st->has_shareable_shaders)
1910 return;
1911
1912 /* ARB vert/frag program */
1913 _mesa_HashWalk(st->ctx->Shared->Programs,
1914 destroy_program_variants_cb, st);
1915
1916 /* GLSL vert/frag/geom shaders */
1917 _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
1918 destroy_shader_program_variants_cb, st);
1919 }
1920
1921
1922 /**
1923 * Compile one shader variant.
1924 */
1925 static void
1926 st_precompile_shader_variant(struct st_context *st,
1927 struct gl_program *prog)
1928 {
1929 switch (prog->Target) {
1930 case GL_VERTEX_PROGRAM_ARB: {
1931 struct st_program *p = (struct st_program *)prog;
1932 struct st_common_variant_key key;
1933
1934 memset(&key, 0, sizeof(key));
1935
1936 key.st = st->has_shareable_shaders ? NULL : st;
1937 st_get_vp_variant(st, p, &key);
1938 break;
1939 }
1940
1941 case GL_FRAGMENT_PROGRAM_ARB: {
1942 struct st_program *p = (struct st_program *)prog;
1943 struct st_fp_variant_key key;
1944
1945 memset(&key, 0, sizeof(key));
1946
1947 key.st = st->has_shareable_shaders ? NULL : st;
1948 st_get_fp_variant(st, p, &key);
1949 break;
1950 }
1951
1952 case GL_TESS_CONTROL_PROGRAM_NV:
1953 case GL_TESS_EVALUATION_PROGRAM_NV:
1954 case GL_GEOMETRY_PROGRAM_NV:
1955 case GL_COMPUTE_PROGRAM_NV: {
1956 struct st_program *p = st_program(prog);
1957 struct st_common_variant_key key;
1958
1959 memset(&key, 0, sizeof(key));
1960
1961 key.st = st->has_shareable_shaders ? NULL : st;
1962 st_get_common_variant(st, p, &key);
1963 break;
1964 }
1965
1966 default:
1967 assert(0);
1968 }
1969 }
1970
1971 void
1972 st_serialize_nir(struct st_program *stp)
1973 {
1974 if (!stp->serialized_nir) {
1975 struct blob blob;
1976 size_t size;
1977
1978 blob_init(&blob);
1979 nir_serialize(&blob, stp->Base.nir, false);
1980 blob_finish_get_buffer(&blob, &stp->serialized_nir, &size);
1981 stp->serialized_nir_size = size;
1982 }
1983 }
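/* A minimal sketch of the round trip, mirroring the blob calls above
 * and in get_nir_shader(); "options" stands in for the stage's
 * NirOptions:
 *
 *    struct blob_reader r;
 *    blob_reader_init(&r, stp->serialized_nir, stp->serialized_nir_size);
 *    nir_shader *clone = nir_deserialize(NULL, options, &r);
 *
 * The final "false" passed to nir_serialize above asks it not to strip
 * names and debug info from the blob.
 */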
1984
1985 void
1986 st_finalize_program(struct st_context *st, struct gl_program *prog)
1987 {
1988 if (st->current_program[prog->info.stage] == prog) {
1989 if (prog->info.stage == MESA_SHADER_VERTEX)
1990 st->dirty |= ST_NEW_VERTEX_PROGRAM(st, (struct st_program *)prog);
1991 else
1992 st->dirty |= ((struct st_program *)prog)->affected_states;
1993 }
1994
1995 if (prog->nir) {
1996 nir_sweep(prog->nir);
1997
1998 /* This is only needed for ARB_vp/fp programs and when the disk cache
1999 * is disabled. If the disk cache is enabled, GLSL programs are
2000 * serialized in write_nir_to_cache.
2001 */
2002 st_serialize_nir(st_program(prog));
2003 }
2004
2005 /* Create Gallium shaders now instead of on demand. */
2006 if ((ST_DEBUG & DEBUG_PRECOMPILE) ||
2007 st->shader_has_one_variant[prog->info.stage])
2008 st_precompile_shader_variant(st, prog);
2009 }