src/mesa/state_tracker/st_program.c
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 *   Brian Paul
 */


#include "main/errors.h"
#include "util/imports.h"
#include "main/hash.h"
#include "main/mtypes.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/programopt.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_serialize.h"
#include "draw/draw_context.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
50 #include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_emulate.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"

#include "st_debug.h"
#include "st_cb_bitmap.h"
#include "st_cb_drawpixels.h"
#include "st_context.h"
#include "st_tgsi_lower_depth_clamp.h"
#include "st_tgsi_lower_yuv.h"
#include "st_program.h"
#include "st_mesa_to_tgsi.h"
#include "st_atifs_to_tgsi.h"
#include "st_nir.h"
#include "st_shader_cache.h"
#include "st_util.h"
#include "cso_cache/cso_context.h"



static void
set_affected_state_flags(uint64_t *states,
                         struct gl_program *prog,
                         uint64_t new_constants,
                         uint64_t new_sampler_views,
                         uint64_t new_samplers,
                         uint64_t new_images,
                         uint64_t new_ubos,
                         uint64_t new_ssbos,
                         uint64_t new_atomics)
{
   if (prog->Parameters->NumParameters)
      *states |= new_constants;

   if (prog->info.num_textures)
      *states |= new_sampler_views | new_samplers;

   if (prog->info.num_images)
      *states |= new_images;

   if (prog->info.num_ubos)
      *states |= new_ubos;

   if (prog->info.num_ssbos)
      *states |= new_ssbos;

   if (prog->info.num_abos)
      *states |= new_atomics;
}

/**
 * This determines which states will be updated when the shader is bound.
 */
void
st_set_prog_affected_state_flags(struct gl_program *prog)
{
   uint64_t *states;

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      states = &((struct st_program*)prog)->affected_states;

      *states = ST_NEW_VS_STATE |
                ST_NEW_RASTERIZER |
                ST_NEW_VERTEX_ARRAYS;

      set_affected_state_flags(states, prog,
                               ST_NEW_VS_CONSTANTS,
                               ST_NEW_VS_SAMPLER_VIEWS,
                               ST_NEW_VS_SAMPLERS,
                               ST_NEW_VS_IMAGES,
                               ST_NEW_VS_UBOS,
                               ST_NEW_VS_SSBOS,
                               ST_NEW_VS_ATOMICS);
      break;

   case MESA_SHADER_TESS_CTRL:
      states = &(st_program(prog))->affected_states;

      *states = ST_NEW_TCS_STATE;

      set_affected_state_flags(states, prog,
                               ST_NEW_TCS_CONSTANTS,
                               ST_NEW_TCS_SAMPLER_VIEWS,
                               ST_NEW_TCS_SAMPLERS,
                               ST_NEW_TCS_IMAGES,
                               ST_NEW_TCS_UBOS,
                               ST_NEW_TCS_SSBOS,
                               ST_NEW_TCS_ATOMICS);
      break;

   case MESA_SHADER_TESS_EVAL:
      states = &(st_program(prog))->affected_states;

      *states = ST_NEW_TES_STATE |
                ST_NEW_RASTERIZER;

      set_affected_state_flags(states, prog,
                               ST_NEW_TES_CONSTANTS,
                               ST_NEW_TES_SAMPLER_VIEWS,
                               ST_NEW_TES_SAMPLERS,
                               ST_NEW_TES_IMAGES,
                               ST_NEW_TES_UBOS,
                               ST_NEW_TES_SSBOS,
                               ST_NEW_TES_ATOMICS);
      break;

   case MESA_SHADER_GEOMETRY:
      states = &(st_program(prog))->affected_states;

      *states = ST_NEW_GS_STATE |
                ST_NEW_RASTERIZER;

      set_affected_state_flags(states, prog,
                               ST_NEW_GS_CONSTANTS,
                               ST_NEW_GS_SAMPLER_VIEWS,
                               ST_NEW_GS_SAMPLERS,
                               ST_NEW_GS_IMAGES,
                               ST_NEW_GS_UBOS,
                               ST_NEW_GS_SSBOS,
                               ST_NEW_GS_ATOMICS);
      break;

   case MESA_SHADER_FRAGMENT:
      states = &((struct st_program*)prog)->affected_states;

      /* gl_FragCoord and glDrawPixels always use constants. */
      *states = ST_NEW_FS_STATE |
                ST_NEW_SAMPLE_SHADING |
                ST_NEW_FS_CONSTANTS;

      set_affected_state_flags(states, prog,
                               ST_NEW_FS_CONSTANTS,
                               ST_NEW_FS_SAMPLER_VIEWS,
                               ST_NEW_FS_SAMPLERS,
                               ST_NEW_FS_IMAGES,
                               ST_NEW_FS_UBOS,
                               ST_NEW_FS_SSBOS,
                               ST_NEW_FS_ATOMICS);
      break;

   case MESA_SHADER_COMPUTE:
      states = &((struct st_program*)prog)->affected_states;

      *states = ST_NEW_CS_STATE;

      set_affected_state_flags(states, prog,
                               ST_NEW_CS_CONSTANTS,
                               ST_NEW_CS_SAMPLER_VIEWS,
                               ST_NEW_CS_SAMPLERS,
                               ST_NEW_CS_IMAGES,
                               ST_NEW_CS_UBOS,
                               ST_NEW_CS_SSBOS,
                               ST_NEW_CS_ATOMICS);
      break;

   default:
      unreachable("unhandled shader stage");
   }
}
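
/* Example (illustrative): when this program later becomes current, the
 * state tracker just ORs these precomputed flags into st->dirty, as
 * st_finalize_program() below does:
 *
 *    st->dirty |= ((struct st_program *)prog)->affected_states;
 *
 * so a fragment program that samples textures ends up marking
 * ST_NEW_FS_SAMPLER_VIEWS | ST_NEW_FS_SAMPLERS dirty in addition to the
 * base ST_NEW_FS_STATE.
 */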


/**
 * Delete a shader variant.  Note the caller must unlink the variant from
 * the linked list.
 */
static void
delete_variant(struct st_context *st, struct st_variant *v, GLenum target)
{
   if (v->driver_shader) {
      if (target == GL_VERTEX_PROGRAM_ARB &&
          ((struct st_common_variant*)v)->key.is_draw_shader) {
         /* Draw shader. */
         draw_delete_vertex_shader(st->draw, v->driver_shader);
      } else if (st->has_shareable_shaders || v->st == st) {
         /* The shader's context matches the calling context, or we
          * don't care.
          */
         switch (target) {
         case GL_VERTEX_PROGRAM_ARB:
            st->pipe->delete_vs_state(st->pipe, v->driver_shader);
            break;
         case GL_TESS_CONTROL_PROGRAM_NV:
            st->pipe->delete_tcs_state(st->pipe, v->driver_shader);
            break;
         case GL_TESS_EVALUATION_PROGRAM_NV:
            st->pipe->delete_tes_state(st->pipe, v->driver_shader);
            break;
         case GL_GEOMETRY_PROGRAM_NV:
            st->pipe->delete_gs_state(st->pipe, v->driver_shader);
            break;
         case GL_FRAGMENT_PROGRAM_ARB:
            st->pipe->delete_fs_state(st->pipe, v->driver_shader);
            break;
         case GL_COMPUTE_PROGRAM_NV:
            st->pipe->delete_compute_state(st->pipe, v->driver_shader);
            break;
         default:
            unreachable("bad shader type in delete_basic_variant");
         }
      } else {
         /* We can't delete a shader with a context different from the one
          * that created it.  Add it to the creating context's zombie list.
          */
         enum pipe_shader_type type =
            pipe_shader_type_from_mesa(_mesa_program_enum_to_shader_stage(target));

         st_save_zombie_shader(v->st, type, v->driver_shader);
      }
   }

   free(v);
}

static void
st_unbind_program(struct st_context *st, struct st_program *p)
{
   /* Unbind the shader in cso_context and re-bind in st/mesa. */
   switch (p->Base.info.stage) {
   case MESA_SHADER_VERTEX:
      cso_set_vertex_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_VS_STATE;
      break;
   case MESA_SHADER_TESS_CTRL:
      cso_set_tessctrl_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_TCS_STATE;
      break;
   case MESA_SHADER_TESS_EVAL:
      cso_set_tesseval_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_TES_STATE;
      break;
   case MESA_SHADER_GEOMETRY:
      cso_set_geometry_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_GS_STATE;
      break;
   case MESA_SHADER_FRAGMENT:
      cso_set_fragment_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_FS_STATE;
      break;
   case MESA_SHADER_COMPUTE:
      cso_set_compute_shader_handle(st->cso_context, NULL);
      st->dirty |= ST_NEW_CS_STATE;
      break;
   default:
      unreachable("invalid shader type");
   }
}

/**
 * Free all basic program variants.
 */
void
st_release_variants(struct st_context *st, struct st_program *p)
{
   struct st_variant *v;

   /* If we are releasing shaders, unbind them first, because we don't
    * know which of them are currently bound in the driver.  The dirty
    * flags set by st_unbind_program() make st/mesa re-bind the current
    * program later.
    */
   if (p->variants)
      st_unbind_program(st, p);

   for (v = p->variants; v; ) {
      struct st_variant *next = v->next;
      delete_variant(st, v, p->Base.Target);
      v = next;
   }

   p->variants = NULL;

   if (p->state.tokens) {
      ureg_free_tokens(p->state.tokens);
      p->state.tokens = NULL;
   }

   /* Note: Any setup of ->ir.nir that has had pipe->create_*_state called on
    * it has resulted in the driver taking ownership of the NIR.  Those
    * callers should be NULLing out the nir field in any pipe_shader_state
    * that might have this called in order to indicate that.
    *
    * GLSL IR and ARB programs will have set gl_program->nir to the same
    * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
    */
}

void
st_finalize_nir_before_variants(struct nir_shader *nir)
{
   NIR_PASS_V(nir, nir_opt_access);

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   if (nir->options->lower_all_io_to_temps ||
       nir->options->lower_all_io_to_elements ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
   }

   st_nir_assign_vs_in_locations(nir);
}

/**
 * Translate ARB (asm) program to NIR
 */
static nir_shader *
st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
                         gl_shader_stage stage)
{
   struct pipe_screen *screen = st->pipe->screen;
   const struct gl_shader_compiler_options *options =
      &st->ctx->Const.ShaderCompilerOptions[stage];

   /* Translate to NIR */
   nir_shader *nir = prog_to_nir(prog, options->NirOptions);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");

   NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
   NIR_PASS_V(nir, nir_lower_system_values);

   /* Optimise NIR */
   NIR_PASS_V(nir, nir_opt_constant_folding);
   st_nir_opts(nir);
   st_finalize_nir_before_variants(nir);

   if (st->allow_st_finalize_nir_twice)
      st_finalize_nir(st, prog, NULL, nir, true);

   nir_validate_shader(nir, "after st/glsl finalize_nir");

   return nir;
}

void
st_prepare_vertex_program(struct st_program *stp)
{
   struct st_vertex_program *stvp = (struct st_vertex_program *)stp;

   stvp->num_inputs = 0;
   memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
   memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));

   /* Determine number of inputs, the mappings between VERT_ATTRIB_x
    * and TGSI generic input indexes, plus input attrib semantic info.
    */
   for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
      if ((stp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
         stvp->input_to_index[attr] = stvp->num_inputs;
         stvp->index_to_input[stvp->num_inputs] = attr;
         stvp->num_inputs++;

         if ((stp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
            /* add placeholder for second part of a double attribute */
            stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
            stvp->num_inputs++;
         }
      }
   }
   /* pre-setup potentially unused edgeflag input */
   stvp->input_to_index[VERT_ATTRIB_EDGEFLAG] = stvp->num_inputs;
   stvp->index_to_input[stvp->num_inputs] = VERT_ATTRIB_EDGEFLAG;

   /* Compute mapping of vertex program outputs to slots. */
   unsigned num_outputs = 0;
   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr))
         stvp->result_to_output[attr] = num_outputs++;
   }
   /* pre-setup potentially unused edgeflag output */
   stvp->result_to_output[VARYING_SLOT_EDGE] = num_outputs;
}
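
/* Example (illustrative): for a vertex program whose inputs_read bitmask
 * contains only VERT_ATTRIB_POS and VERT_ATTRIB_TEX0, the loop above
 * yields:
 *
 *    input_to_index[VERT_ATTRIB_POS]  = 0
 *    input_to_index[VERT_ATTRIB_TEX0] = 1
 *    index_to_input[0] = VERT_ATTRIB_POS
 *    index_to_input[1] = VERT_ATTRIB_TEX0
 *    num_inputs = 2
 *
 * A dual-slot (64-bit) attribute additionally reserves the next index as
 * ST_DOUBLE_ATTRIB_PLACEHOLDER for its second half.
 */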

void
st_translate_stream_output_info(struct gl_program *prog)
{
   struct gl_transform_feedback_info *info = prog->sh.LinkedTransformFeedback;
   if (!info)
      return;

   /* Determine the (default) output register mapping for each output. */
   unsigned num_outputs = 0;
   ubyte output_mapping[VARYING_SLOT_TESS_MAX];
   memset(output_mapping, 0, sizeof(output_mapping));

   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (prog->info.outputs_written & BITFIELD64_BIT(attr))
         output_mapping[attr] = num_outputs++;
   }

   /* Translate stream output info. */
   struct pipe_stream_output_info *so_info =
      &((struct st_program*)prog)->state.stream_output;

   for (unsigned i = 0; i < info->NumOutputs; i++) {
      so_info->output[i].register_index =
         output_mapping[info->Outputs[i].OutputRegister];
      so_info->output[i].start_component = info->Outputs[i].ComponentOffset;
      so_info->output[i].num_components = info->Outputs[i].NumComponents;
      so_info->output[i].output_buffer = info->Outputs[i].OutputBuffer;
      so_info->output[i].dst_offset = info->Outputs[i].DstOffset;
      so_info->output[i].stream = info->Outputs[i].StreamId;
   }

   for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      so_info->stride[i] = info->Buffers[i].Stride;
   }
   so_info->num_outputs = info->NumOutputs;
}
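
/* Example (illustrative): if a shader writes VARYING_SLOT_POS and
 * VARYING_SLOT_VAR0 and transform feedback captures VAR0, then
 * output_mapping[VARYING_SLOT_VAR0] is 1, so that captured output's
 * register_index becomes 1 in pipe_stream_output_info.
 */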

/**
 * Translate a vertex program.
 */
bool
st_translate_vertex_program(struct st_context *st,
                            struct st_program *stp)
{
   struct ureg_program *ureg;
   enum pipe_error error;
   unsigned num_outputs = 0;
   unsigned attr;
   ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
   ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};

   if (stp->Base.arb.IsPositionInvariant)
      _mesa_insert_mvp_code(st->ctx, &stp->Base);

   st_prepare_vertex_program(stp);

   /* ARB_vp: */
   if (!stp->glsl_to_tgsi) {
      _mesa_remove_output_reads(&stp->Base, PROGRAM_OUTPUT);

      /* This determines which states will be updated when the assembly
       * shader is bound.
       */
      stp->affected_states = ST_NEW_VS_STATE |
                             ST_NEW_RASTERIZER |
                             ST_NEW_VERTEX_ARRAYS;

      if (stp->Base.Parameters->NumParameters)
         stp->affected_states |= ST_NEW_VS_CONSTANTS;

      /* Translate to NIR if preferred. */
      if (st->pipe->screen->get_shader_param(st->pipe->screen,
                                             PIPE_SHADER_VERTEX,
                                             PIPE_SHADER_CAP_PREFERRED_IR)) {
         assert(!stp->glsl_to_tgsi);

         if (stp->Base.nir)
            ralloc_free(stp->Base.nir);

         if (stp->serialized_nir) {
            free(stp->serialized_nir);
            stp->serialized_nir = NULL;
         }

         stp->state.type = PIPE_SHADER_IR_NIR;
         stp->Base.nir = st_translate_prog_to_nir(st, &stp->Base,
                                                  MESA_SHADER_VERTEX);
         /* For st_draw_feedback, we need to generate TGSI too if draw doesn't
          * use LLVM.
          */
         if (draw_has_llvm())
            return true;
      }
   }

   /* Get semantic names and indices. */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
         unsigned slot = num_outputs++;
         unsigned semantic_name, semantic_index;
         tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                      &semantic_name, &semantic_index);
         output_semantic_name[slot] = semantic_name;
         output_semantic_index[slot] = semantic_index;
      }
   }
   /* pre-setup potentially unused edgeflag output */
   output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
   output_semantic_index[num_outputs] = 0;

   ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
   if (ureg == NULL)
      return false;

   if (stp->Base.info.clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    stp->Base.info.clip_distance_array_size);
   if (stp->Base.info.cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    stp->Base.info.cull_distance_array_size);

   if (ST_DEBUG & DEBUG_MESA) {
      _mesa_print_program(&stp->Base);
      _mesa_print_program_parameters(st->ctx, &stp->Base);
      debug_printf("\n");
   }

   struct st_vertex_program *stvp = (struct st_vertex_program *)stp;

   if (stp->glsl_to_tgsi) {
      error = st_translate_program(st->ctx,
                                   PIPE_SHADER_VERTEX,
                                   ureg,
                                   stp->glsl_to_tgsi,
                                   &stp->Base,
                                   /* inputs */
                                   stvp->num_inputs,
                                   stvp->input_to_index,
                                   NULL, /* inputSlotToAttr */
                                   NULL, /* input semantic name */
                                   NULL, /* input semantic index */
                                   NULL, /* interp mode */
                                   /* outputs */
                                   num_outputs,
                                   stvp->result_to_output,
                                   output_semantic_name,
                                   output_semantic_index);

      st_translate_stream_output_info(&stp->Base);

      free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
   } else
      error = st_translate_mesa_program(st->ctx,
                                        PIPE_SHADER_VERTEX,
                                        ureg,
                                        &stp->Base,
                                        /* inputs */
                                        stvp->num_inputs,
                                        stvp->input_to_index,
                                        NULL, /* input semantic name */
                                        NULL, /* input semantic index */
                                        NULL,
                                        /* outputs */
                                        num_outputs,
                                        stvp->result_to_output,
                                        output_semantic_name,
                                        output_semantic_index);

   if (error) {
      debug_printf("%s: failed to translate Mesa program:\n", __func__);
      _mesa_print_program(&stp->Base);
      debug_assert(0);
      return false;
   }

   stp->state.tokens = ureg_get_tokens(ureg, NULL);
   ureg_destroy(ureg);

   if (stp->glsl_to_tgsi) {
      stp->glsl_to_tgsi = NULL;
      st_store_ir_in_disk_cache(st, &stp->Base, false);
   }

   return stp->state.tokens != NULL;
}

static struct nir_shader *
get_nir_shader(struct st_context *st, struct st_program *stp)
{
   if (stp->Base.nir) {
      nir_shader *nir = stp->Base.nir;

      /* The first shader variant takes ownership of NIR, so that there is
       * no cloning.  Additional shader variants are always generated from
       * serialized NIR to save memory.
       */
      stp->Base.nir = NULL;
      assert(stp->serialized_nir && stp->serialized_nir_size);
      return nir;
   }

   struct blob_reader blob_reader;
   const struct nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;

   blob_reader_init(&blob_reader, stp->serialized_nir, stp->serialized_nir_size);
   return nir_deserialize(NULL, options, &blob_reader);
}
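
/* Note: the returned shader is handed to pipe->create_*_state() by the
 * variant-creation functions below, at which point the driver takes
 * ownership of the NIR (see the comment at the end of
 * st_release_variants()).  The serialized blob written by
 * st_serialize_nir() is what lets later variants get their own copy.
 */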

static const gl_state_index16 depth_range_state[STATE_LENGTH] =
   { STATE_DEPTH_RANGE };

static struct st_common_variant *
st_create_vp_variant(struct st_context *st,
                     struct st_program *stvp,
                     const struct st_common_variant_key *key)
{
   struct st_common_variant *vpv = CALLOC_STRUCT(st_common_variant);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_shader_state state = {0};

   static const gl_state_index16 point_size_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
   struct gl_program_parameter_list *params = stvp->Base.Parameters;

   vpv->key = *key;

   state.stream_output = stvp->state.stream_output;

   if (stvp->state.type == PIPE_SHADER_IR_NIR &&
       (!key->is_draw_shader || draw_has_llvm())) {
      bool finalize = false;

      state.type = PIPE_SHADER_IR_NIR;
      state.ir.nir = get_nir_shader(st, stvp);
      if (key->clamp_color) {
         NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
         finalize = true;
      }
      if (key->passthrough_edgeflags) {
         NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
         finalize = true;
      }

      if (key->lower_point_size) {
         _mesa_add_state_reference(params, point_size_state);
         NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
                    point_size_state);
         finalize = true;
      }

      if (key->lower_ucp) {
         bool can_compact = screen->get_param(screen,
                                              PIPE_CAP_NIR_COMPACT_ARRAYS);

         bool use_eye = st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] != NULL;
         gl_state_index16 clipplane_state[MAX_CLIP_PLANES][STATE_LENGTH];
         for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
            if (use_eye) {
               clipplane_state[i][0] = STATE_CLIPPLANE;
               clipplane_state[i][1] = i;
            } else {
               clipplane_state[i][0] = STATE_INTERNAL;
               clipplane_state[i][1] = STATE_CLIP_INTERNAL;
               clipplane_state[i][2] = i;
            }
            _mesa_add_state_reference(params, clipplane_state[i]);
         }

         NIR_PASS_V(state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
                    true, can_compact, clipplane_state);
         NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
                    nir_shader_get_entrypoint(state.ir.nir), true, false);
         NIR_PASS_V(state.ir.nir, nir_lower_global_vars_to_local);
         finalize = true;
      }

      if (finalize || !st->allow_st_finalize_nir_twice) {
         st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
                         true);

         /* Some of the lowering above may have introduced new varyings */
         nir_shader_gather_info(state.ir.nir,
                                nir_shader_get_entrypoint(state.ir.nir));
      }

      if (ST_DEBUG & DEBUG_PRINT_IR)
         nir_print_shader(state.ir.nir, stderr);

      if (key->is_draw_shader)
         vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
      else
         vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);

      return vpv;
   }

   state.type = PIPE_SHADER_IR_TGSI;
   state.tokens = tgsi_dup_tokens(stvp->state.tokens);

   /* Emulate features. */
   if (key->clamp_color || key->passthrough_edgeflags) {
      const struct tgsi_token *tokens;
      unsigned flags =
         (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
         (key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);

      tokens = tgsi_emulate(state.tokens, flags);

      if (tokens) {
         tgsi_free_tokens(state.tokens);
         state.tokens = tokens;
      } else {
         fprintf(stderr, "mesa: cannot emulate deprecated features\n");
      }
   }

   if (key->lower_depth_clamp) {
      unsigned depth_range_const =
         _mesa_add_state_reference(params, depth_range_state);

      const struct tgsi_token *tokens;
      tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
                                         key->clip_negative_one_to_one);
      if (tokens != state.tokens)
         tgsi_free_tokens(state.tokens);
      state.tokens = tokens;
   }

   if (ST_DEBUG & DEBUG_PRINT_IR)
      tgsi_dump(state.tokens, 0);

   if (key->is_draw_shader)
      vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
   else
      vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);

   if (state.tokens) {
      tgsi_free_tokens(state.tokens);
   }

   return vpv;
}


/**
 * Find/create a vertex program variant.
 */
struct st_common_variant *
st_get_vp_variant(struct st_context *st,
                  struct st_program *stp,
                  const struct st_common_variant_key *key)
{
   struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
   struct st_common_variant *vpv;

   /* Search for existing variant */
   for (vpv = st_common_variant(stp->variants); vpv;
        vpv = st_common_variant(vpv->base.next)) {
      if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
         break;
      }
   }

   if (!vpv) {
      /* create now */
      vpv = st_create_vp_variant(st, stp, key);
      if (vpv) {
         vpv->base.st = key->st;

         unsigned num_inputs = stvp->num_inputs + key->passthrough_edgeflags;
         for (unsigned index = 0; index < num_inputs; ++index) {
            unsigned attr = stvp->index_to_input[index];
            if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
               continue;
            vpv->vert_attrib_mask |= 1u << attr;
         }

         /* insert into list */
         vpv->base.next = stp->variants;
         stp->variants = &vpv->base;
      }
   }

   return vpv;
}
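
/* Usage sketch (hypothetical caller; modeled on
 * st_precompile_shader_variant() below):
 *
 *    struct st_common_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    vpv = st_get_vp_variant(st, stp, &key);
 *    cso_set_vertex_shader_handle(st->cso_context,
 *                                 vpv->base.driver_shader);
 *
 * Real callers fill in further key fields (clamp_color,
 * passthrough_edgeflags, ...) from current GL state before the lookup.
 */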


/**
 * Translate a Mesa fragment shader into a TGSI shader.
 */
bool
st_translate_fragment_program(struct st_context *st,
                              struct st_program *stfp)
{
   /* Non-GLSL programs: */
   if (!stfp->glsl_to_tgsi) {
      _mesa_remove_output_reads(&stfp->Base, PROGRAM_OUTPUT);
      if (st->ctx->Const.GLSLFragCoordIsSysVal)
         _mesa_program_fragment_position_to_sysval(&stfp->Base);

      /* This determines which states will be updated when the assembly
       * shader is bound.
       *
       * fragment.position and glDrawPixels always use constants.
       */
      stfp->affected_states = ST_NEW_FS_STATE |
                              ST_NEW_SAMPLE_SHADING |
                              ST_NEW_FS_CONSTANTS;

      if (stfp->ati_fs) {
         /* Just set them for ATI_fs unconditionally. */
         stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
                                  ST_NEW_FS_SAMPLERS;
      } else {
         /* ARB_fp */
         if (stfp->Base.SamplersUsed)
            stfp->affected_states |= ST_NEW_FS_SAMPLER_VIEWS |
                                     ST_NEW_FS_SAMPLERS;
      }

      /* Translate to NIR. */
      if (!stfp->ati_fs &&
          st->pipe->screen->get_shader_param(st->pipe->screen,
                                             PIPE_SHADER_FRAGMENT,
                                             PIPE_SHADER_CAP_PREFERRED_IR)) {
         nir_shader *nir =
            st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);

         if (stfp->Base.nir)
            ralloc_free(stfp->Base.nir);
         if (stfp->serialized_nir) {
            free(stfp->serialized_nir);
            stfp->serialized_nir = NULL;
         }
         stfp->state.type = PIPE_SHADER_IR_NIR;
         stfp->Base.nir = nir;
         return true;
      }
   }

   ubyte outputMapping[2 * FRAG_RESULT_MAX];
   ubyte inputMapping[VARYING_SLOT_MAX];
   ubyte inputSlotToAttr[VARYING_SLOT_MAX];
   ubyte interpMode[PIPE_MAX_SHADER_INPUTS];  /* XXX size? */
   GLuint attr;
   GLbitfield64 inputsRead;
   struct ureg_program *ureg;

   GLboolean write_all = GL_FALSE;

   ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
   ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
   uint fs_num_inputs = 0;

   ubyte fs_output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
   ubyte fs_output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
   uint fs_num_outputs = 0;

   memset(inputSlotToAttr, ~0, sizeof(inputSlotToAttr));

   /*
    * Convert Mesa program inputs to TGSI input register semantics.
    */
   inputsRead = stfp->Base.info.inputs_read;
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if ((inputsRead & BITFIELD64_BIT(attr)) != 0) {
         const GLuint slot = fs_num_inputs++;

         inputMapping[attr] = slot;
         inputSlotToAttr[slot] = attr;

         switch (attr) {
         case VARYING_SLOT_POS:
            input_semantic_name[slot] = TGSI_SEMANTIC_POSITION;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
            break;
         case VARYING_SLOT_COL0:
            input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
            input_semantic_index[slot] = 0;
            interpMode[slot] = stfp->glsl_to_tgsi ?
               TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
            break;
         case VARYING_SLOT_COL1:
            input_semantic_name[slot] = TGSI_SEMANTIC_COLOR;
            input_semantic_index[slot] = 1;
            interpMode[slot] = stfp->glsl_to_tgsi ?
               TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_COLOR;
            break;
         case VARYING_SLOT_FOGC:
            input_semantic_name[slot] = TGSI_SEMANTIC_FOG;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_FACE:
            input_semantic_name[slot] = TGSI_SEMANTIC_FACE;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_PRIMITIVE_ID:
            input_semantic_name[slot] = TGSI_SEMANTIC_PRIMID;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_LAYER:
            input_semantic_name[slot] = TGSI_SEMANTIC_LAYER;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_VIEWPORT:
            input_semantic_name[slot] = TGSI_SEMANTIC_VIEWPORT_INDEX;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_CONSTANT;
            break;
         case VARYING_SLOT_CLIP_DIST0:
            input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
            input_semantic_index[slot] = 0;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_CLIP_DIST1:
            input_semantic_name[slot] = TGSI_SEMANTIC_CLIPDIST;
            input_semantic_index[slot] = 1;
            interpMode[slot] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;
         case VARYING_SLOT_CULL_DIST0:
         case VARYING_SLOT_CULL_DIST1:
            /* these should have been lowered by GLSL */
            assert(0);
            break;
            /* In most cases, there is nothing special about these
             * inputs, so adopt a convention to use the generic
             * semantic name and the mesa VARYING_SLOT_ number as the
             * index.
             *
             * All that is required is that the vertex shader labels
             * its own outputs similarly, and that the vertex shader
             * generates at least every output required by the
             * fragment shader plus fixed-function hardware (such as
             * BFC).
             *
             * However, some drivers may need us to identify the PNTC and TEXi
             * varyings if, for example, their capability to replace them with
             * sprite coordinates is limited.
             */
         case VARYING_SLOT_PNTC:
            if (st->needs_texcoord_semantic) {
               input_semantic_name[slot] = TGSI_SEMANTIC_PCOORD;
               input_semantic_index[slot] = 0;
               interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
               break;
            }
            /* fall through */
         case VARYING_SLOT_TEX0:
         case VARYING_SLOT_TEX1:
         case VARYING_SLOT_TEX2:
         case VARYING_SLOT_TEX3:
         case VARYING_SLOT_TEX4:
         case VARYING_SLOT_TEX5:
         case VARYING_SLOT_TEX6:
         case VARYING_SLOT_TEX7:
            if (st->needs_texcoord_semantic) {
               input_semantic_name[slot] = TGSI_SEMANTIC_TEXCOORD;
               input_semantic_index[slot] = attr - VARYING_SLOT_TEX0;
               interpMode[slot] = stfp->glsl_to_tgsi ?
                  TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
               break;
            }
            /* fall through */
         case VARYING_SLOT_VAR0:
         default:
            /* Semantic indices should be zero-based because drivers may choose
             * to assign a fixed slot determined by that index.
             * This is useful because ARB_separate_shader_objects uses location
             * qualifiers for linkage, and if the semantic index corresponds to
             * these locations, linkage passes in the driver become unnecessary.
             *
             * If needs_texcoord_semantic is true, no semantic indices will be
             * consumed for the TEXi varyings, and we can base the locations of
             * the user varyings on VAR0.  Otherwise, we use TEX0 as base index.
             */
            assert(attr >= VARYING_SLOT_VAR0 || attr == VARYING_SLOT_PNTC ||
                   (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7));
            input_semantic_name[slot] = TGSI_SEMANTIC_GENERIC;
            input_semantic_index[slot] = st_get_generic_varying_index(st, attr);
            if (attr == VARYING_SLOT_PNTC)
               interpMode[slot] = TGSI_INTERPOLATE_LINEAR;
            else {
               interpMode[slot] = stfp->glsl_to_tgsi ?
                  TGSI_INTERPOLATE_COUNT : TGSI_INTERPOLATE_PERSPECTIVE;
            }
            break;
         }
      }
      else {
         inputMapping[attr] = -1;
      }
   }

   /*
    * Semantics and mapping for outputs
    */
   GLbitfield64 outputsWritten = stfp->Base.info.outputs_written;

   /* if z is written, emit that first */
   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_POSITION;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_DEPTH] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_DEPTH);
   }

   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_STENCIL;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_STENCIL] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_STENCIL);
   }

   if (outputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)) {
      fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_SAMPLEMASK;
      fs_output_semantic_index[fs_num_outputs] = 0;
      outputMapping[FRAG_RESULT_SAMPLE_MASK] = fs_num_outputs;
      fs_num_outputs++;
      outputsWritten &= ~(1 << FRAG_RESULT_SAMPLE_MASK);
   }

   /* handle remaining outputs (color) */
   for (attr = 0; attr < ARRAY_SIZE(outputMapping); attr++) {
      const GLbitfield64 written = attr < FRAG_RESULT_MAX ? outputsWritten :
         stfp->Base.SecondaryOutputsWritten;
      const unsigned loc = attr % FRAG_RESULT_MAX;

      if (written & BITFIELD64_BIT(loc)) {
         switch (loc) {
         case FRAG_RESULT_DEPTH:
         case FRAG_RESULT_STENCIL:
         case FRAG_RESULT_SAMPLE_MASK:
            /* handled above */
            assert(0);
            break;
         case FRAG_RESULT_COLOR:
            write_all = GL_TRUE; /* fallthrough */
         default: {
            int index;
            assert(loc == FRAG_RESULT_COLOR ||
                   (FRAG_RESULT_DATA0 <= loc && loc < FRAG_RESULT_MAX));

            index = (loc == FRAG_RESULT_COLOR) ? 0 : (loc - FRAG_RESULT_DATA0);

            if (attr >= FRAG_RESULT_MAX) {
               /* Secondary color for dual source blending. */
               assert(index == 0);
               index++;
            }

            fs_output_semantic_name[fs_num_outputs] = TGSI_SEMANTIC_COLOR;
            fs_output_semantic_index[fs_num_outputs] = index;
            outputMapping[attr] = fs_num_outputs;
            break;
         }
         }

         fs_num_outputs++;
      }
   }

   ureg = ureg_create_with_screen(PIPE_SHADER_FRAGMENT, st->pipe->screen);
   if (ureg == NULL)
      return false;

   if (ST_DEBUG & DEBUG_MESA) {
      _mesa_print_program(&stfp->Base);
      _mesa_print_program_parameters(st->ctx, &stfp->Base);
      debug_printf("\n");
   }
   if (write_all == GL_TRUE)
      ureg_property(ureg, TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS, 1);

   if (stfp->Base.info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
      switch (stfp->Base.info.fs.depth_layout) {
      case FRAG_DEPTH_LAYOUT_ANY:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_ANY);
         break;
      case FRAG_DEPTH_LAYOUT_GREATER:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_GREATER);
         break;
      case FRAG_DEPTH_LAYOUT_LESS:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_LESS);
         break;
      case FRAG_DEPTH_LAYOUT_UNCHANGED:
         ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
                       TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
         break;
      default:
         assert(0);
      }
   }

   if (stfp->glsl_to_tgsi) {
      st_translate_program(st->ctx,
                           PIPE_SHADER_FRAGMENT,
                           ureg,
                           stfp->glsl_to_tgsi,
                           &stfp->Base,
                           /* inputs */
                           fs_num_inputs,
                           inputMapping,
                           inputSlotToAttr,
                           input_semantic_name,
                           input_semantic_index,
                           interpMode,
                           /* outputs */
                           fs_num_outputs,
                           outputMapping,
                           fs_output_semantic_name,
                           fs_output_semantic_index);

      free_glsl_to_tgsi_visitor(stfp->glsl_to_tgsi);
   } else if (stfp->ati_fs)
      st_translate_atifs_program(ureg,
                                 stfp->ati_fs,
                                 &stfp->Base,
                                 /* inputs */
                                 fs_num_inputs,
                                 inputMapping,
                                 input_semantic_name,
                                 input_semantic_index,
                                 interpMode,
                                 /* outputs */
                                 fs_num_outputs,
                                 outputMapping,
                                 fs_output_semantic_name,
                                 fs_output_semantic_index);
   else
      st_translate_mesa_program(st->ctx,
                                PIPE_SHADER_FRAGMENT,
                                ureg,
                                &stfp->Base,
                                /* inputs */
                                fs_num_inputs,
                                inputMapping,
                                input_semantic_name,
                                input_semantic_index,
                                interpMode,
                                /* outputs */
                                fs_num_outputs,
                                outputMapping,
                                fs_output_semantic_name,
                                fs_output_semantic_index);

   stfp->state.tokens = ureg_get_tokens(ureg, NULL);
   ureg_destroy(ureg);

   if (stfp->glsl_to_tgsi) {
      stfp->glsl_to_tgsi = NULL;
      st_store_ir_in_disk_cache(st, &stfp->Base, false);
   }

   return stfp->state.tokens != NULL;
}

static struct st_fp_variant *
st_create_fp_variant(struct st_context *st,
                     struct st_program *stfp,
                     const struct st_fp_variant_key *key)
{
   struct pipe_context *pipe = st->pipe;
   struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
   struct pipe_shader_state state = {0};
   struct gl_program_parameter_list *params = stfp->Base.Parameters;
   static const gl_state_index16 texcoord_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
   static const gl_state_index16 scale_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_PT_SCALE };
   static const gl_state_index16 bias_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_PT_BIAS };
   static const gl_state_index16 alpha_ref_state[STATE_LENGTH] =
      { STATE_INTERNAL, STATE_ALPHA_REF };

   if (!variant)
      return NULL;

   if (stfp->state.type == PIPE_SHADER_IR_NIR) {
      bool finalize = false;

      state.type = PIPE_SHADER_IR_NIR;
      state.ir.nir = get_nir_shader(st, stfp);

      if (key->clamp_color) {
         NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
         finalize = true;
      }

      if (key->lower_flatshade) {
         NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
         finalize = true;
      }

      if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
         _mesa_add_state_reference(params, alpha_ref_state);
         NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
                    false, alpha_ref_state);
         finalize = true;
      }

      if (key->lower_two_sided_color) {
         NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
         finalize = true;
      }

      if (key->persample_shading) {
         nir_shader *shader = state.ir.nir;
         nir_foreach_variable(var, &shader->inputs)
            var->data.sample = true;
         finalize = true;
      }

      assert(!(key->bitmap && key->drawpixels));

      /* glBitmap */
      if (key->bitmap) {
         nir_lower_bitmap_options options = {0};

         variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
         options.sampler = variant->bitmap_sampler;
         options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;

         NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
         finalize = true;
      }

      /* glDrawPixels (color only) */
      if (key->drawpixels) {
         nir_lower_drawpixels_options options = {{0}};
         unsigned samplers_used = stfp->Base.SamplersUsed;

         /* Find the first unused slot. */
         variant->drawpix_sampler = ffs(~samplers_used) - 1;
         options.drawpix_sampler = variant->drawpix_sampler;
         samplers_used |= (1 << variant->drawpix_sampler);

         options.pixel_maps = key->pixelMaps;
         if (key->pixelMaps) {
            variant->pixelmap_sampler = ffs(~samplers_used) - 1;
            options.pixelmap_sampler = variant->pixelmap_sampler;
         }

         options.scale_and_bias = key->scaleAndBias;
         if (key->scaleAndBias) {
            _mesa_add_state_reference(params, scale_state);
            memcpy(options.scale_state_tokens, scale_state,
                   sizeof(options.scale_state_tokens));
            _mesa_add_state_reference(params, bias_state);
            memcpy(options.bias_state_tokens, bias_state,
                   sizeof(options.bias_state_tokens));
         }

         _mesa_add_state_reference(params, texcoord_state);
         memcpy(options.texcoord_state_tokens, texcoord_state,
                sizeof(options.texcoord_state_tokens));

         NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
         finalize = true;
      }

      if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
                   key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
                   key->external.lower_ayuv || key->external.lower_xyuv)) {

         st_nir_lower_samplers(pipe->screen, state.ir.nir,
                               stfp->shader_program, &stfp->Base);

         nir_lower_tex_options options = {0};
         options.lower_y_uv_external = key->external.lower_nv12;
         options.lower_y_u_v_external = key->external.lower_iyuv;
         options.lower_xy_uxvx_external = key->external.lower_xy_uxvx;
         options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
         options.lower_ayuv_external = key->external.lower_ayuv;
         options.lower_xyuv_external = key->external.lower_xyuv;
         NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
         finalize = true;
      }

      if (finalize || !st->allow_st_finalize_nir_twice) {
         st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
                         false);
      }

      /* This pass needs to happen *after* nir_lower_sampler */
      if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
                   key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
                   key->external.lower_ayuv || key->external.lower_xyuv)) {
         NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
                    ~stfp->Base.SamplersUsed,
                    key->external.lower_nv12 || key->external.lower_xy_uxvx ||
                       key->external.lower_yx_xuxv,
                    key->external.lower_iyuv);
         finalize = true;
      }

      if (finalize || !st->allow_st_finalize_nir_twice) {
         /* Some of the lowering above may have introduced new varyings */
         nir_shader_gather_info(state.ir.nir,
                                nir_shader_get_entrypoint(state.ir.nir));

         struct pipe_screen *screen = pipe->screen;
         if (screen->finalize_nir)
            screen->finalize_nir(screen, state.ir.nir, false);
      }

      if (ST_DEBUG & DEBUG_PRINT_IR)
         nir_print_shader(state.ir.nir, stderr);

      variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
      variant->key = *key;

      return variant;
   }

   state.tokens = stfp->state.tokens;

   assert(!(key->bitmap && key->drawpixels));

   /* Fix texture targets and add fog for ATI_fs */
   if (stfp->ati_fs) {
      const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);

      if (tokens)
         state.tokens = tokens;
      else
         fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
   }

   /* Emulate features. */
   if (key->clamp_color || key->persample_shading) {
      const struct tgsi_token *tokens;
      unsigned flags =
         (key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
         (key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);

      tokens = tgsi_emulate(state.tokens, flags);

      if (tokens) {
         if (state.tokens != stfp->state.tokens)
            tgsi_free_tokens(state.tokens);
         state.tokens = tokens;
      } else
         fprintf(stderr, "mesa: cannot emulate deprecated features\n");
   }

   /* glBitmap */
   if (key->bitmap) {
      const struct tgsi_token *tokens;

      variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;

      tokens = st_get_bitmap_shader(state.tokens,
                                    st->internal_target,
                                    variant->bitmap_sampler,
                                    st->needs_texcoord_semantic,
                                    st->bitmap.tex_format ==
                                    PIPE_FORMAT_R8_UNORM);

      if (tokens) {
         if (state.tokens != stfp->state.tokens)
            tgsi_free_tokens(state.tokens);
         state.tokens = tokens;
      } else
         fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
   }

   /* glDrawPixels (color only) */
   if (key->drawpixels) {
      const struct tgsi_token *tokens;
      unsigned scale_const = 0, bias_const = 0, texcoord_const = 0;

      /* Find the first unused slot. */
      variant->drawpix_sampler = ffs(~stfp->Base.SamplersUsed) - 1;

      if (key->pixelMaps) {
         unsigned samplers_used = stfp->Base.SamplersUsed |
            (1 << variant->drawpix_sampler);

         variant->pixelmap_sampler = ffs(~samplers_used) - 1;
      }

      if (key->scaleAndBias) {
         scale_const = _mesa_add_state_reference(params, scale_state);
         bias_const = _mesa_add_state_reference(params, bias_state);
      }

      texcoord_const = _mesa_add_state_reference(params, texcoord_state);

      tokens = st_get_drawpix_shader(state.tokens,
                                     st->needs_texcoord_semantic,
                                     key->scaleAndBias, scale_const,
                                     bias_const, key->pixelMaps,
                                     variant->drawpix_sampler,
                                     variant->pixelmap_sampler,
                                     texcoord_const, st->internal_target);

      if (tokens) {
         if (state.tokens != stfp->state.tokens)
            tgsi_free_tokens(state.tokens);
         state.tokens = tokens;
      } else
         fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
   }

   if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
                key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
      const struct tgsi_token *tokens;

      /* samplers inserted would conflict, but this should be impossible: */
      assert(!(key->bitmap || key->drawpixels));

      tokens = st_tgsi_lower_yuv(state.tokens,
                                 ~stfp->Base.SamplersUsed,
                                 key->external.lower_nv12 ||
                                    key->external.lower_xy_uxvx ||
                                    key->external.lower_yx_xuxv,
                                 key->external.lower_iyuv);
      if (tokens) {
         if (state.tokens != stfp->state.tokens)
            tgsi_free_tokens(state.tokens);
         state.tokens = tokens;
      } else {
         fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
      }
   }

   if (key->lower_depth_clamp) {
      unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);

      const struct tgsi_token *tokens;
      tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
      if (state.tokens != stfp->state.tokens)
         tgsi_free_tokens(state.tokens);
      state.tokens = tokens;
   }

   if (ST_DEBUG & DEBUG_PRINT_IR)
      tgsi_dump(state.tokens, 0);

   /* fill in variant */
   variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
   variant->key = *key;

   if (state.tokens != stfp->state.tokens)
      tgsi_free_tokens(state.tokens);
   return variant;
}
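
/* Note: ffs(~SamplersUsed) - 1, used repeatedly above, is the
 * "first free sampler" idiom: ~SamplersUsed sets the bits of the unused
 * sampler units, and ffs() returns the 1-based position of the lowest
 * set bit, so subtracting 1 yields the first sampler unit the program
 * itself does not use.  That unit is then claimed for the bitmap,
 * drawpix, or pixelmap texture.
 */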

/**
 * Translate fragment program if needed.
 */
struct st_fp_variant *
st_get_fp_variant(struct st_context *st,
                  struct st_program *stfp,
                  const struct st_fp_variant_key *key)
{
   struct st_fp_variant *fpv;

   /* Search for existing variant */
   for (fpv = st_fp_variant(stfp->variants); fpv;
        fpv = st_fp_variant(fpv->base.next)) {
      if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
         break;
      }
   }

   if (!fpv) {
      /* create new */
      fpv = st_create_fp_variant(st, stfp, key);
      if (fpv) {
         fpv->base.st = key->st;

         if (key->bitmap || key->drawpixels) {
            /* Regular variants should always come before the
             * bitmap & drawpixels variants, (unless there
             * are no regular variants) so that
             * st_update_fp can take a fast path when
             * shader_has_one_variant is set.
             */
            if (!stfp->variants) {
               stfp->variants = &fpv->base;
            } else {
               /* insert into list after the first one */
               fpv->base.next = stfp->variants->next;
               stfp->variants->next = &fpv->base;
            }
         } else {
            /* insert into list */
            fpv->base.next = stfp->variants;
            stfp->variants = &fpv->base;
         }
      }
   }

   return fpv;
}
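
/* Usage sketch (hypothetical caller; modeled on
 * st_precompile_shader_variant() below):
 *
 *    struct st_fp_variant_key key;
 *    memset(&key, 0, sizeof(key));
 *    key.st = st->has_shareable_shaders ? NULL : st;
 *    fpv = st_get_fp_variant(st, stfp, &key);
 *    cso_set_fragment_shader_handle(st->cso_context,
 *                                   fpv->base.driver_shader);
 *
 * Helpers such as the glBitmap/glDrawPixels paths instead set key.bitmap
 * or key.drawpixels to get their specialized variants.
 */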

/**
 * Translate a program.  This is common code for geometry and tessellation
 * shaders.
 */
bool
st_translate_common_program(struct st_context *st,
                            struct st_program *stp)
{
   struct gl_program *prog = &stp->Base;
   enum pipe_shader_type stage =
      pipe_shader_type_from_mesa(stp->Base.info.stage);
   struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);

   if (ureg == NULL)
      return false;

   switch (stage) {
   case PIPE_SHADER_TESS_CTRL:
      ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
                    stp->Base.info.tess.tcs_vertices_out);
      break;

   case PIPE_SHADER_TESS_EVAL:
      if (stp->Base.info.tess.primitive_mode == GL_ISOLINES)
         ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
      else
         ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
                       stp->Base.info.tess.primitive_mode);

      STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_ODD);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_EVEN);
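
      /* The "(x + 1) % 3" below converts between the Mesa and Gallium
       * orderings of the spacing enums without a lookup table; the
       * STATIC_ASSERTs above verify that the two enums actually line up
       * that way, so a change to either enum breaks the build instead of
       * silently miscompiling tessellation state.
       */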

      ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
                    (stp->Base.info.tess.spacing + 1) % 3);

      ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
                    !stp->Base.info.tess.ccw);
      ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
                    stp->Base.info.tess.point_mode);
      break;

   case PIPE_SHADER_GEOMETRY:
      ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
                    stp->Base.info.gs.input_primitive);
      ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
                    stp->Base.info.gs.output_primitive);
      ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
                    stp->Base.info.gs.vertices_out);
      ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
                    stp->Base.info.gs.invocations);
      break;

   default:
      break;
   }

   ubyte inputSlotToAttr[VARYING_SLOT_TESS_MAX];
   ubyte inputMapping[VARYING_SLOT_TESS_MAX];
   ubyte outputMapping[VARYING_SLOT_TESS_MAX];
   GLuint attr;

   ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS];
   ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
   uint num_inputs = 0;

   ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS];
   ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
   uint num_outputs = 0;

   GLint i;

   memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
   memset(inputMapping, 0, sizeof(inputMapping));
   memset(outputMapping, 0, sizeof(outputMapping));
   memset(&stp->state, 0, sizeof(stp->state));

   if (prog->info.clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    prog->info.clip_distance_array_size);
   if (prog->info.cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    prog->info.cull_distance_array_size);

   /*
    * Convert Mesa program inputs to TGSI input register semantics.
    */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) == 0)
         continue;

      unsigned slot = num_inputs++;

      inputMapping[attr] = slot;
      inputSlotToAttr[slot] = attr;

      unsigned semantic_name, semantic_index;
      tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                   &semantic_name, &semantic_index);
      input_semantic_name[slot] = semantic_name;
      input_semantic_index[slot] = semantic_index;
   }

   /* Also add patch inputs. */
   for (attr = 0; attr < 32; attr++) {
      if (prog->info.patch_inputs_read & (1u << attr)) {
         GLuint slot = num_inputs++;
         GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;

         inputMapping[patch_attr] = slot;
         inputSlotToAttr[slot] = patch_attr;
         input_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
         input_semantic_index[slot] = attr;
      }
   }

   /* initialize output semantics to defaults */
   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
      output_semantic_name[i] = TGSI_SEMANTIC_GENERIC;
      output_semantic_index[i] = 0;
   }

   /*
    * Determine number of outputs, the (default) output register
    * mapping and the semantic information for each output.
    */
   for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (prog->info.outputs_written & BITFIELD64_BIT(attr)) {
         GLuint slot = num_outputs++;

         outputMapping[attr] = slot;

         unsigned semantic_name, semantic_index;
         tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
                                      &semantic_name, &semantic_index);
         output_semantic_name[slot] = semantic_name;
         output_semantic_index[slot] = semantic_index;
      }
   }

   /* Also add patch outputs. */
   for (attr = 0; attr < 32; attr++) {
      if (prog->info.patch_outputs_written & (1u << attr)) {
         GLuint slot = num_outputs++;
         GLuint patch_attr = VARYING_SLOT_PATCH0 + attr;

         outputMapping[patch_attr] = slot;
         output_semantic_name[slot] = TGSI_SEMANTIC_PATCH;
         output_semantic_index[slot] = attr;
      }
   }

   st_translate_program(st->ctx,
                        stage,
                        ureg,
                        stp->glsl_to_tgsi,
                        prog,
                        /* inputs */
                        num_inputs,
                        inputMapping,
                        inputSlotToAttr,
                        input_semantic_name,
                        input_semantic_index,
                        NULL,
                        /* outputs */
                        num_outputs,
                        outputMapping,
                        output_semantic_name,
                        output_semantic_index);

   stp->state.tokens = ureg_get_tokens(ureg, NULL);

   ureg_destroy(ureg);

   st_translate_stream_output_info(prog);

   st_store_ir_in_disk_cache(st, prog, false);

   if (ST_DEBUG & DEBUG_PRINT_IR && ST_DEBUG & DEBUG_MESA)
      _mesa_print_program(prog);

   free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
   stp->glsl_to_tgsi = NULL;
   return true;
}


/**
 * Get/create a basic program variant.
 */
struct st_variant *
st_get_common_variant(struct st_context *st,
                      struct st_program *prog,
                      const struct st_common_variant_key *key)
{
   struct pipe_context *pipe = st->pipe;
   struct st_variant *v;
   struct pipe_shader_state state = {0};

   /* Search for existing variant */
   for (v = prog->variants; v; v = v->next) {
      if (memcmp(&st_common_variant(v)->key, key, sizeof(*key)) == 0)
         break;
   }

   if (!v) {
      /* create new */
      v = (struct st_variant*)CALLOC_STRUCT(st_common_variant);
      if (v) {
         if (prog->state.type == PIPE_SHADER_IR_NIR) {
            bool finalize = false;

            state.type = PIPE_SHADER_IR_NIR;
            state.ir.nir = get_nir_shader(st, prog);

            if (key->clamp_color) {
               NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
               finalize = true;
            }

            state.stream_output = prog->state.stream_output;

            if (finalize || !st->allow_st_finalize_nir_twice) {
               st_finalize_nir(st, &prog->Base, prog->shader_program,
                               state.ir.nir, true);
            }

            if (ST_DEBUG & DEBUG_PRINT_IR)
               nir_print_shader(state.ir.nir, stderr);
         } else {
            if (key->lower_depth_clamp) {
               struct gl_program_parameter_list *params = prog->Base.Parameters;

               unsigned depth_range_const =
                  _mesa_add_state_reference(params, depth_range_state);

               const struct tgsi_token *tokens;
               tokens =
                  st_tgsi_lower_depth_clamp(prog->state.tokens,
                                            depth_range_const,
                                            key->clip_negative_one_to_one);

               if (tokens != prog->state.tokens)
                  tgsi_free_tokens(prog->state.tokens);

               prog->state.tokens = tokens;
            }
            state = prog->state;

            if (ST_DEBUG & DEBUG_PRINT_IR)
               tgsi_dump(state.tokens, 0);
         }
         /* fill in new variant */
         switch (prog->Base.info.stage) {
         case MESA_SHADER_TESS_CTRL:
            v->driver_shader = pipe->create_tcs_state(pipe, &state);
            break;
         case MESA_SHADER_TESS_EVAL:
            v->driver_shader = pipe->create_tes_state(pipe, &state);
            break;
         case MESA_SHADER_GEOMETRY:
            v->driver_shader = pipe->create_gs_state(pipe, &state);
            break;
         case MESA_SHADER_COMPUTE: {
            struct pipe_compute_state cs = {0};
            cs.ir_type = state.type;
            cs.req_local_mem = prog->Base.info.cs.shared_size;

            if (state.type == PIPE_SHADER_IR_NIR)
               cs.prog = state.ir.nir;
            else
               cs.prog = state.tokens;

            v->driver_shader = pipe->create_compute_state(pipe, &cs);
            break;
         }
         default:
            assert(!"unhandled shader type");
            free(v);
            return NULL;
         }

         st_common_variant(v)->key = *key;
         v->st = key->st;

         /* insert into list */
         v->next = prog->variants;
         prog->variants = v;
      }
   }

   return v;
}


/**
 * Vert/Geom/Frag programs have per-context variants.  Free all the
 * variants attached to the given program which match the given context.
 */
static void
destroy_program_variants(struct st_context *st, struct gl_program *target)
{
   if (!target || target == &_mesa_DummyProgram)
      return;

   struct st_program *p = st_program(target);
   struct st_variant *v, **prevPtr = &p->variants;
   bool unbound = false;

   for (v = p->variants; v; ) {
      struct st_variant *next = v->next;
      if (v->st == st) {
         if (!unbound) {
            st_unbind_program(st, p);
            unbound = true;
         }

         /* unlink from list */
         *prevPtr = next;
         /* destroy this variant */
         delete_variant(st, v, target->Target);
      }
      else {
         prevPtr = &v->next;
      }
      v = next;
   }
}


/**
 * Callback for _mesa_HashWalk.  Free all the shader's program variants
 * which match the given context.
 */
static void
destroy_shader_program_variants_cb(GLuint key, void *data, void *userData)
{
   struct st_context *st = (struct st_context *) userData;
   struct gl_shader *shader = (struct gl_shader *) data;

   switch (shader->Type) {
   case GL_SHADER_PROGRAM_MESA:
      {
         struct gl_shader_program *shProg = (struct gl_shader_program *) data;
         GLuint i;

         for (i = 0; i < ARRAY_SIZE(shProg->_LinkedShaders); i++) {
            if (shProg->_LinkedShaders[i])
               destroy_program_variants(st, shProg->_LinkedShaders[i]->Program);
         }
      }
      break;
   case GL_VERTEX_SHADER:
   case GL_FRAGMENT_SHADER:
   case GL_GEOMETRY_SHADER:
   case GL_TESS_CONTROL_SHADER:
   case GL_TESS_EVALUATION_SHADER:
   case GL_COMPUTE_SHADER:
      break;
   default:
      assert(0);
   }
}


/**
 * Callback for _mesa_HashWalk.  Free all the program variants which match
 * the given context.
 */
static void
destroy_program_variants_cb(GLuint key, void *data, void *userData)
{
   struct st_context *st = (struct st_context *) userData;
   struct gl_program *program = (struct gl_program *) data;
   destroy_program_variants(st, program);
}


/**
 * Walk over all shaders and programs to delete any variants which
 * belong to the given context.
 * This is called during context tear-down.
 */
void
st_destroy_program_variants(struct st_context *st)
{
   /* If shaders can be shared with other contexts, the last context will
    * call DeleteProgram on all shaders, releasing everything.
    */
   if (st->has_shareable_shaders)
      return;

   /* ARB vert/frag program */
   _mesa_HashWalk(st->ctx->Shared->Programs,
                  destroy_program_variants_cb, st);

   /* GLSL vert/frag/geom shaders */
   _mesa_HashWalk(st->ctx->Shared->ShaderObjects,
                  destroy_shader_program_variants_cb, st);
}


/**
 * Compile one shader variant.
 */
static void
st_precompile_shader_variant(struct st_context *st,
                             struct gl_program *prog)
{
   switch (prog->Target) {
   case GL_VERTEX_PROGRAM_ARB: {
      struct st_program *p = (struct st_program *)prog;
      struct st_common_variant_key key;

      memset(&key, 0, sizeof(key));

      key.st = st->has_shareable_shaders ? NULL : st;
      st_get_vp_variant(st, p, &key);
      break;
   }

   case GL_FRAGMENT_PROGRAM_ARB: {
      struct st_program *p = (struct st_program *)prog;
      struct st_fp_variant_key key;

      memset(&key, 0, sizeof(key));

      key.st = st->has_shareable_shaders ? NULL : st;
      st_get_fp_variant(st, p, &key);
      break;
   }

   case GL_TESS_CONTROL_PROGRAM_NV:
   case GL_TESS_EVALUATION_PROGRAM_NV:
   case GL_GEOMETRY_PROGRAM_NV:
   case GL_COMPUTE_PROGRAM_NV: {
      struct st_program *p = st_program(prog);
      struct st_common_variant_key key;

      memset(&key, 0, sizeof(key));

      key.st = st->has_shareable_shaders ? NULL : st;
      st_get_common_variant(st, p, &key);
      break;
   }

   default:
      assert(0);
   }
}

void
st_serialize_nir(struct st_program *stp)
{
   if (!stp->serialized_nir) {
      struct blob blob;
      size_t size;

      blob_init(&blob);
      nir_serialize(&blob, stp->Base.nir, false);
      blob_finish_get_buffer(&blob, &stp->serialized_nir, &size);
      stp->serialized_nir_size = size;
   }
}
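
/* The deserializing counterpart is get_nir_shader() above, which pairs
 * blob_reader_init() with nir_deserialize() on this buffer whenever an
 * additional variant needs its own copy of the shader.
 */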

void
st_finalize_program(struct st_context *st, struct gl_program *prog)
{
   if (st->current_program[prog->info.stage] == prog) {
      if (prog->info.stage == MESA_SHADER_VERTEX)
         st->dirty |= ST_NEW_VERTEX_PROGRAM(st, (struct st_program *)prog);
      else
         st->dirty |= ((struct st_program *)prog)->affected_states;
   }

   if (prog->nir) {
      nir_sweep(prog->nir);

      /* This is only needed for ARB_vp/fp programs and when the disk cache
       * is disabled.  If the disk cache is enabled, GLSL programs are
       * serialized in write_nir_to_cache.
       */
      st_serialize_nir(st_program(prog));
   }

   /* Create Gallium shaders now instead of on demand. */
   if (ST_DEBUG & DEBUG_PRECOMPILE ||
       st->shader_has_one_variant[prog->info.stage])
      st_precompile_shader_variant(st, prog);
}