/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"
/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      /* Per-vertex I/O is wrapped in an outer array with one element per
       * vertex; the slot count comes from the element type.
       */
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
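/* For illustration only (not part of the pass): a non-patch float[2]
 * varying at VARYING_SLOT_VAR3 counts as two attribute slots, so the mask
 * returned above is 0x3ull << VARYING_SLOT_VAR3, i.e. two consecutive
 * bits in inputs_read/outputs_written.
 */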
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            if (var->data.patch) {
               patches_read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            } else {
               read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            }
         }
      }
   }
}
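/* For illustration only (hypothetical GLSL): a TCS containing
 *
 *    out_val[gl_InvocationID] = ...;
 *    barrier();
 *    float neighbour = out_val[gl_InvocationID ^ 1];
 *
 * reads its own outputs, so the scan above must mark out_val as read even
 * when the TES never consumes it.
 */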
/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer,
 *                                      &producer->outputs,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" masks should be arrays of 4 uint64_t bitfields (of
 * VARYING_BIT_* values), one per possible .location_frac. Note that for
 * vector variables, only the first channel (.location_frac) is examined
 * for deciding if the variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      /* Don't touch builtins */
      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      /* Variables marked always_active_io (e.g. xfb varyings) must stay */
      if (var->data.always_active_io)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_global;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
                                        patches_read);
   progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
                                        patches_written) || progress;

   return progress;
}
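/* A minimal sketch of typical link-time usage (caller-side names are
 * illustrative, not part of this file):
 *
 *    if (nir_remove_unused_varyings(vs_nir, fs_nir)) {
 *       nir_opt_dce(vs_nir);
 *       nir_opt_dce(fs_nir);
 *    }
 *
 * The dead-code elimination step is what actually deletes the code that
 * used the demoted global variables.
 */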
static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}
#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2
static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
static void
get_slot_component_masks_and_interp_types(struct exec_list *var_list,
                                          uint8_t *comps,
                                          uint8_t *interp_type,
                                          uint8_t *interp_loc,
                                          gl_shader_stage stage,
                                          bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         unsigned elements =
            glsl_get_vector_elements(glsl_without_array(type));

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            interp_type[location + i] =
               get_interp_type(var, type, default_to_smooth_interp);
            interp_loc[location + i] = get_interp_loc(var);

            if (dual_slot) {
               if (i & 1) {
                  /* The second slot of a dual-slot type holds whatever
                   * components spilled over from the first slot.
                   */
                  comps[location + i] |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * 2) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i] |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i] |=
                  ((1 << elements) - 1) << var->data.location_frac;
            }
         }
      }
   }
}
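/* Worked example (illustrative only): a dvec3 at location_frac 0 is
 * dual-slot with elements == 3. The first slot uses num_comps = 4
 * components (comps mask 0xf) and the second slot gets
 * comps_slot2 = 3 * 2 - 4 = 2 components (mask 0x3), i.e. six 32-bit
 * components split 4 + 2 across the two slots.
 */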
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read)
{
   uint64_t out_slots_read_tmp = 0;

   /* We don't touch builtins so just copy the bitmask */
   uint64_t slots_used_tmp =
      *slots_used & (((uint64_t)1 << (VARYING_SLOT_VAR0 - 1)) - 1);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {
         assert(var->data.location - VARYING_SLOT_VAR0 < 32);

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc =
            &remap[location][var->data.location_frac];

         uint64_t slots =
            (((uint64_t)1 << num_slots) - 1) << var->data.location;
         if (slots & *slots_used)
            used_across_stages = true;

         if (slots & *out_slots_read)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages) {
               slots_used_tmp |=
                  *slots_used &
                  (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

            if (outputs_read) {
               out_slots_read_tmp |=
                  *out_slots_read &
                  (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  slots_used_tmp |= (uint64_t)1 << (var->data.location + i);

               if (outputs_read)
                  out_slots_read_tmp |=
                     (uint64_t)1 << (var->data.location + i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp;
   *out_slots_read = out_slots_read_tmp;
}
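/* For illustration only: remap[3][0] = { .component = 2,
 * .location = VARYING_SLOT_VAR1 } would move the varying currently at
 * VAR3.x to VAR1.z; entries with .location == 0 are left untouched.
 */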
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps,
                   uint8_t *interp_type, uint8_t *interp_loc,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[32][4] = {{{0}, {0}}};

   /* Create a cursor for each interpolation type */
   unsigned cursor[4] = {0};

   /* We only need to pass over one stage and we choose the consumer as it
    * seems to cause a larger reduction in instruction counts (tested on
    * i965).
    */
   nir_foreach_variable(var, input_list) {

      /* Only remap things that aren't builtins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* Skip types that require more complex packing handling.
          * TODO: add support for these types.
          */
         if (glsl_type_is_array(type) ||
             glsl_type_is_dual_slot(type) ||
             glsl_type_is_matrix(type) ||
             glsl_type_is_struct(type) ||
             glsl_type_is_64bit(type))
            continue;

         /* We ignore complex types above and all other vector types should
          * have been split into scalar variables by the lower_io_to_scalar
          * pass. The only exception should be OpenGL xfb varyings.
          */
         if (glsl_get_vector_elements(type) != 1)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         uint8_t used_comps = comps[location];

         /* If there are no empty components there is nothing more for us
          * to do.
          */
         if (used_comps == 0xf)
            continue;

         bool found_new_offset = false;
         uint8_t interp = get_interp_type(var, type, default_to_smooth_interp);
         for (; cursor[interp] < 32; cursor[interp]++) {
            uint8_t cursor_used_comps = comps[cursor[interp]];

            /* We couldn't find anywhere to pack the varying; continue on. */
            if (cursor[interp] == location &&
                (var->data.location_frac == 0 ||
                 cursor_used_comps & ((1 << (var->data.location_frac)) - 1)))
               break;

            /* We can only pack varyings with matching interpolation types */
            if (interp_type[cursor[interp]] != interp)
               continue;

            /* Interpolation loc must match also.
             * TODO: i965 can handle these if they don't match, but the
             * radeonsi nir backend handles everything as vec4s and so expects
             * this to be the same for all components. We could make this
             * check driver specific or drop it if NIR ever becomes the only
             * radeonsi backend.
             */
            if (interp_loc[cursor[interp]] != get_interp_loc(var))
               continue;

            /* If the slot is empty just skip it for now, compact_var_list()
             * can be called after this function to remove empty slots for us.
             * TODO: finishing compact_var_list() requires array and matrix
             * splitting first.
             */
            if (!cursor_used_comps)
               continue;

            uint8_t unused_comps = ~cursor_used_comps;

            for (unsigned i = 0; i < 4; i++) {
               uint8_t new_var_comps = 1 << i;
               if (unused_comps & new_var_comps) {
                  remap[location][var->data.location_frac].component = i;
                  remap[location][var->data.location_frac].location =
                     cursor[interp] + VARYING_SLOT_VAR0;

                  found_new_offset = true;

                  /* Turn off the mask for the component we are remapping */
                  if (comps[location] & 1 << var->data.location_frac) {
                     comps[location] ^= 1 << var->data.location_frac;
                     comps[cursor[interp]] |= new_var_comps;
                  }
                  break;
               }
            }

            if (found_new_offset)
               break;
         }
      }
   }

   uint64_t zero = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read);
}
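/* For illustration only: given two smooth, center-interpolated scalar
 * inputs at VAR0.x and VAR1.x, the loop above remaps the VAR1.x varying
 * to VAR0.y, freeing slot VAR1 entirely.
 */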
/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings.  At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this.  Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't.  This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint8_t comps[32] = {0};
   uint8_t interp_type[32] = {0};
   uint8_t interp_loc[32] = {0};

   get_slot_component_masks_and_interp_types(&producer->outputs, comps,
                                             interp_type, interp_loc,
                                             producer->info.stage,
                                             default_to_smooth_interp);
   get_slot_component_masks_and_interp_types(&consumer->inputs, comps,
                                             interp_type, interp_loc,
                                             consumer->info.stage,
                                             default_to_smooth_interp);

   compact_components(producer, consumer, comps, interp_type, interp_loc,
                      default_to_smooth_interp);
}
/*
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location])
            input_vars[location]->data.always_active_io = true;
      }
   }
}
static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}
static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_variable(var, &consumer->inputs) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}
static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}
static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}
static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}
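/* For illustration only: if a producer's last block ends with
 *
 *    out_a = 1.0;   // store of a load_const
 *    out_b = x;
 *    out_c = x;     // same SSA value as out_b
 *
 * then nir_link_opt_varyings() below rewrites consumer loads of out_a's
 * input to an immediate 1.0, and collapses loads of out_b's and out_c's
 * inputs into loads of a single input, leaving the now-dead varyings for
 * the other linking passes to remove.
 */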
bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values =
      _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                              _mesa_key_pointer_equal);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}