anv: Clean up descriptor set layouts
src/intel/vulkan/anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

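/* This pass lowers Vulkan descriptor set/binding references in a NIR shader
 * to flat binding-table and sampler-table indices, based on the pipeline
 * layout and the pipeline's bind map.  It runs in two phases: first it walks
 * the shader to record which (set, binding) pairs are actually used, then it
 * assigns table slots for those bindings and rewrites the resource, image,
 * texture, and load_constant instructions to use them.
 */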
struct apply_pipeline_layout_state {
   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   unsigned first_image_uniform;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};

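/* Helpers for the gather phase: mark a (set, binding) pair as used, either
 * directly or by chasing a variable, deref, or texture source back to its
 * descriptor binding.
 */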
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   BITSET_SET(state->set[set].used, binding);
}

static void
add_var_binding(struct apply_pipeline_layout_state *state, nir_variable *var)
{
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   add_var_binding(state, nir_deref_instr_get_variable(deref));
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

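/* First pass over a block: record every descriptor binding referenced by
 * resource-index intrinsics, image intrinsics, and texture instructions, and
 * note whether the shader loads from its constant data (load_constant).
 */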
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

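/* Turn a vulkan_resource_index intrinsic into a flat binding-table index:
 * the surface slot assigned to the binding plus the array index, clamped to
 * the binding's array size when the index is constant or bounds checking is
 * enabled.
 */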
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *block_index = nir_iadd_imm(b, array_index, surface_index);

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential.  A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *new_index = nir_iadd(b, intrin->src[0].ssa,
                                     intrin->src[1].ssa);

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

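/* A load_vulkan_descriptor becomes an (index, 0) vec2 in the
 * nir_address_format_vk_index_offset model; the index component was already
 * lowered to a binding-table entry by lower_res_index_intrinsic above.
 */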
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_vk_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *vec2 = nir_vec2(b, intrin->src[0].ssa, nir_imm_int(b, 0));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(vec2));
   nir_instr_remove(&intrin->instr);
}

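/* Lower image intrinsics.  image_deref_load_param_intel becomes a
 * load_uniform from the brw_image_param block pushed for each image by the
 * main entry point below; every other image intrinsic gets its deref replaced
 * with a flat binding-table index via brw_nir_rewrite_image_intrinsic().
 */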
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
   } else {
      index = nir_imm_int(b, 0);
   }

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);

      nir_intrinsic_set_base(load, state->first_image_uniform +
                                   state->set[set].image_offsets[binding] *
                                   BRW_IMAGE_PARAM_SIZE * 4);
      nir_intrinsic_set_range(load, array_size * BRW_IMAGE_PARAM_SIZE * 4);

      const unsigned param = nir_intrinsic_base(intrin);
      nir_ssa_def *offset =
         nir_imul(b, index, nir_imm_int(b, BRW_IMAGE_PARAM_SIZE * 4));
      offset = nir_iadd(b, offset, nir_imm_int(b, param * 16));
      load->src[0] = nir_src_for_ssa(offset);

      load->num_components = intrin->dest.ssa.num_components;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(&load->dest.ssa));
   } else {
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      index = nir_iadd(b, index, nir_imm_int(b, binding_offset));
      brw_nir_rewrite_image_intrinsic(intrin, index);
   }
}

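/* Shader constant data is exposed as a UBO-style surface, so a load_constant
 * becomes a load_ubo from the surface reserved at constants_offset, with the
 * intrinsic's base folded into the offset.
 */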
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

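/* Rewrite one texture or sampler deref source.  Constant array indices are
 * folded into the base texture/sampler index and the deref source is dropped;
 * dynamic indices are turned into a texture/sampler offset source instead.
 */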
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of the
          *    shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

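/* Fetch and drop the YCbCr plane source, if any; the plane simply biases the
 * texture and sampler indices chosen in lower_tex below.
 */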
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

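/* Lower a texture instruction: resolve its texture and sampler derefs to
 * table indices and apply the plane offset for multi-planar formats.
 */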
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

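/* Second pass over a block: dispatch each descriptor-related instruction to
 * the lowering helpers above.
 */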
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

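/* Fill one vec4's worth of uniform params: n push-constant dwords starting at
 * the given byte offset, padded out to four entries with zeros.
 */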
static void
setup_vec4_uniform_value(uint32_t *params, uint32_t offset, unsigned n)
{
   for (unsigned i = 0; i < n; ++i)
      params[i] = ANV_PARAM_PUSH(offset + i * sizeof(uint32_t));

   for (unsigned i = n; i < 4; ++i)
      params[i] = BRW_PARAM_BUILTIN_ZERO;
}

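/* Main entry point.  Gathers the bindings used by the shader, assigns surface,
 * sampler, and image-param slots in the bind map for just those bindings,
 * pushes brw_image_param data for any images that need it, and finally
 * rewrites the shader to use the assigned slots.
 */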
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
            state.set[set].image_offsets[b] = map->image_param_count;
            map->image_param_count += binding->array_size;
         }
      }
   }

   if (map->image_param_count > 0) {
      assert(map->image_param_count <= MAX_GEN8_IMAGES);
      assert(shader->num_uniforms == prog_data->nr_params * 4);
      state.first_image_uniform = shader->num_uniforms;
      uint32_t *param = brw_stage_prog_data_add_params(prog_data,
                                                       map->image_param_count *
                                                       BRW_IMAGE_PARAM_SIZE);
      struct anv_push_constants *null_data = NULL;
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_param_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                                  (uintptr_t)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
                                  (uintptr_t)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                                  (uintptr_t)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
                                  (uintptr_t)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                                  (uintptr_t)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }
      assert(param == prog_data->param + prog_data->nr_params);

      shader->num_uniforms += map->image_param_count *
                              BRW_IMAGE_PARAM_SIZE * 4;
      assert(shader->num_uniforms == prog_data->nr_params * 4);
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}