spirv: Use the same types for resource indices as pointers
[mesa.git] src/intel/vulkan/anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

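/* Per-shader state for the apply_pipeline_layout pass: the pipeline layout
 * being applied, the set of bindings the shader actually references, and the
 * binding-table, sampler, and image-param slots assigned to each of them.
 */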
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   unsigned first_image_uniform;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};

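/* Marks the given (set, binding) pair as used by the shader. */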
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   BITSET_SET(state->set[set].used, binding);

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding. This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

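/* First pass over a block: records every descriptor binding referenced
 * through a vulkan_resource_index intrinsic, an image deref, or a
 * texture/sampler deref, and notes whether load_constant is used.
 */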
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

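/* Lowers vulkan_resource_index into the nir_address_format_vk_index_offset
 * form: a vec2 whose first component is the binding's base surface slot plus
 * the (possibly clamped) array index and whose second component is a zero
 * offset.
 */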
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   /* We're using nir_address_format_vk_index_offset */
   nir_ssa_def *index =
      nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                  nir_imm_int(b, 0));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential. A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
                  nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

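/* With nir_address_format_vk_index_offset, the resource index is already the
 * descriptor value, so load_vulkan_descriptor just forwards its source.
 */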
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_vk_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_vk_index_offset model so the
    * binding table index is the first component of the address. The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

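/* Lowers image derefs to flat binding-table indices.
 * image_deref_load_param_intel is special: it becomes a load_uniform reading
 * the image params pushed as uniforms by anv_nir_apply_pipeline_layout below.
 * Everything else is handed to brw_nir_rewrite_image_intrinsic along with the
 * computed surface index.
 */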
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
   } else {
      index = nir_imm_int(b, 0);
   }

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);

      nir_intrinsic_set_base(load, state->first_image_uniform +
                                   state->set[set].image_offsets[binding] *
                                   BRW_IMAGE_PARAM_SIZE * 4);
      nir_intrinsic_set_range(load, array_size * BRW_IMAGE_PARAM_SIZE * 4);

      const unsigned param = nir_intrinsic_base(intrin);
      nir_ssa_def *offset =
         nir_imul(b, index, nir_imm_int(b, BRW_IMAGE_PARAM_SIZE * 4));
      offset = nir_iadd(b, offset, nir_imm_int(b, param * 16));
      load->src[0] = nir_src_for_ssa(offset);

      load->num_components = intrin->dest.ssa.num_components;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(&load->dest.ssa));
   } else {
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      index = nir_iadd(b, index, nir_imm_int(b, binding_offset));
      brw_nir_rewrite_image_intrinsic(intrin, index);
   }
}

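/* Shader constants live in their own surface, so load_constant becomes a
 * load_ubo from the binding-table slot recorded in constants_offset, with the
 * intrinsic's base folded into the offset.
 */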
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

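/* Replaces a texture or sampler deref source with the binding-table index it
 * resolves to. Constant array indices are folded into *base_index; dynamic
 * ones are left behind as a texture/sampler offset source.
 */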
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions when
          *    aggregated into arrays in shader code, irrespective of the
          *    shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

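/* Returns the constant plane index from a nir_tex_src_plane source, if one is
 * present, and strips that source from the instruction; returns 0 otherwise.
 */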
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

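/* Lowers the texture and sampler derefs on a tex instruction to binding-table
 * indices, offsetting both by the plane when a plane source is present.
 */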
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces. We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

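/* Second pass over a block: rewrites every descriptor-related intrinsic and
 * tex instruction using the offsets assigned in the pass entry point below.
 */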
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

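/* Fills one vec4 worth of push-constant params starting at the given byte
 * offset, zero-padding any components past n.
 */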
static void
setup_vec4_uniform_value(uint32_t *params, uint32_t offset, unsigned n)
{
   for (unsigned i = 0; i < n; ++i)
      params[i] = ANV_PARAM_PUSH(offset + i * sizeof(uint32_t));

   for (unsigned i = n; i < 4; ++i)
      params[i] = BRW_PARAM_BUILTIN_ZERO;
}

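/* Pass entry point.  Walks the shader to find the descriptor bindings that
 * are actually used, assigns binding-table, sampler, and image-param slots
 * for them (filling out the pipeline bind map), sets up push-constant image
 * params where needed, and finally lowers all descriptor access to those
 * flat indices.
 */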
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
            state.set[set].image_offsets[b] = map->image_param_count;
            map->image_param_count += binding->array_size;
         }
      }
   }

   if (map->image_param_count > 0) {
      assert(map->image_param_count <= MAX_GEN8_IMAGES);
      assert(shader->num_uniforms == prog_data->nr_params * 4);
      state.first_image_uniform = shader->num_uniforms;
      uint32_t *param = brw_stage_prog_data_add_params(prog_data,
                                                       map->image_param_count *
                                                       BRW_IMAGE_PARAM_SIZE);
      struct anv_push_constants *null_data = NULL;
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_param_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                                  (uintptr_t)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
                                  (uintptr_t)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                                  (uintptr_t)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
                                  (uintptr_t)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                                  (uintptr_t)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }
      assert(param == prog_data->param + prog_data->nr_params);

      shader->num_uniforms += map->image_param_count *
                              BRW_IMAGE_PARAM_SIZE * 4;
      assert(shader->num_uniforms == prog_data->nr_params * 4);
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}