/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   unsigned first_image_uniform;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};

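/* Record that the given (set, binding) pair is referenced by the shader so
 * that binding table space is allocated for it later in this pass.
 */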
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   BITSET_SET(state->set[set].used, binding);

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding.  This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

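/* Walk one block and record every descriptor set binding referenced by a
 * resource-index intrinsic, image intrinsic, or texture instruction, and
 * note whether the shader loads from its constant data.
 */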
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

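/* Lower vulkan_resource_index to an index/offset pair.  For inline uniform
 * blocks this is the descriptor buffer surface and the offset of the data
 * within it; for everything else it's a binding table index (clamped when
 * the array index is constant or bounds checking is enabled) and a zero
 * offset.
 */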
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block.  Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                       nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

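/* Lower vulkan_resource_reindex by adding the reindex amount to the binding
 * table index component of the address.
 */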
static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential.  A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
                  nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

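/* Lower load_vulkan_descriptor to a no-op: in the 32bit_index_offset address
 * format, the resource index already is the descriptor.
 */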
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

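/* Lower get_buffer_size to take a scalar binding table index, which is what
 * the back-end compiler expects.
 */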
static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address.  The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

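/* Lower image deref intrinsics.  image_deref_load_param_intel turns into a
 * load_uniform from the image params added to prog_data in
 * anv_nir_apply_pipeline_layout; every other image intrinsic is rewritten to
 * take a binding table index.
 */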
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
   } else {
      index = nir_imm_int(b, 0);
   }

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);

      nir_intrinsic_set_base(load, state->first_image_uniform +
                                   state->set[set].image_offsets[binding] *
                                   BRW_IMAGE_PARAM_SIZE * 4);
      nir_intrinsic_set_range(load, array_size * BRW_IMAGE_PARAM_SIZE * 4);

      const unsigned param = nir_intrinsic_base(intrin);
      nir_ssa_def *offset =
         nir_imul(b, index, nir_imm_int(b, BRW_IMAGE_PARAM_SIZE * 4));
      offset = nir_iadd(b, offset, nir_imm_int(b, param * 16));
      load->src[0] = nir_src_for_ssa(offset);

      load->num_components = intrin->dest.ssa.num_components;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(&load->dest.ssa));
   } else {
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      index = nir_iadd(b, index, nir_imm_int(b, binding_offset));
      brw_nir_rewrite_image_intrinsic(intrin, index);
   }
}

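/* Lower load_constant to a UBO load from the shader constants surface set up
 * in anv_nir_apply_pipeline_layout.
 */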
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

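/* Turn a texture or sampler deref source into a binding table or sampler
 * table base index plus, for dynamic array indexing, a texture/sampler
 * offset source.
 */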
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y'CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

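/* Return the constant plane index from a nir_tex_src_plane source, if any,
 * and remove that source from the instruction.
 */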
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

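/* Lower the texture and sampler derefs on a texture instruction to binding
 * table and sampler table indices, adjusted for the YCbCr plane if present.
 */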
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

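/* Dispatch each instruction in the block to the appropriate lowering
 * function above.
 */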
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

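/* Fill out one vec4 worth of push constant params starting at the given
 * push-constant offset, zero-padding the unused components.
 */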
static void
setup_vec4_uniform_value(uint32_t *params, uint32_t offset, unsigned n)
{
   for (unsigned i = 0; i < n; ++i)
      params[i] = ANV_PARAM_PUSH(offset + i * sizeof(uint32_t));

   for (unsigned i = n; i < 4; ++i)
      params[i] = BRW_PARAM_BUILTIN_ZERO;
}

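/* Apply the pipeline layout to a shader: find which bindings are actually
 * used, build the surface, sampler, and image-param portions of the bind
 * map, add uniforms for the image params where needed, and then lower all
 * descriptor-related instructions to binding table indices.
 */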
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
            state.set[set].image_offsets[b] = map->image_param_count;
            map->image_param_count += binding->array_size;
         }
      }
   }

   if (map->image_param_count > 0) {
      assert(map->image_param_count <= MAX_GEN8_IMAGES);
      assert(shader->num_uniforms == prog_data->nr_params * 4);
      state.first_image_uniform = shader->num_uniforms;
      uint32_t *param = brw_stage_prog_data_add_params(prog_data,
                                                       map->image_param_count *
                                                       BRW_IMAGE_PARAM_SIZE);
      struct anv_push_constants *null_data = NULL;
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_param_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                                  (uintptr_t)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
                                  (uintptr_t)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                                  (uintptr_t)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
                                  (uintptr_t)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                                  (uintptr_t)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }
      assert(param == prog_data->param + prog_data->nr_params);

      shader->num_uniforms += map->image_param_count *
                              BRW_IMAGE_PARAM_SIZE * 4;
      assert(shader->num_uniforms == prog_data->nr_params * 4);
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}