/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "glsl/nir/nir_builder.h"

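/* This pass resolves Vulkan descriptor set/binding references against the
 * pipeline layout: nir_intrinsic_vulkan_resource_index intrinsics are
 * replaced with flat binding-table indices, and texture/sampler variable
 * derefs are lowered to texture_index/sampler_index values.  The struct
 * below holds the state shared by the lowering helpers for one run of the
 * pass.
 */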
struct apply_pipeline_layout_state {
   nir_shader *shader;
   nir_builder builder;

   const struct anv_pipeline_layout *layout;

   bool progress;
};

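/* Return the flat binding-table surface index for (set, binding) in the
 * current shader stage: the set's per-stage surface_start plus the
 * binding's per-stage surface_index.
 */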
static uint32_t
get_surface_index(unsigned set, unsigned binding,
                  struct apply_pipeline_layout_state *state)
{
   assert(set < state->layout->num_sets);
   struct anv_descriptor_set_layout *set_layout =
      state->layout->set[set].layout;

   gl_shader_stage stage = state->shader->stage;

   assert(binding < set_layout->binding_count);

   assert(set_layout->binding[binding].stage[stage].surface_index >= 0);

   uint32_t surface_index =
      state->layout->set[set].stage[stage].surface_start +
      set_layout->binding[binding].stage[stage].surface_index;

   assert(surface_index < state->layout->stage[stage].surface_count);

   return surface_index;
}

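/* Return the flat sampler index for (set, binding) in the current shader
 * stage.  A binding with no sampler for this stage is only expected for
 * texel fetches (nir_texop_txf), which do not use a sampler, so index 0 is
 * returned as a placeholder in that case.
 */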
static uint32_t
get_sampler_index(unsigned set, unsigned binding, nir_texop tex_op,
                  struct apply_pipeline_layout_state *state)
{
   assert(set < state->layout->num_sets);
   struct anv_descriptor_set_layout *set_layout =
      state->layout->set[set].layout;

   assert(binding < set_layout->binding_count);

   gl_shader_stage stage = state->shader->stage;

   if (set_layout->binding[binding].stage[stage].sampler_index < 0) {
      assert(tex_op == nir_texop_txf);
      return 0;
   }

   uint32_t sampler_index =
      state->layout->set[set].stage[stage].sampler_start +
      set_layout->binding[binding].stage[stage].sampler_index;

   assert(sampler_index < state->layout->stage[stage].sampler_count);

   return sampler_index;
}

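/* Lower a nir_intrinsic_vulkan_resource_index intrinsic to an actual block
 * index.  const_index[0] and const_index[1] hold the descriptor set and
 * binding; src[0] is the index into the binding's array, which is folded
 * into an immediate when constant and added at run time otherwise.
 */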
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = intrin->const_index[0];
   uint32_t binding = intrin->const_index[1];

   uint32_t surface_index = get_surface_index(set, binding, state);

   nir_const_value *const_block_idx =
      nir_src_as_const_value(intrin->src[0]);

   nir_ssa_def *block_index;
   if (const_block_idx) {
      block_index = nir_imm_int(b, surface_index + const_block_idx->u[0]);
   } else {
      block_index = nir_iadd(b, nir_imm_int(b, surface_index),
                             nir_ssa_for_src(b, intrin->src[0], 1));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}

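/* Fold the array deref (if any) on a texture or sampler variable into the
 * instruction: the constant part of the array offset is added to
 * *const_index, and an indirect offset is appended to the instruction as a
 * new source of type src_type.  Growing tex->src means moving every
 * existing source into a freshly allocated array with nir_instr_move_src
 * so the source use lists stay valid.
 */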
static void
lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
                unsigned *const_index, nir_tex_src_type src_type,
                struct apply_pipeline_layout_state *state)
{
   if (deref->deref.child) {
      assert(deref->deref.child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

      *const_index += deref_array->base_offset;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                               tex->num_srcs + 1);

         for (unsigned i = 0; i < tex->num_srcs; i++) {
            new_srcs[i].src_type = tex->src[i].src_type;
            nir_instr_move_src(&tex->instr, &new_srcs[i].src, &tex->src[i].src);
         }

         ralloc_free(tex->src);
         tex->src = new_srcs;

         /* Now we can go ahead and move the source over to being a
          * first-class texture source.
          */
         tex->src[tex->num_srcs].src_type = src_type;
         tex->num_srcs++;
         assert(deref_array->indirect.is_ssa);
         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
                               deref_array->indirect);
      }
   }
}

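/* Remove the deref's use of its indirect array index now that the index
 * (if any) has been moved onto the texture instruction itself.
 */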
static void
cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
{
   if (deref->deref.child == NULL)
      return;

   nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

   if (deref_array->deref_array_type != nir_deref_array_type_indirect)
      return;

   nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
}

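/* Lower one texture instruction: resolve its texture and sampler derefs to
 * flat surface and sampler indices and then drop the derefs.  The surface
 * index comes from the texture deref when one is present and from the
 * sampler deref otherwise.
 */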
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   /* No one should have come by and lowered it already */
   assert(tex->sampler);

   nir_deref_var *tex_deref = tex->texture ? tex->texture : tex->sampler;
   tex->texture_index =
      get_surface_index(tex_deref->var->data.descriptor_set,
                        tex_deref->var->data.binding, state);
   lower_tex_deref(tex, tex_deref, &tex->texture_index,
                   nir_tex_src_texture_offset, state);

   tex->sampler_index =
      get_sampler_index(tex->sampler->var->data.descriptor_set,
                        tex->sampler->var->data.binding, tex->op, state);
   lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
                   nir_tex_src_sampler_offset, state);

   if (tex->texture)
      cleanup_tex_deref(tex, tex->texture);
   cleanup_tex_deref(tex, tex->sampler);
   tex->texture = NULL;
   tex->sampler = NULL;
}

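/* Per-block callback for nir_foreach_block: lower every
 * vulkan_resource_index intrinsic and every texture instruction in the
 * block, recording whether anything changed.
 */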
static bool
apply_pipeline_layout_block(nir_block *block, void *void_state)
{
   struct apply_pipeline_layout_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
            lower_res_index_intrinsic(intrin, state);
            state->progress = true;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         /* All texture instructions need lowering */
         state->progress = true;
         break;
      default:
         continue;
      }
   }

   return true;
}

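/* Entry point.  Runs the lowering over every function overload that has an
 * implementation and reports whether the shader was modified.  Block-index
 * and dominance metadata are preserved since no control flow is touched.
 */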
bool
anv_nir_apply_pipeline_layout(nir_shader *shader,
                              const struct anv_pipeline_layout *layout)
{
   struct apply_pipeline_layout_state state = {
      .shader = shader,
      .layout = layout,
   };

   nir_foreach_overload(shader, overload) {
      if (overload->impl) {
         nir_builder_init(&state.builder, overload->impl);
         nir_foreach_block(overload->impl, apply_pipeline_layout_block, &state);
         nir_metadata_preserve(overload->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   return state.progress;
}