/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "glsl/nir/nir_builder.h"

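/*
 * This pass lowers UBO/SSBO access through descriptors that use dynamic
 * offsets (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC and
 * VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC).  For every load/store that
 * goes through such a binding, we load the (offset, range) pair for the
 * buffer from the push constant block, add the offset into the access
 * offset, and wrap the access in an if that checks the final offset
 * against the range so that a stale dynamic offset can never read or
 * write out of bounds.  Roughly (a sketch, not exact NIR syntax):
 *
 *    vec4 val = load_ubo(block, 16)
 *
 * becomes:
 *
 *    vec2 dyn = load_uniform(...)   // dyn.x = offset, dyn.y = range
 *    if (uge(dyn.y, 16)) {
 *       vec4 tmp = load_ubo_indirect(block, iadd(16, dyn.x))
 *    }
 *    vec4 val = phi(tmp, vec4(0))
 */
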
struct apply_dynamic_offsets_state {
   nir_shader *shader;
   nir_builder builder;

   VkShaderStage stage;
   struct anv_pipeline_layout *layout;

   /* First uniform slot of the dynamic (offset, range) pairs appended to
    * the push constant block.
    */
   uint32_t indices_start;
};

static bool
apply_dynamic_offsets_block(nir_block *block, void *void_state)
{
   struct apply_dynamic_offsets_state *state = void_state;
   struct anv_descriptor_set_layout *set_layout;

   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* Which source of the intrinsic holds the block (descriptor) index */
      unsigned block_idx_src;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ubo_indirect:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ssbo_indirect:
         block_idx_src = 0;
         break;
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_store_ssbo_indirect:
         block_idx_src = 1;
         break;
      default:
         continue; /* not a UBO/SSBO access */
      }

      /* Chase the block index back to the vulkan_resource_index intrinsic
       * to recover the set and binding it came from.
       */
      nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
      assert(res_instr->type == nir_instr_type_intrinsic);
      nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
      assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

      unsigned set = res_intrin->const_index[0];
      unsigned binding = res_intrin->const_index[1];

      set_layout = state->layout->set[set].layout;
      if (set_layout->binding[binding].dynamic_offset_index < 0)
         continue;

      b->cursor = nir_before_instr(&intrin->instr);

      int indirect_src;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo_indirect:
      case nir_intrinsic_load_ssbo_indirect:
         indirect_src = 1;
         break;
      case nir_intrinsic_store_ssbo_indirect:
         indirect_src = 2;
         break;
      default:
         indirect_src = -1;
         break;
      }
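
      /* For accesses that are already indirect, the original indirect
       * source gets folded into the new offset below, so the indirect
       * variants take the same path as the direct ones from here on.
       */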

      /* First, we need to generate the uniform load for the buffer offset */
      uint32_t index = state->layout->set[set].dynamic_offset_start +
                       set_layout->binding[binding].dynamic_offset_index;

      nir_const_value *const_arr_idx =
         nir_src_as_const_value(res_intrin->src[0]);

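      /* Each dynamic buffer binding occupies two consecutive 32-bit uniform
       * slots holding its (offset, range) pair, hence the factor of 2 on
       * both the pair index and the descriptor array index.  A non-constant
       * array index forces an indirect uniform load.
       */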
      nir_intrinsic_op offset_load_op;
      if (const_arr_idx)
         offset_load_op = nir_intrinsic_load_uniform;
      else
         offset_load_op = nir_intrinsic_load_uniform_indirect;

      nir_intrinsic_instr *offset_load =
         nir_intrinsic_instr_create(state->shader, offset_load_op);
      offset_load->num_components = 2;
      offset_load->const_index[0] = state->indices_start + index * 2;

      if (const_arr_idx) {
         offset_load->const_index[1] = const_arr_idx->u[0] * 2;
      } else {
         offset_load->const_index[1] = 0;
         offset_load->src[0] = nir_src_for_ssa(
            nir_imul(b, nir_ssa_for_src(b, res_intrin->src[0], 1),
                        nir_imm_int(b, 2)));
      }

      /* The result is a vec2: .x is the dynamic offset, .y is the range */
      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, NULL);
      nir_builder_instr_insert(b, &offset_load->instr);

      /* We calculate the full offset and don't bother with the base
       * offset.  We need the full offset for the predicate anyway.
       */
      nir_ssa_def *rel_offset = nir_imm_int(b, intrin->const_index[0]);
      if (indirect_src >= 0) {
         assert(intrin->src[indirect_src].is_ssa);
         rel_offset = nir_iadd(b, intrin->src[indirect_src].ssa, rel_offset);
      }
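
      /* Add in the dynamic offset we just loaded.  global_offset is then
       * the complete offset of this access from the start of the bound
       * buffer.
       */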
      nir_ssa_def *global_offset = nir_iadd(b, rel_offset,
                                            &offset_load->dest.ssa);

      /* Now we replace the load/store intrinsic */

      nir_intrinsic_op indirect_op;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ubo_indirect:
         indirect_op = nir_intrinsic_load_ubo_indirect;
         break;
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_ssbo_indirect:
         indirect_op = nir_intrinsic_load_ssbo_indirect;
         break;
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_store_ssbo_indirect:
         indirect_op = nir_intrinsic_store_ssbo_indirect;
         break;
      default:
         unreachable("Invalid load/store intrinsic");
      }

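      /* Rather than mutating the original intrinsic in place, we build a
       * fresh indirect copy, point its last source at the full offset we
       * just computed, and splice it into the then-branch of the bounds
       * check below.
       */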
      nir_intrinsic_instr *copy =
         nir_intrinsic_instr_create(state->shader, indirect_op);
      copy->num_components = intrin->num_components;
      copy->const_index[0] = 0; /* base offset is folded into global_offset */

      /* The indirect offset is always the last source */
      indirect_src = nir_intrinsic_infos[indirect_op].num_srcs - 1;

      for (unsigned i = 0; i < (unsigned)indirect_src; i++)
         nir_src_copy(&copy->src[i], &intrin->src[i], &copy->instr);

      copy->src[indirect_src] = nir_src_for_ssa(global_offset);
      if (nir_intrinsic_infos[indirect_op].has_dest) {
         nir_ssa_dest_init(&copy->instr, &copy->dest,
                           intrin->dest.ssa.num_components,
                           intrin->dest.ssa.name);
      }

      /* In order to avoid out-of-bounds access, we predicate the access on
       * the offset being within the range reported for the dynamic buffer.
       * Both values are non-negative integers, so use an unsigned compare.
       */
      nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
                                     rel_offset);
      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(pred);
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

      nir_instr_insert_after_cf_list(&if_stmt->then_list, &copy->instr);

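      /* A store is finished at this point.  A load must also produce a
       * value when the predicate fails, so we merge the real result with
       * zero using a phi across the if.
       */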
      if (indirect_op != nir_intrinsic_store_ssbo_indirect) {
         /* It's a load, we need a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           intrin->num_components, NULL);

         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
         src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
         src1->src = nir_src_for_ssa(&copy->dest.ssa);
         exec_list_push_tail(&phi->srcs, &src1->node);

         b->cursor = nir_after_cf_list(&if_stmt->else_list);
         nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
            (nir_const_value) { .u = { 0, 0, 0, 0 } });

         nir_phi_src *src2 = ralloc(phi, nir_phi_src);
         struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
         src2->pred = exec_node_data(nir_block, enode, cf_node.node);
         src2->src = nir_src_for_ssa(zero);
         exec_list_push_tail(&phi->srcs, &src2->node);

         nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);

         assert(intrin->dest.is_ssa);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&phi->dest.ssa));
      }

      nir_instr_remove(&intrin->instr);
   }

   return true;
}

void
anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data)
{
   struct apply_dynamic_offsets_state state = {
      .shader = shader,
      .stage = anv_vk_shader_stage_for_mesa_stage(shader->stage),
      .layout = pipeline->layout,
      .indices_start = shader->num_uniforms,
   };

   if (!state.layout || !state.layout->stage[state.stage].has_dynamic_offsets)
      return;

   nir_foreach_overload(shader, overload) {
      if (overload->impl) {
         nir_builder_init(&state.builder, overload->impl);
         nir_foreach_block(overload->impl, apply_dynamic_offsets_block, &state);
         nir_metadata_preserve(overload->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

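   /* Reserve backing uniform slots for the (offset, range) pairs.  Each
    * param entry holds the offset of the corresponding field within
    * struct anv_push_constants, computed by taking the member's address
    * relative to a NULL base; the driver later resolves these against the
    * actual push constant data when uniforms are uploaded.
    */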
   struct anv_push_constants *null_data = NULL;
   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
      prog_data->param[i * 2 + shader->num_uniforms] =
         (const gl_constant_value *)&null_data->dynamic[i].offset;
      prog_data->param[i * 2 + 1 + shader->num_uniforms] =
         (const gl_constant_value *)&null_data->dynamic[i].range;
   }

   shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 2;
}