anv,i965: Lower away image derefs in the driver
mesa.git: src/mesa/drivers/dri/i965/brw_nir_uniforms.cpp
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/brw_nir.h"
#include "compiler/glsl/ir_uniform.h"
#include "compiler/nir/nir_builder.h"
#include "brw_program.h"

static void
brw_nir_setup_glsl_builtin_uniform(nir_variable *var,
                                   const struct gl_program *prog,
                                   struct brw_stage_prog_data *stage_prog_data,
                                   bool is_scalar)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location / 4;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been set up by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(prog->Parameters,
                                            slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);

         /* If we hit a pair of identical swizzles, this means we've hit the
          * end of the builtin variable.  In scalar mode, we should just quit
          * and move on to the next one.  In vec4, we need to continue and pad
          * it out to 4 components.
          */
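         /* Illustrative example (not from the original source): a scalar
          * builtin such as gl_Fog.density typically carries the swizzle
          * XXXX, so at j == 1 we see X again; scalar mode stores one param
          * and breaks, while vec4 mode keeps going and stores component X
          * in all four slots as padding.
          */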
         if (swiz == last_swiz && is_scalar)
            break;

         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            BRW_PARAM_PARAMETER(index, swiz);
      }
   }
}

static void
setup_vec4_image_param(uint32_t *params, uint32_t idx,
                       unsigned offset, unsigned n)
{
   assert(offset % sizeof(uint32_t) == 0);
   for (unsigned i = 0; i < n; ++i)
      params[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);

   for (unsigned i = n; i < 4; ++i)
      params[i] = BRW_PARAM_BUILTIN_ZERO;
}

static void
brw_setup_image_uniform_values(gl_shader_stage stage,
                               struct brw_stage_prog_data *stage_prog_data,
                               unsigned param_start_index,
                               const gl_uniform_storage *storage)
{
   uint32_t *param = &stage_prog_data->param[param_start_index];

   for (unsigned i = 0; i < MAX2(storage->array_elements, 1); i++) {
      const unsigned image_idx = storage->opaque[stage].index + i;

      /* Upload the brw_image_param structure.  The order is expected to match
       * the BRW_IMAGE_PARAM_*_OFFSET defines.
       */
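      /* Sketch of the resulting layout (assuming BRW_IMAGE_PARAM_SIZE is six
       * vec4s, i.e. 24 dwords): each field class below lands in its own
       * vec4-aligned slot, with unused trailing components zero-padded by
       * setup_vec4_image_param(), e.g. size occupies three dwords plus one
       * BRW_PARAM_BUILTIN_ZERO.
       */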
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, surface_idx), 1);
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, offset), 2);
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, size), 3);
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, stride), 4);
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_TILING_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, tiling), 3);
      setup_vec4_image_param(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                             image_idx,
                             offsetof(brw_image_param, swizzling), 2);
      param += BRW_IMAGE_PARAM_SIZE;

      brw_mark_surface_used(
         stage_prog_data,
         stage_prog_data->binding_table.image_start + image_idx);
   }
}

static unsigned
count_uniform_storage_slots(const struct glsl_type *type)
{
   /* gl_uniform_storage can cope with one level of array, so if the
    * type is a composite type or an array where each element occupies
    * more than one slot, then we need to recursively process it.
    */
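   /* Worked example (illustrative): for struct { vec4 v; float f[3][2]; },
    * v is a single slot and f is an array whose float[2] elements each take
    * one slot, so f contributes 3 slots and the struct counts as 4 total.
    */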
   if (glsl_type_is_struct(type)) {
      unsigned location_count = 0;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);

         location_count += count_uniform_storage_slots(field_type);
      }

      return location_count;
   }

   if (glsl_type_is_array(type)) {
      const struct glsl_type *element_type = glsl_get_array_element(type);

      if (glsl_type_is_array(element_type) ||
          glsl_type_is_struct(element_type)) {
         unsigned element_count = count_uniform_storage_slots(element_type);
         return element_count * glsl_get_length(type);
      }
   }

   return 1;
}

static void
brw_nir_setup_glsl_uniform(gl_shader_stage stage, nir_variable *var,
                           const struct gl_program *prog,
                           struct brw_stage_prog_data *stage_prog_data,
                           bool is_scalar)
{
   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the same
    * order we'd walk the type, so walk the list of storage that matches the
    * range of slots covered by this variable.
    */
   unsigned uniform_index = var->data.driver_location / 4;
   unsigned num_slots = count_uniform_storage_slots(var->type);
   for (unsigned u = 0; u < num_slots; u++) {
      struct gl_uniform_storage *storage =
         &prog->sh.data->UniformStorage[var->data.location + u];

      if (storage->builtin || storage->type->is_sampler())
         continue;

      if (storage->type->is_image()) {
         brw_setup_image_uniform_values(stage, stage_prog_data,
                                        uniform_index, storage);
         uniform_index +=
            BRW_IMAGE_PARAM_SIZE * MAX2(storage->array_elements, 1);
      } else {
         gl_constant_value *components = storage->storage;
         unsigned vector_count = (MAX2(storage->array_elements, 1) *
                                  storage->type->matrix_columns);
         unsigned vector_size = storage->type->vector_elements;
         unsigned max_vector_size = 4;
         if (storage->type->base_type == GLSL_TYPE_DOUBLE ||
             storage->type->base_type == GLSL_TYPE_UINT64 ||
             storage->type->base_type == GLSL_TYPE_INT64) {
            vector_size *= 2;
            if (vector_size > 4)
               max_vector_size = 8;
         }
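
         /* Illustrative example (not from the original source): a dvec3
          * uniform has vector_size 3 * 2 = 6 32-bit components, so
          * max_vector_size becomes 8 and vec4 mode pads the vector with two
          * BRW_PARAM_BUILTIN_ZERO slots; a dvec2 (vector_size 4) fits
          * exactly and needs no padding.
          */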
         for (unsigned s = 0; s < vector_count; s++) {
            unsigned i;
            for (i = 0; i < vector_size; i++) {
               uint32_t idx = components - prog->sh.data->UniformDataSlots;
               stage_prog_data->param[uniform_index++] = BRW_PARAM_UNIFORM(idx);
               components++;
            }

            if (!is_scalar) {
               /* Pad out with zeros if needed (only needed for vec4) */
               for (; i < max_vector_size; i++) {
                  stage_prog_data->param[uniform_index++] =
                     BRW_PARAM_BUILTIN_ZERO;
               }
            }
         }
      }
   }
}

void
brw_nir_setup_glsl_uniforms(void *mem_ctx, nir_shader *shader,
                            const struct gl_program *prog,
                            struct brw_stage_prog_data *stage_prog_data,
                            bool is_scalar)
{
   unsigned nr_params = shader->num_uniforms / 4;
   stage_prog_data->nr_params = nr_params;
   stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t, nr_params);

   nir_foreach_variable(var, &shader->uniforms) {
      /* UBOs, atomics, and samplers don't take up space in the
       * uniform file.
       */
      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (var->num_state_slots > 0) {
         brw_nir_setup_glsl_builtin_uniform(var, prog, stage_prog_data,
                                            is_scalar);
      } else {
         brw_nir_setup_glsl_uniform(shader->info.stage, var, prog,
                                    stage_prog_data, is_scalar);
      }
   }
}

void
brw_nir_setup_arb_uniforms(void *mem_ctx, nir_shader *shader,
                           struct gl_program *prog,
                           struct brw_stage_prog_data *stage_prog_data)
{
   struct gl_program_parameter_list *plist = prog->Parameters;

   unsigned nr_params = plist->NumParameters * 4;
   stage_prog_data->nr_params = nr_params;
   stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t, nr_params);

   /* For ARB programs, prog_to_nir generates a single "parameters" variable
    * for all uniform data.  nir_lower_wpos_ytransform may also create an
    * additional variable.
    */
   assert(shader->uniforms.length() <= 2);

   for (unsigned p = 0; p < plist->NumParameters; p++) {
      /* Parameters should be either vec4 uniforms or single component
       * constants; matrices and other larger types should have been broken
       * down earlier.
       */
      assert(plist->Parameters[p].Size <= 4);

      unsigned i;
      for (i = 0; i < plist->Parameters[p].Size; i++)
         stage_prog_data->param[4 * p + i] = BRW_PARAM_PARAMETER(p, i);
      for (; i < 4; i++)
         stage_prog_data->param[4 * p + i] = BRW_PARAM_BUILTIN_ZERO;
   }
}

static nir_ssa_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      assert(deref->arr.index.ssa);
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      offset = nir_iadd(b, offset,
                        nir_imul(b, index, nir_imm_int(b, array_size)));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }
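
   /* Worked example (illustrative): for "image2D img[3][4]" accessed as
    * img[i][j] with elem_size 1, the loop computes offset = j + i * 4 and
    * array_size ends up 12, so the clamp below limits the index to 11.
    */
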
   /* Accessing an invalid surface index with the dataport can result in a
    * hang.  According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}

void
brw_nir_lower_glsl_images(nir_shader *shader,
                          const struct gl_program *prog)
{
   /* We put image uniforms at the end */
   nir_foreach_variable(var, &shader->uniforms) {
      if (!var->type->contains_image())
         continue;

      /* GL only allows arrays of arrays of images */
      assert(var->type->without_array()->is_image());
      const unsigned num_images = MAX2(1, var->type->arrays_of_arrays_size());

      var->data.driver_location = shader->num_uniforms;
      shader->num_uniforms += num_images * BRW_IMAGE_PARAM_SIZE * 4;
   }

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);
            struct gl_uniform_storage *storage =
               &prog->sh.data->UniformStorage[var->data.location];
            const unsigned image_var_idx =
               storage->opaque[shader->info.stage].index;

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *index = nir_iadd(&b, nir_imm_int(&b, image_var_idx),
                                          get_aoa_deref_offset(&b, deref, 1));
            brw_nir_rewrite_image_intrinsic(intrin, index);
            break;
         }

         case nir_intrinsic_image_deref_load_param_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);
            const unsigned num_images =
               MAX2(1, var->type->arrays_of_arrays_size());

            b.cursor = nir_instr_remove(&intrin->instr);

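            /* A sketch of the offset math (assuming BRW_IMAGE_PARAM_SIZE
             * counts dwords): offsets here are in bytes, each image occupies
             * BRW_IMAGE_PARAM_SIZE * 4 bytes of uniform space, and each
             * param class within it is one vec4, hence param * 16.
             */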
            const unsigned param = nir_intrinsic_base(intrin);
            nir_ssa_def *offset =
               get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4);
            offset = nir_iadd(&b, offset, nir_imm_int(&b, param * 16));

            nir_intrinsic_instr *load =
               nir_intrinsic_instr_create(b.shader,
                                          nir_intrinsic_load_uniform);
            nir_intrinsic_set_base(load, var->data.driver_location);
            nir_intrinsic_set_range(load, num_images * BRW_IMAGE_PARAM_SIZE * 4);
            load->src[0] = nir_src_for_ssa(offset);
            load->num_components = intrin->dest.ssa.num_components;
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_builder_instr_insert(&b, &load->instr);

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa));
            break;
         }

         default:
            break;
         }
      }
   }
}