nir/lower_io_to_vector: add flat mode
[mesa.git] src/compiler/nir/nir_lower_io_to_vector.c
/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location. It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */

/* FRAG_RESULT_MAX+1 instead of just FRAG_RESULT_MAX because of how this pass
 * handles dual source blending */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX+1)

static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more than
    * one render target is supported, but it seems no driver supports more than
    * one. */
   return var->data.location + var->data.index;
}

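/* Returns the type as seen by a single vertex: for per-vertex I/O the outer
 * per-vertex array is stripped and, if num_vertices is non-NULL, its length
 * is returned through it; otherwise the type is returned unchanged and
 * *num_vertices is set to 0. */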
static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

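/* Rebuilds the given type with the innermost vector resized to
 * num_components, preserving any enclosing array dimensions. */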
static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      return glsl_vector_type(glsl_get_base_type(type), num_components);
   }
}

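/* Returns true if the two variables can be combined into one vector variable.
 * With same_array_structure the array dimensions must match exactly (used for
 * the per-component merging); without it only the innermost types are
 * compared (used for the flat, whole-slot merging). */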
static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_per_vertex_io(a, shader->info.stage) !=
       nir_is_per_vertex_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64-bit and 16-bit support? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       a->data.interpolation != b->data.interpolation)
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   return true;
}

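/* Scans consecutive locations starting at *loc for variables that can all be
 * merged into a single flat variable. *loc is advanced past the scanned slots
 * and, if more than one mergeable variable was found, an array-of-vec4 type
 * covering them is returned (the per-vertex array, if any, is added by the
 * caller). Returns NULL when nothing can be merged at this location. */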
static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }
      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}

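/* Creates the merged variables for one list of inputs or outputs.
 * new_vars[slot][component] maps each original location/component to its
 * replacement, and flat_vars marks slots covered by a flat array-of-vec4
 * variable. Returns true if anything was merged. */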
static bool
create_new_io_vars(nir_shader *shader, struct exec_list *io_list,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS])
{
   if (exec_list_is_empty(io_list))
      return false;

   nir_variable *old_vars[MAX_SLOTS][4] = {{0}};

   nir_foreach_variable(var, io_list) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
   }

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         int first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++;
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            old_vars[loc][i] = NULL;
         }

         old_vars[loc][first] = var;
      }
   }

   /* "flat" mode: tries to ensure there is at most one variable per slot by
    * merging variables into vec4s
    */
   for (unsigned loc = 0; loc < MAX_SLOTS;) {
      nir_variable *first_var;
      unsigned num_vertices;
      unsigned new_loc = loc;
      const struct glsl_type *flat_type =
         get_flat_type(shader, old_vars, &new_loc, &first_var, &num_vertices);
      if (flat_type) {
         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(first_var, shader);
         var->data.location_frac = 0;
         if (num_vertices)
            var->type = glsl_array_type(flat_type, num_vertices, 0);
         else
            var->type = flat_type;

         nir_shader_add_variable(shader, var);
         for (unsigned i = 0; i < glsl_get_length(flat_type); i++) {
            for (unsigned j = 0; j < 4; j++)
               new_vars[loc + i][j] = var;
            flat_vars[loc + i] = true;
         }
      }
      loc = new_loc;
   }

   return merged_any_vars;
}

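/* Recreates the old deref chain on top of the new, merged variable. */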
static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

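/* Walks up the deref chain and returns base plus, for each array deref,
 * index * the attribute-slot count of one element, i.e. the flattened slot
 * offset the deref selects within the flat variable. */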
static nir_ssa_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
                  bool vs_in)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_ssa_def *index = nir_i2i(b, deref->arr.index.ssa,
                                   deref->dest.ssa.bit_size);
      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in),
         nir_imul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}

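/* Builds a deref into the flat array-of-vec4 variable: the per-vertex index
 * first (if any), then the flattened slot index computed from the old deref
 * chain plus "base", the slot offset of the old variable within the new one. */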
static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   if (nir_is_per_vertex_io(new_var, shader->info.stage)) {
      assert(leader->deref_type == nir_deref_type_array);
      nir_ssa_def *index = leader->arr.index.ssa;
      leader = nir_deref_instr_parent(leader);
      deref = nir_build_deref_array(b, deref, index);
   }

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(
      b, deref, build_array_index(b, leader, nir_imm_int(b, base), vs_in));
}

static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = {{0}};
   nir_variable *new_outputs[MAX_SLOTS][4] = {{0}};
   bool flat_inputs[MAX_SLOTS] = {0};
   bool flat_outputs[MAX_SLOTS] = {0};

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs. We don't do those */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->inputs,
                              new_inputs, flat_inputs))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->outputs,
                              new_outputs, flat_outputs))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics. Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want. Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!(old_deref->mode & modes))
               break;

            if (old_deref->mode == nir_var_shader_out)
               assert(b.shader->info.stage == MESA_SHADER_TESS_CTRL ||
                      b.shader->info.stage == MESA_SHADER_FRAGMENT);

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_deref->mode == nir_var_shader_in ?
                                    new_inputs[loc][old_frac] :
                                    new_outputs[loc][old_frac];
            bool flat = old_deref->mode == nir_var_shader_in ?
                        flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

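            /* Mask of the components read by the original load, positioned at
             * the old variable's location_frac within its location; shifted by
             * new_frac below to pick channels out of the merged load. */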
            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->dest.ssa.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
                                                vec4_comp_mask >> new_frac);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                           nir_src_for_ssa(new_vec),
                                           new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (old_deref->mode != nir_var_shader_out)
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            assert(intrin->src[1].is_ssa);
            nir_ssa_def *old_value = intrin->src[1].ssa;
            nir_ssa_def *comps[4];
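            /* Gather each component of the merged store from the original
             * value where the old write mask covers it; everything else is
             * left undefined and masked out by the shifted write mask below. */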
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_channel(&b, old_value,
                                         new_frac + c - old_frac);
               } else {
                  comps[c] = nir_ssa_undef(&b, old_value->num_components,
                                           old_value->bit_size);
               }
            }
            nir_ssa_def *new_value = nir_vec(&b, comps, intrin->num_components);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                  nir_src_for_ssa(new_value));

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}

bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_io_to_vector_impl(function->impl, modes);
   }

   return progress;
}