/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location. It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */
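
/* Illustrative sketch (not from the original sources): given two fragment
 * shader inputs sharing location 1,
 *
 *    layout(location = 1, component = 0) in vec2 a;
 *    layout(location = 1, component = 2) in vec2 b;
 *
 * this pass replaces them with a single vec4 variable at location 1 and
 * rewrites every load of `a` or `b` into a vec4 load followed by a channel
 * select, so that later passes can emit one vector nir_load_input.
 */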

/* FRAG_RESULT_MAX+1 instead of just FRAG_RESULT_MAX because of how this pass
 * handles dual source blending */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX+1)

static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more than
    * one render target is supported, but it seems no driver supports more than
    * one. */
   return var->data.location + var->data.index;
}
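
/* Worked example (illustrative only): with dual-source blending
 * (ARB_blend_func_extended) a fragment shader can declare
 *
 *    layout(location = 0, index = 0) out vec4 color0;
 *    layout(location = 0, index = 1) out vec4 color1;
 *
 * Both have data.location == FRAG_RESULT_DATA0, but data.index is 0 and 1,
 * so get_slot() keeps them in distinct slots; that is also why MAX_SLOTS
 * above uses FRAG_RESULT_MAX+1 rather than FRAG_RESULT_MAX.
 */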

static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      return glsl_vector_type(glsl_get_base_type(type), num_components);
   }
}
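
/* For example (illustrative only): resize_array_vec_type(vec2[3][2], 4) walks
 * the array dimensions and rebuilds them around a widened vector, yielding
 * vec4[3][2]; a plain vec2 with num_components == 4 simply becomes vec4.
 */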

static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   if (a->data.compact || b->data.compact)
      return false;

   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_per_vertex_io(a, shader->info.stage) !=
       nir_is_per_vertex_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64/16bit support ? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       a->data.interpolation != b->data.interpolation)
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   return true;
}
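
/* Illustrative examples of the rules above (not from the original file): a
 * 32-bit `flat in float` at component 0 and a `flat in vec2` at component 1
 * of the same location can merge, while a `smooth in float` and a
 * `flat in float` cannot (different interpolation in a fragment shader), and
 * neither can a float and an int (different base types) or anything 16- or
 * 64-bit wide.
 */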

static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }
      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   if (slots == 1)
      return glsl_vector_type(base, 4);
   else
      return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}
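
/* Sketch of what get_flat_type() produces (illustrative): if locations 4 and
 * 5 each hold a couple of scalar/vec2 varyings that all pass
 * variables_can_merge() with same_array_structure == false, the scan covers
 * two slots and returns vec4[2], which create_new_io_vars() below then uses
 * as one "flat" variable covering locations 4-5.
 */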

static bool
create_new_io_vars(nir_shader *shader, struct exec_list *io_list,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS])
{
   if (exec_list_is_empty(io_list))
      return false;

   nir_variable *old_vars[MAX_SLOTS][4] = {{0}};

   nir_foreach_variable(var, io_list) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
   }

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         int first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++;
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            old_vars[loc][i] = NULL;
         }

         old_vars[loc][first] = var;
      }
   }

   /* "flat" mode: tries to ensure there is at most one variable per slot by
    * merging variables into vec4s
    */
   for (unsigned loc = 0; loc < MAX_SLOTS;) {
      nir_variable *first_var;
      unsigned num_vertices;
      unsigned new_loc = loc;
      const struct glsl_type *flat_type =
         get_flat_type(shader, old_vars, &new_loc, &first_var, &num_vertices);
      if (flat_type) {
         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(first_var, shader);
         var->data.location_frac = 0;
         if (num_vertices)
            var->type = glsl_array_type(flat_type, num_vertices, 0);
         else
            var->type = flat_type;

         nir_shader_add_variable(shader, var);
         for (unsigned i = 0; i < glsl_get_length(flat_type); i++) {
            for (unsigned j = 0; j < 4; j++)
               new_vars[loc + i][j] = var;
            flat_vars[loc + i] = true;
         }
      }
      loc = new_loc;
   }

   return merged_any_vars;
}

static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

static nir_ssa_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
                  bool vs_in)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_ssa_def *index = nir_i2i(b, deref->arr.index.ssa,
                                   deref->dest.ssa.bit_size);
      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in),
         nir_amul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}
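
/* Example of the index computation above (illustrative): for a deref chain
 * var[i][j] where each innermost element occupies one attribute slot and the
 * inner array has length 3, build_array_index() emits roughly
 * base + i*3 + j, i.e. the flattened slot offset relative to the start of
 * the merged variable.
 */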

static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   if (nir_is_per_vertex_io(new_var, shader->info.stage)) {
      assert(leader->deref_type == nir_deref_type_array);
      nir_ssa_def *index = leader->arr.index.ssa;
      leader = nir_deref_instr_parent(leader);
      deref = nir_build_deref_array(b, deref, index);
   }

   if (!glsl_type_is_array(deref->type))
      return deref;

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(
      b, deref, build_array_index(b, leader, nir_imm_int(b, base), vs_in));
}

static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = {{0}};
   nir_variable *new_outputs[MAX_SLOTS][4] = {{0}};
   bool flat_inputs[MAX_SLOTS] = {0};
   bool flat_outputs[MAX_SLOTS] = {0};

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs. We don't do those */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->inputs,
                              new_inputs, flat_inputs))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->outputs,
                              new_outputs, flat_outputs))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics. Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want. Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
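   /* Rough shape of the rewrite performed below (illustrative, not emitted
    * verbatim): a scalar load such as
    *
    *    x = load_deref(b)            // b lived in component 2 of the slot
    *
    * becomes
    *
    *    tmp = load_deref(merged)     // vec4 load of the merged variable
    *    x   = channel(tmp, 2)        // ALU select of the original channel
    *
    * and a scalar store becomes a vec4 store whose write mask covers only
    * the original component.
    */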
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!(old_deref->mode & modes))
               break;

            if (old_deref->mode == nir_var_shader_out)
               assert(b.shader->info.stage == MESA_SHADER_TESS_CTRL ||
                      b.shader->info.stage == MESA_SHADER_FRAGMENT);

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_deref->mode == nir_var_shader_in ?
                                    new_inputs[loc][old_frac] :
                                    new_outputs[loc][old_frac];
            bool flat = old_deref->mode == nir_var_shader_in ?
                        flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->dest.ssa.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
                                                vec4_comp_mask >> new_frac);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                           nir_src_for_ssa(new_vec),
                                           new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (old_deref->mode != nir_var_shader_out)
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            assert(intrin->src[1].is_ssa);
            nir_ssa_def *old_value = intrin->src[1].ssa;
            nir_ssa_def *comps[4];
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_channel(&b, old_value,
                                         new_frac + c - old_frac);
               } else {
                  comps[c] = nir_ssa_undef(&b, old_value->num_components,
                                           old_value->bit_size);
               }
            }
            nir_ssa_def *new_value = nir_vec(&b, comps, intrin->num_components);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                  nir_src_for_ssa(new_value));

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}

bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_io_to_vector_impl(function->impl, modes);
   }

   return progress;
}
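
/* Typical driver usage (a sketch, assuming the standard NIR_PASS helpers from
 * nir.h; exact call sites vary per driver):
 *
 *    NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
 *    NIR_PASS_V(nir, nir_lower_io_to_temporaries,
 *               nir_shader_get_entrypoint(nir), true, false);
 *
 * Running nir_lower_io_to_temporaries afterwards cleans up the partial,
 * write-masked stores this pass introduces, as noted in the comments above.
 */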