intel/nir: Enable load/store vectorization
[mesa.git] / src / intel / compiler / brw_nir.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_nir.h"
25 #include "brw_shader.h"
26 #include "dev/gen_debug.h"
27 #include "compiler/glsl_types.h"
28 #include "compiler/nir/nir_builder.h"
29 #include "util/u_math.h"
30
31 static bool
32 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
33 GLenum primitive_mode)
34 {
35 const int location = nir_intrinsic_base(intr);
36 const unsigned component = nir_intrinsic_component(intr);
37 bool out_of_bounds;
38
39 if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
40 switch (primitive_mode) {
41 case GL_QUADS:
42 /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
43 nir_intrinsic_set_base(intr, 0);
44 nir_intrinsic_set_component(intr, 3 - component);
45 out_of_bounds = false;
46 break;
47 case GL_TRIANGLES:
48 /* gl_TessLevelInner[0] lives at DWord 4. */
49 nir_intrinsic_set_base(intr, 1);
50 out_of_bounds = component > 0;
51 break;
52 case GL_ISOLINES:
53 out_of_bounds = true;
54 break;
55 default:
56 unreachable("Bogus tessellation domain");
57 }
58 } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
59 if (primitive_mode == GL_ISOLINES) {
60 /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
61 nir_intrinsic_set_base(intr, 1);
62 nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
63 out_of_bounds = component > 1;
64 } else {
65 /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
66 nir_intrinsic_set_base(intr, 1);
67 nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
68 out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
69 }
70 } else {
71 return false;
72 }
73
74 if (out_of_bounds) {
75 if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
76 b->cursor = nir_before_instr(&intr->instr);
77 nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
78 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
79 }
80 nir_instr_remove(&intr->instr);
81 }
82
83 return true;
84 }
85
86 static bool
87 is_input(nir_intrinsic_instr *intrin)
88 {
89 return intrin->intrinsic == nir_intrinsic_load_input ||
90 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
91 intrin->intrinsic == nir_intrinsic_load_interpolated_input;
92 }
93
94 static bool
95 is_output(nir_intrinsic_instr *intrin)
96 {
97 return intrin->intrinsic == nir_intrinsic_load_output ||
98 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
99 intrin->intrinsic == nir_intrinsic_store_output ||
100 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
101 }
102
103
104 static bool
105 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
106 const struct brw_vue_map *vue_map,
107 GLenum tes_primitive_mode)
108 {
109 const bool is_passthrough_tcs = b->shader->info.name &&
110 strcmp(b->shader->info.name, "passthrough") == 0;
111
112 nir_foreach_instr_safe(instr, block) {
113 if (instr->type != nir_instr_type_intrinsic)
114 continue;
115
116 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
117
118 gl_shader_stage stage = b->shader->info.stage;
119
120 if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
121 (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
122
123 if (!is_passthrough_tcs &&
124 remap_tess_levels(b, intrin, tes_primitive_mode))
125 continue;
126
127 int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
128 assert(vue_slot != -1);
129 intrin->const_index[0] = vue_slot;
130
131 nir_src *vertex = nir_get_io_vertex_index_src(intrin);
132 if (vertex) {
133 if (nir_src_is_const(*vertex)) {
134 intrin->const_index[0] += nir_src_as_uint(*vertex) *
135 vue_map->num_per_vertex_slots;
136 } else {
137 b->cursor = nir_before_instr(&intrin->instr);
138
139 /* Multiply by the number of per-vertex slots. */
140 nir_ssa_def *vertex_offset =
141 nir_imul(b,
142 nir_ssa_for_src(b, *vertex, 1),
143 nir_imm_int(b,
144 vue_map->num_per_vertex_slots));
145
146 /* Add it to the existing offset */
147 nir_src *offset = nir_get_io_offset_src(intrin);
148 nir_ssa_def *total_offset =
149 nir_iadd(b, vertex_offset,
150 nir_ssa_for_src(b, *offset, 1));
151
152 nir_instr_rewrite_src(&intrin->instr, offset,
153 nir_src_for_ssa(total_offset));
154 }
155 }
156 }
157 }
158 return true;
159 }
160
161 void
162 brw_nir_lower_vs_inputs(nir_shader *nir,
163 const uint8_t *vs_attrib_wa_flags)
164 {
165 /* Start with the location of the variable's base. */
166 foreach_list_typed(nir_variable, var, node, &nir->inputs) {
167 var->data.driver_location = var->data.location;
168 }
169
170 /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
171 * loaded as one vec4 or dvec4 per element (or matrix column), depending on
172 * whether it is a double-precision type or not.
173 */
174 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
175 nir_lower_io_lower_64bit_to_32);
176
177 /* This pass needs actual constants */
178 nir_opt_constant_folding(nir);
179
180 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
181
182 brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
183
184 /* The last step is to remap VERT_ATTRIB_* to actual registers */
185
186 /* Whether or not we have any system generated values. gl_DrawID is not
187 * included here as it lives in its own vec4.
188 */
189 const bool has_sgvs =
190 nir->info.system_values_read &
191 (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
192 BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
193 BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
194 BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
195
196 const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
197
198 nir_foreach_function(function, nir) {
199 if (!function->impl)
200 continue;
201
202 nir_builder b;
203 nir_builder_init(&b, function->impl);
204
205 nir_foreach_block(block, function->impl) {
206 nir_foreach_instr_safe(instr, block) {
207 if (instr->type != nir_instr_type_intrinsic)
208 continue;
209
210 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
211
212 switch (intrin->intrinsic) {
213 case nir_intrinsic_load_first_vertex:
214 case nir_intrinsic_load_base_instance:
215 case nir_intrinsic_load_vertex_id_zero_base:
216 case nir_intrinsic_load_instance_id:
217 case nir_intrinsic_load_is_indexed_draw:
218 case nir_intrinsic_load_draw_id: {
219 b.cursor = nir_after_instr(&intrin->instr);
220
221 /* gl_VertexID and friends are stored by the VF as the last
222 * vertex element. We convert them to load_input intrinsics at
223 * the right location.
224 */
225 nir_intrinsic_instr *load =
226 nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
227 load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
228
229 nir_intrinsic_set_base(load, num_inputs);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_load_first_vertex:
232 nir_intrinsic_set_component(load, 0);
233 break;
234 case nir_intrinsic_load_base_instance:
235 nir_intrinsic_set_component(load, 1);
236 break;
237 case nir_intrinsic_load_vertex_id_zero_base:
238 nir_intrinsic_set_component(load, 2);
239 break;
240 case nir_intrinsic_load_instance_id:
241 nir_intrinsic_set_component(load, 3);
242 break;
243 case nir_intrinsic_load_draw_id:
244 case nir_intrinsic_load_is_indexed_draw:
245 /* gl_DrawID and IsIndexedDraw are stored right after
246 * gl_VertexID and friends if any of them exist.
247 */
248 nir_intrinsic_set_base(load, num_inputs + has_sgvs);
249 if (intrin->intrinsic == nir_intrinsic_load_draw_id)
250 nir_intrinsic_set_component(load, 0);
251 else
252 nir_intrinsic_set_component(load, 1);
253 break;
254 default:
255 unreachable("Invalid system value intrinsic");
256 }
257
258 load->num_components = 1;
259 nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
260 nir_builder_instr_insert(&b, &load->instr);
261
262 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
263 nir_src_for_ssa(&load->dest.ssa));
264 nir_instr_remove(&intrin->instr);
265 break;
266 }
267
268 case nir_intrinsic_load_input: {
269 /* Attributes come in a contiguous block, ordered by their
270 * gl_vert_attrib value. That means we can compute the slot
271 * number for an attribute by masking out the enabled attributes
272 * before it and counting the bits.
273 */
274 int attr = nir_intrinsic_base(intrin);
275 int slot = util_bitcount64(nir->info.inputs_read &
276 BITFIELD64_MASK(attr));
277 nir_intrinsic_set_base(intrin, slot);
278 break;
279 }
280
281 default:
282 break; /* Nothing to do */
283 }
284 }
285 }
286 }
287 }
288
289 void
290 brw_nir_lower_vue_inputs(nir_shader *nir,
291 const struct brw_vue_map *vue_map)
292 {
293 foreach_list_typed(nir_variable, var, node, &nir->inputs) {
294 var->data.driver_location = var->data.location;
295 }
296
297 /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
298 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
299 nir_lower_io_lower_64bit_to_32);
300
301 /* This pass needs actual constants */
302 nir_opt_constant_folding(nir);
303
304 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
305
306 nir_foreach_function(function, nir) {
307 if (!function->impl)
308 continue;
309
310 nir_foreach_block(block, function->impl) {
311 nir_foreach_instr(instr, block) {
312 if (instr->type != nir_instr_type_intrinsic)
313 continue;
314
315 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
316
317 if (intrin->intrinsic == nir_intrinsic_load_input ||
318 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
319 /* Offset 0 is the VUE header, which contains
320 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
321 * VARYING_SLOT_PSIZ [.w].
322 */
323 int varying = nir_intrinsic_base(intrin);
324 int vue_slot;
325 switch (varying) {
326 case VARYING_SLOT_PSIZ:
327 nir_intrinsic_set_base(intrin, 0);
328 nir_intrinsic_set_component(intrin, 3);
329 break;
330
331 default:
332 vue_slot = vue_map->varying_to_slot[varying];
333 assert(vue_slot != -1);
334 nir_intrinsic_set_base(intrin, vue_slot);
335 break;
336 }
337 }
338 }
339 }
340 }
341 }
342
343 void
344 brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
345 {
346 foreach_list_typed(nir_variable, var, node, &nir->inputs) {
347 var->data.driver_location = var->data.location;
348 }
349
350 nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
351 nir_lower_io_lower_64bit_to_32);
352
353 /* This pass needs actual constants */
354 nir_opt_constant_folding(nir);
355
356 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
357
358 nir_foreach_function(function, nir) {
359 if (function->impl) {
360 nir_builder b;
361 nir_builder_init(&b, function->impl);
362 nir_foreach_block(block, function->impl) {
363 remap_patch_urb_offsets(block, &b, vue_map,
364 nir->info.tess.primitive_mode);
365 }
366 }
367 }
368 }
369
370 void
371 brw_nir_lower_fs_inputs(nir_shader *nir,
372 const struct gen_device_info *devinfo,
373 const struct brw_wm_prog_key *key)
374 {
375 foreach_list_typed(nir_variable, var, node, &nir->inputs) {
376 var->data.driver_location = var->data.location;
377
378 /* Apply default interpolation mode.
379 *
380 * Everything defaults to smooth except for the legacy GL color
381 * built-in variables, which might be flat depending on API state.
382 */
383 if (var->data.interpolation == INTERP_MODE_NONE) {
384 const bool flat = key->flat_shade &&
385 (var->data.location == VARYING_SLOT_COL0 ||
386 var->data.location == VARYING_SLOT_COL1);
387
388 var->data.interpolation = flat ? INTERP_MODE_FLAT
389 : INTERP_MODE_SMOOTH;
390 }
391
392 /* On Ironlake and below, there is only one interpolation mode.
393 * Centroid interpolation doesn't mean anything on this hardware --
394 * there is no multisampling.
395 */
396 if (devinfo->gen < 6) {
397 var->data.centroid = false;
398 var->data.sample = false;
399 }
400 }
401
402 nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
403 if (key->persample_interp)
404 lower_io_options |= nir_lower_io_force_sample_interpolation;
405
406 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
407 if (devinfo->gen >= 11)
408 nir_lower_interpolation(nir, ~0);
409
410 /* This pass needs actual constants */
411 nir_opt_constant_folding(nir);
412
413 nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
414 }
415
416 void
417 brw_nir_lower_vue_outputs(nir_shader *nir)
418 {
419 nir_foreach_variable(var, &nir->outputs) {
420 var->data.driver_location = var->data.location;
421 }
422
423 nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
424 nir_lower_io_lower_64bit_to_32);
425 }
426
427 void
428 brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
429 GLenum tes_primitive_mode)
430 {
431 nir_foreach_variable(var, &nir->outputs) {
432 var->data.driver_location = var->data.location;
433 }
434
435 nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
436 nir_lower_io_lower_64bit_to_32);
437
438 /* This pass needs actual constants */
439 nir_opt_constant_folding(nir);
440
441 nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
442
443 nir_foreach_function(function, nir) {
444 if (function->impl) {
445 nir_builder b;
446 nir_builder_init(&b, function->impl);
447 nir_foreach_block(block, function->impl) {
448 remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
449 }
450 }
451 }
452 }
453
454 void
455 brw_nir_lower_fs_outputs(nir_shader *nir)
456 {
457 nir_foreach_variable(var, &nir->outputs) {
458 var->data.driver_location =
459 SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
460 SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
461 }
462
463 nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
464 }
465
466 #define OPT(pass, ...) ({ \
467 bool this_progress = false; \
468 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
469 if (this_progress) \
470 progress = true; \
471 this_progress; \
472 })
473
474 static nir_variable_mode
475 brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
476 gl_shader_stage stage)
477 {
478 nir_variable_mode indirect_mask = 0;
479
480 if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
481 indirect_mask |= nir_var_shader_in;
482 if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
483 indirect_mask |= nir_var_shader_out;
484 if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
485 indirect_mask |= nir_var_function_temp;
486
487 return indirect_mask;
488 }
489
490 void
491 brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
492 bool is_scalar, bool allow_copies)
493 {
494 nir_variable_mode indirect_mask =
495 brw_nir_no_indirect_mask(compiler, nir->info.stage);
496
497 bool progress;
498 unsigned lower_flrp =
499 (nir->options->lower_flrp16 ? 16 : 0) |
500 (nir->options->lower_flrp32 ? 32 : 0) |
501 (nir->options->lower_flrp64 ? 64 : 0);
502
503 do {
504 progress = false;
505 OPT(nir_split_array_vars, nir_var_function_temp);
506 OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
507 OPT(nir_opt_deref);
508 OPT(nir_lower_vars_to_ssa);
509 if (allow_copies) {
510 /* Only run this pass in the first call to brw_nir_optimize. Later
511 * calls assume that we've lowered away any copy_deref instructions
512 * and we don't want to introduce any more.
513 */
514 OPT(nir_opt_find_array_copies);
515 }
516 OPT(nir_opt_copy_prop_vars);
517 OPT(nir_opt_dead_write_vars);
518 OPT(nir_opt_combine_stores, nir_var_all);
519
520 if (is_scalar) {
521 OPT(nir_lower_alu_to_scalar, NULL, NULL);
522 }
523
524 OPT(nir_copy_prop);
525
526 if (is_scalar) {
527 OPT(nir_lower_phis_to_scalar);
528 }
529
530 OPT(nir_copy_prop);
531 OPT(nir_opt_dce);
532 OPT(nir_opt_cse);
533 OPT(nir_opt_combine_stores, nir_var_all);
534
535 /* Passing 0 to the peephole select pass causes it to convert
536 * if-statements that contain only move instructions in the branches
537 * regardless of the count.
538 *
539 * Passing 1 to the peephole select pass causes it to convert
540 * if-statements that contain at most a single ALU instruction (total)
541 * in both branches. Before Gen6, some math instructions were
542 * prohibitively expensive and the results of compare operations need an
543 * extra resolve step. For these reasons, this pass is more harmful
544 * than good on those platforms.
545 *
546 * For indirect loads of uniforms (push constants), we assume that array
547 * indices will nearly always be in bounds and the cost of the load is
548 * low. Therefore there shouldn't be a performance benefit to avoid it.
549 * However, in vec4 tessellation shaders, these loads operate by
550 * actually pulling from memory.
551 */
552 const bool is_vec4_tessellation = !is_scalar &&
553 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
554 nir->info.stage == MESA_SHADER_TESS_EVAL);
555 OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
556 OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
557 compiler->devinfo->gen >= 6);
558
559 OPT(nir_opt_intrinsics);
560 OPT(nir_opt_idiv_const, 32);
561 OPT(nir_opt_algebraic);
562 OPT(nir_opt_constant_folding);
563
564 if (lower_flrp != 0) {
565 if (OPT(nir_lower_flrp,
566 lower_flrp,
567 false /* always_precise */,
568 compiler->devinfo->gen >= 6)) {
569 OPT(nir_opt_constant_folding);
570 }
571
572 /* Nothing should rematerialize any flrps, so we only need to do this
573 * lowering once.
574 */
575 lower_flrp = 0;
576 }
577
578 OPT(nir_opt_dead_cf);
579 if (OPT(nir_opt_trivial_continues)) {
580 /* If nir_opt_trivial_continues makes progress, then we need to clean
581 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
582 * to make progress.
583 */
584 OPT(nir_copy_prop);
585 OPT(nir_opt_dce);
586 }
587 OPT(nir_opt_if, false);
588 OPT(nir_opt_conditional_discard);
589 if (nir->options->max_unroll_iterations != 0) {
590 OPT(nir_opt_loop_unroll, indirect_mask);
591 }
592 OPT(nir_opt_remove_phis);
593 OPT(nir_opt_undef);
594 OPT(nir_lower_pack);
595 } while (progress);
596
597 /* Workaround Gfxbench unused local sampler variable which will trigger an
598 * assert in the opt_large_constants pass.
599 */
600 OPT(nir_remove_dead_variables, nir_var_function_temp);
601 }
602
603 static unsigned
604 lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
605 {
606 assert(alu->dest.dest.is_ssa);
607 if (alu->dest.dest.ssa.bit_size >= 32)
608 return 0;
609
610 const struct brw_compiler *compiler = (const struct brw_compiler *) data;
611
612 switch (alu->op) {
613 case nir_op_idiv:
614 case nir_op_imod:
615 case nir_op_irem:
616 case nir_op_udiv:
617 case nir_op_umod:
618 case nir_op_fceil:
619 case nir_op_ffloor:
620 case nir_op_ffract:
621 case nir_op_fround_even:
622 case nir_op_ftrunc:
623 return 32;
624 case nir_op_frcp:
625 case nir_op_frsq:
626 case nir_op_fsqrt:
627 case nir_op_fpow:
628 case nir_op_fexp2:
629 case nir_op_flog2:
630 case nir_op_fsin:
631 case nir_op_fcos:
632 return compiler->devinfo->gen < 9 ? 32 : 0;
633 default:
634 return 0;
635 }
636 }
637
638 /* Does some simple lowering and runs the standard suite of optimizations
639 *
640 * This is intended to be called more-or-less directly after you get the
641 * shader out of GLSL or some other source. While it is geared towards i965,
642 * it is not at all generator-specific except for the is_scalar flag. Even
643 * there, it is safe to call with is_scalar = false for a shader that is
644 * intended for the FS backend as long as nir_optimize is called again with
645 * is_scalar = true to scalarize everything prior to code gen.
646 */
647 void
648 brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
649 const nir_shader *softfp64)
650 {
651 const struct gen_device_info *devinfo = compiler->devinfo;
652 UNUSED bool progress; /* Written by OPT */
653
654 const bool is_scalar = compiler->scalar_stage[nir->info.stage];
655
656 if (is_scalar) {
657 OPT(nir_lower_alu_to_scalar, NULL, NULL);
658 }
659
660 if (nir->info.stage == MESA_SHADER_GEOMETRY)
661 OPT(nir_lower_gs_intrinsics, false);
662
663 /* See also brw_nir_trig_workarounds.py */
664 if (compiler->precise_trig &&
665 !(devinfo->gen >= 10 || devinfo->is_kabylake))
666 OPT(brw_nir_apply_trig_workarounds);
667
668 if (devinfo->gen >= 12)
669 OPT(brw_nir_clamp_image_1d_2d_array_sizes);
670
671 static const nir_lower_tex_options tex_options = {
672 .lower_txp = ~0,
673 .lower_txf_offset = true,
674 .lower_rect_offset = true,
675 .lower_tex_without_implicit_lod = true,
676 .lower_txd_cube_map = true,
677 .lower_txb_shadow_clamp = true,
678 .lower_txd_shadow_clamp = true,
679 .lower_txd_offset_clamp = true,
680 .lower_tg4_offsets = true,
681 };
682
683 OPT(nir_lower_tex, &tex_options);
684 OPT(nir_normalize_cubemap_coords);
685
686 OPT(nir_lower_global_vars_to_local);
687
688 OPT(nir_split_var_copies);
689 OPT(nir_split_struct_vars, nir_var_function_temp);
690
691 brw_nir_optimize(nir, compiler, is_scalar, true);
692
693 OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
694 OPT(nir_lower_int64, nir->options->lower_int64_options);
695
696 OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
697
698 if (is_scalar) {
699 OPT(nir_lower_load_const_to_scalar);
700 }
701
702 /* Lower a bunch of stuff */
703 OPT(nir_lower_var_copies);
704
705 /* This needs to be run after the first optimization pass but before we
706 * lower indirect derefs away
707 */
708 if (compiler->supports_shader_constants) {
709 OPT(nir_opt_large_constants, NULL, 32);
710 }
711
712 OPT(nir_lower_system_values);
713
714 const nir_lower_subgroups_options subgroups_options = {
715 .ballot_bit_size = 32,
716 .lower_to_scalar = true,
717 .lower_vote_trivial = !is_scalar,
718 .lower_shuffle = true,
719 .lower_quad_broadcast_dynamic = true,
720 };
721 OPT(nir_lower_subgroups, &subgroups_options);
722
723 OPT(nir_lower_clip_cull_distance_arrays);
724
725 if ((devinfo->gen >= 8 || devinfo->is_haswell) && is_scalar) {
726 /* TODO: Yes, we could in theory do this on gen6 and earlier. However,
727 * that would require plumbing through support for these indirect
728 * scratch read/write messages with message registers and that's just a
729 * pain. Also, the primary benefit of this is for compute shaders which
730 * won't run on gen6 and earlier anyway.
731 *
732 * On gen7 and earlier the scratch space size is limited to 12kB.
733 * By enabling this optimization we may easily exceed this limit without
734 * having any fallback.
735 *
736 * The threshold of 128B was chosen semi-arbitrarily. The idea is that
737 * 128B per channel on a SIMD8 program is 32 registers or 25% of the
738 * register file. Any array that large is likely to cause pressure
739 * issues. Also, this value is sufficiently high that the benchmarks
740 * known to suffer from large temporary array issues are helped but
741 * nothing else in shader-db is hurt except for maybe that one kerbal
742 * space program shader.
743 */
744 OPT(nir_lower_vars_to_scratch, nir_var_function_temp, 128,
745 glsl_get_natural_size_align_bytes);
746 }
747
748 nir_variable_mode indirect_mask =
749 brw_nir_no_indirect_mask(compiler, nir->info.stage);
750 OPT(nir_lower_indirect_derefs, indirect_mask);
751
752 /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
753 * SSBOs, our back-end is capable of loading an entire vec4 at a time and
754 * we would like to take advantage of that whenever possible regardless of
755 * whether or not the app gives us full loads. This should allow the
756 * optimizer to combine UBO and SSBO load operations and save us some send
757 * messages.
758 */
759 OPT(nir_lower_array_deref_of_vec,
760 nir_var_mem_ubo | nir_var_mem_ssbo,
761 nir_lower_direct_array_deref_of_vec_load);
762
763 /* Get rid of split copies */
764 brw_nir_optimize(nir, compiler, is_scalar, false);
765 }
766
767 void
768 brw_nir_link_shaders(const struct brw_compiler *compiler,
769 nir_shader *producer, nir_shader *consumer)
770 {
771 nir_lower_io_arrays_to_elements(producer, consumer);
772 nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
773 nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
774
775 const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
776 const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
777
778 if (p_is_scalar && c_is_scalar) {
779 NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
780 NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
781 brw_nir_optimize(producer, compiler, p_is_scalar, false);
782 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
783 }
784
785 if (nir_link_opt_varyings(producer, consumer))
786 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
787
788 NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
789 NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
790
791 if (nir_remove_unused_varyings(producer, consumer)) {
792 NIR_PASS_V(producer, nir_lower_global_vars_to_local);
793 NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
794
795 /* The backend might not be able to handle indirects on
796 * temporaries so we need to lower indirects on any of the
797 * varyings we have demoted here.
798 */
799 NIR_PASS_V(producer, nir_lower_indirect_derefs,
800 brw_nir_no_indirect_mask(compiler, producer->info.stage));
801 NIR_PASS_V(consumer, nir_lower_indirect_derefs,
802 brw_nir_no_indirect_mask(compiler, consumer->info.stage));
803
804 brw_nir_optimize(producer, compiler, p_is_scalar, false);
805 brw_nir_optimize(consumer, compiler, c_is_scalar, false);
806 }
807
808 NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
809 NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
810 NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
811
812 if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
813 /* Calling lower_io_to_vector creates output variable writes with
814 * write-masks. On non-TCS outputs, the back-end can't handle it and we
815 * need to call nir_lower_io_to_temporaries to get rid of them. This,
816 * in turn, creates temporary variables and extra copy_deref intrinsics
817 * that we need to clean up.
818 */
819 NIR_PASS_V(producer, nir_lower_io_to_temporaries,
820 nir_shader_get_entrypoint(producer), true, false);
821 NIR_PASS_V(producer, nir_lower_global_vars_to_local);
822 NIR_PASS_V(producer, nir_split_var_copies);
823 NIR_PASS_V(producer, nir_lower_var_copies);
824 }
825 }
826
827 static bool
828 brw_nir_should_vectorize_mem(unsigned align, unsigned bit_size,
829 unsigned num_components, unsigned high_offset,
830 nir_intrinsic_instr *low,
831 nir_intrinsic_instr *high)
832 {
833 /* Don't combine things to generate 64-bit loads/stores. We have to split
834 * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
835 * we don't want to make a mess for the back-end.
836 */
837 if (bit_size > 32)
838 return false;
839
840 /* We can handle at most a vec4 right now. Anything bigger would get
841 * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
842 */
843 if (num_components > 4)
844 return false;
845
846 if (align < bit_size / 8)
847 return false;
848
849 return true;
850 }
851
852 static
853 bool combine_all_barriers(nir_intrinsic_instr *a,
854 nir_intrinsic_instr *b,
855 void *data)
856 {
857 /* Translation to backend IR will get rid of modes we don't care about, so
858 * no harm in always combining them.
859 *
860 * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
861 * scheduling so that it can take advantage of the different semantics.
862 */
863 nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
864 nir_intrinsic_memory_modes(b));
865 nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
866 nir_intrinsic_memory_semantics(b));
867 nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
868 nir_intrinsic_memory_scope(b)));
869 return true;
870 }
871
872 static void
873 brw_vectorize_lower_mem_access(nir_shader *nir,
874 const struct brw_compiler *compiler,
875 bool is_scalar)
876 {
877 const struct gen_device_info *devinfo = compiler->devinfo;
878 bool progress = false;
879
880 if (is_scalar) {
881 OPT(nir_opt_load_store_vectorize,
882 nir_var_mem_ubo | nir_var_mem_ssbo |
883 nir_var_mem_global | nir_var_mem_shared,
884 brw_nir_should_vectorize_mem);
885 }
886
887 OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
888
889 while (progress) {
890 progress = false;
891
892 OPT(nir_lower_pack);
893 OPT(nir_copy_prop);
894 OPT(nir_opt_dce);
895 OPT(nir_opt_cse);
896 OPT(nir_opt_algebraic);
897 OPT(nir_opt_constant_folding);
898 }
899 }
900
901 /* Prepare the given shader for codegen
902 *
903 * This function is intended to be called right before going into the actual
904 * backend and is highly backend-specific. Also, once this function has been
905 * called on a shader, it will no longer be in SSA form so most optimizations
906 * will not work.
907 */
908 void
909 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
910 bool is_scalar)
911 {
912 const struct gen_device_info *devinfo = compiler->devinfo;
913 bool debug_enabled =
914 (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));
915
916 UNUSED bool progress; /* Written by OPT */
917
918 OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
919
920 do {
921 progress = false;
922 OPT(nir_opt_algebraic_before_ffma);
923 } while (progress);
924
925 brw_nir_optimize(nir, compiler, is_scalar, false);
926
927 brw_vectorize_lower_mem_access(nir, compiler, is_scalar);
928
929 if (OPT(nir_lower_int64, nir->options->lower_int64_options))
930 brw_nir_optimize(nir, compiler, is_scalar, false);
931
932 if (devinfo->gen >= 6) {
933 /* Try and fuse multiply-adds */
934 OPT(brw_nir_opt_peephole_ffma);
935 }
936
937 if (OPT(nir_opt_comparison_pre)) {
938 OPT(nir_copy_prop);
939 OPT(nir_opt_dce);
940 OPT(nir_opt_cse);
941
942 /* Do the select peepehole again. nir_opt_comparison_pre (combined with
943 * the other optimization passes) will have removed at least one
944 * instruction from one of the branches of the if-statement, so now it
945 * might be under the threshold of conversion to bcsel.
946 *
947 * See brw_nir_optimize for the explanation of is_vec4_tessellation.
948 */
949 const bool is_vec4_tessellation = !is_scalar &&
950 (nir->info.stage == MESA_SHADER_TESS_CTRL ||
951 nir->info.stage == MESA_SHADER_TESS_EVAL);
952 OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
953 OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
954 compiler->devinfo->gen >= 6);
955 }
956
957 do {
958 progress = false;
959 if (OPT(nir_opt_algebraic_late)) {
960 /* At this late stage, anything that makes more constants will wreak
961 * havok on the vec4 backend. The handling of constants in the vec4
962 * backend is not good.
963 */
964 if (is_scalar)
965 OPT(nir_opt_constant_folding);
966
967 OPT(nir_copy_prop);
968 OPT(nir_opt_dce);
969 OPT(nir_opt_cse);
970 }
971 } while (progress);
972
973
974 OPT(brw_nir_lower_conversions);
975
976 if (is_scalar)
977 OPT(nir_lower_alu_to_scalar, NULL, NULL);
978
979 while (OPT(nir_opt_algebraic_distribute_src_mods)) {
980 OPT(nir_copy_prop);
981 OPT(nir_opt_dce);
982 OPT(nir_opt_cse);
983 }
984
985 OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
986 OPT(nir_copy_prop);
987 OPT(nir_opt_dce);
988 OPT(nir_opt_move, nir_move_comparisons);
989
990 OPT(nir_lower_bool_to_int32);
991 OPT(nir_copy_prop);
992 OPT(nir_opt_dce);
993
994 OPT(nir_lower_locals_to_regs);
995
996 if (unlikely(debug_enabled)) {
997 /* Re-index SSA defs so we print more sensible numbers. */
998 nir_foreach_function(function, nir) {
999 if (function->impl)
1000 nir_index_ssa_defs(function->impl);
1001 }
1002
1003 fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1004 _mesa_shader_stage_to_string(nir->info.stage));
1005 nir_print_shader(nir, stderr);
1006 }
1007
1008 OPT(nir_convert_from_ssa, true);
1009
1010 if (!is_scalar) {
1011 OPT(nir_move_vec_src_uses_to_dest);
1012 OPT(nir_lower_vec_to_movs);
1013 }
1014
1015 OPT(nir_opt_dce);
1016
1017 if (OPT(nir_opt_rematerialize_compares))
1018 OPT(nir_opt_dce);
1019
1020 /* This is the last pass we run before we start emitting stuff. It
1021 * determines when we need to insert boolean resolves on Gen <= 5. We
1022 * run it last because it stashes data in instr->pass_flags and we don't
1023 * want that to be squashed by other NIR passes.
1024 */
1025 if (devinfo->gen <= 5)
1026 brw_nir_analyze_boolean_resolves(nir);
1027
1028 nir_sweep(nir);
1029
1030 if (unlikely(debug_enabled)) {
1031 fprintf(stderr, "NIR (final form) for %s shader:\n",
1032 _mesa_shader_stage_to_string(nir->info.stage));
1033 nir_print_shader(nir, stderr);
1034 }
1035 }
1036
1037 static bool
1038 brw_nir_apply_sampler_key(nir_shader *nir,
1039 const struct brw_compiler *compiler,
1040 const struct brw_sampler_prog_key_data *key_tex)
1041 {
1042 const struct gen_device_info *devinfo = compiler->devinfo;
1043 nir_lower_tex_options tex_options = {
1044 .lower_txd_clamp_bindless_sampler = true,
1045 .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1046 };
1047
1048 /* Iron Lake and prior require lowering of all rectangle textures */
1049 if (devinfo->gen < 6)
1050 tex_options.lower_rect = true;
1051
1052 /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
1053 if (devinfo->gen < 8) {
1054 tex_options.saturate_s = key_tex->gl_clamp_mask[0];
1055 tex_options.saturate_t = key_tex->gl_clamp_mask[1];
1056 tex_options.saturate_r = key_tex->gl_clamp_mask[2];
1057 }
1058
1059 /* Prior to Haswell, we have to fake texture swizzle */
1060 for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
1061 if (key_tex->swizzles[s] == SWIZZLE_NOOP)
1062 continue;
1063
1064 tex_options.swizzle_result |= BITFIELD_BIT(s);
1065 for (unsigned c = 0; c < 4; c++)
1066 tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
1067 }
1068
1069 /* Prior to Haswell, we have to lower gradients on shadow samplers */
1070 tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;
1071
1072 tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
1073 tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
1074 tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
1075 tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
1076 tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
1077 tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
1078
1079 /* Setup array of scaling factors for each texture. */
1080 memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
1081 sizeof(tex_options.scale_factors));
1082
1083 return nir_lower_tex(nir, &tex_options);
1084 }
1085
1086 static unsigned
1087 get_subgroup_size(gl_shader_stage stage,
1088 const struct brw_base_prog_key *key,
1089 unsigned max_subgroup_size)
1090 {
1091 switch (key->subgroup_size_type) {
1092 case BRW_SUBGROUP_SIZE_API_CONSTANT:
1093 /* We have to use the global constant size. */
1094 return BRW_SUBGROUP_SIZE;
1095
1096 case BRW_SUBGROUP_SIZE_UNIFORM:
1097 /* It has to be uniform across all invocations but can vary per stage
1098 * if we want. This gives us a bit more freedom.
1099 *
1100 * For compute, brw_nir_apply_key is called per-dispatch-width so this
1101 * is the actual subgroup size and not a maximum. However, we only
1102 * invoke one size of any given compute shader so it's still guaranteed
1103 * to be uniform across invocations.
1104 */
1105 return max_subgroup_size;
1106
1107 case BRW_SUBGROUP_SIZE_VARYING:
1108 /* The subgroup size is allowed to be fully varying. For geometry
1109 * stages, we know it's always 8 which is max_subgroup_size so we can
1110 * return that. For compute, brw_nir_apply_key is called once per
1111 * dispatch-width so max_subgroup_size is the real subgroup size.
1112 *
1113 * For fragment, we return 0 and let it fall through to the back-end
1114 * compiler. This means we can't optimize based on subgroup size but
1115 * that's a risk the client took when it asked for a varying subgroup
1116 * size.
1117 */
1118 return stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1119
1120 case BRW_SUBGROUP_SIZE_REQUIRE_8:
1121 case BRW_SUBGROUP_SIZE_REQUIRE_16:
1122 case BRW_SUBGROUP_SIZE_REQUIRE_32:
1123 assert(stage == MESA_SHADER_COMPUTE);
1124 /* These enum values are expressly chosen to be equal to the subgroup
1125 * size that they require.
1126 */
1127 return key->subgroup_size_type;
1128 }
1129
1130 unreachable("Invalid subgroup size type");
1131 }
1132
1133 void
1134 brw_nir_apply_key(nir_shader *nir,
1135 const struct brw_compiler *compiler,
1136 const struct brw_base_prog_key *key,
1137 unsigned max_subgroup_size,
1138 bool is_scalar)
1139 {
1140 bool progress = false;
1141
1142 OPT(brw_nir_apply_sampler_key, compiler, &key->tex);
1143
1144 const nir_lower_subgroups_options subgroups_options = {
1145 .subgroup_size = get_subgroup_size(nir->info.stage, key,
1146 max_subgroup_size),
1147 .ballot_bit_size = 32,
1148 .lower_subgroup_masks = true,
1149 };
1150 OPT(nir_lower_subgroups, &subgroups_options);
1151
1152 if (progress)
1153 brw_nir_optimize(nir, compiler, is_scalar, false);
1154 }
1155
1156 enum brw_conditional_mod
1157 brw_cmod_for_nir_comparison(nir_op op)
1158 {
1159 switch (op) {
1160 case nir_op_flt:
1161 case nir_op_flt32:
1162 case nir_op_ilt:
1163 case nir_op_ilt32:
1164 case nir_op_ult:
1165 case nir_op_ult32:
1166 return BRW_CONDITIONAL_L;
1167
1168 case nir_op_fge:
1169 case nir_op_fge32:
1170 case nir_op_ige:
1171 case nir_op_ige32:
1172 case nir_op_uge:
1173 case nir_op_uge32:
1174 return BRW_CONDITIONAL_GE;
1175
1176 case nir_op_feq:
1177 case nir_op_feq32:
1178 case nir_op_ieq:
1179 case nir_op_ieq32:
1180 case nir_op_b32all_fequal2:
1181 case nir_op_b32all_iequal2:
1182 case nir_op_b32all_fequal3:
1183 case nir_op_b32all_iequal3:
1184 case nir_op_b32all_fequal4:
1185 case nir_op_b32all_iequal4:
1186 return BRW_CONDITIONAL_Z;
1187
1188 case nir_op_fne:
1189 case nir_op_fne32:
1190 case nir_op_ine:
1191 case nir_op_ine32:
1192 case nir_op_b32any_fnequal2:
1193 case nir_op_b32any_inequal2:
1194 case nir_op_b32any_fnequal3:
1195 case nir_op_b32any_inequal3:
1196 case nir_op_b32any_fnequal4:
1197 case nir_op_b32any_inequal4:
1198 return BRW_CONDITIONAL_NZ;
1199
1200 default:
1201 unreachable("Unsupported NIR comparison op");
1202 }
1203 }
1204
1205 uint32_t
1206 brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
1207 {
1208 switch (atomic->intrinsic) {
1209 #define AOP_CASE(atom) \
1210 case nir_intrinsic_image_atomic_##atom: \
1211 case nir_intrinsic_bindless_image_atomic_##atom: \
1212 case nir_intrinsic_ssbo_atomic_##atom: \
1213 case nir_intrinsic_shared_atomic_##atom: \
1214 case nir_intrinsic_global_atomic_##atom
1215
1216 AOP_CASE(add): {
1217 unsigned src_idx;
1218 switch (atomic->intrinsic) {
1219 case nir_intrinsic_image_atomic_add:
1220 case nir_intrinsic_bindless_image_atomic_add:
1221 src_idx = 3;
1222 break;
1223 case nir_intrinsic_ssbo_atomic_add:
1224 src_idx = 2;
1225 break;
1226 case nir_intrinsic_shared_atomic_add:
1227 case nir_intrinsic_global_atomic_add:
1228 src_idx = 1;
1229 break;
1230 default:
1231 unreachable("Invalid add atomic opcode");
1232 }
1233
1234 if (nir_src_is_const(atomic->src[src_idx])) {
1235 int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
1236 if (add_val == 1)
1237 return BRW_AOP_INC;
1238 else if (add_val == -1)
1239 return BRW_AOP_DEC;
1240 }
1241 return BRW_AOP_ADD;
1242 }
1243
1244 AOP_CASE(imin): return BRW_AOP_IMIN;
1245 AOP_CASE(umin): return BRW_AOP_UMIN;
1246 AOP_CASE(imax): return BRW_AOP_IMAX;
1247 AOP_CASE(umax): return BRW_AOP_UMAX;
1248 AOP_CASE(and): return BRW_AOP_AND;
1249 AOP_CASE(or): return BRW_AOP_OR;
1250 AOP_CASE(xor): return BRW_AOP_XOR;
1251 AOP_CASE(exchange): return BRW_AOP_MOV;
1252 AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
1253
1254 #undef AOP_CASE
1255 #define AOP_CASE(atom) \
1256 case nir_intrinsic_ssbo_atomic_##atom: \
1257 case nir_intrinsic_shared_atomic_##atom: \
1258 case nir_intrinsic_global_atomic_##atom
1259
1260 AOP_CASE(fmin): return BRW_AOP_FMIN;
1261 AOP_CASE(fmax): return BRW_AOP_FMAX;
1262 AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
1263
1264 #undef AOP_CASE
1265
1266 default:
1267 unreachable("Unsupported NIR atomic intrinsic");
1268 }
1269 }
1270
1271 enum brw_reg_type
1272 brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
1273 {
1274 switch (type) {
1275 case nir_type_uint:
1276 case nir_type_uint32:
1277 return BRW_REGISTER_TYPE_UD;
1278 case nir_type_bool:
1279 case nir_type_int:
1280 case nir_type_bool32:
1281 case nir_type_int32:
1282 return BRW_REGISTER_TYPE_D;
1283 case nir_type_float:
1284 case nir_type_float32:
1285 return BRW_REGISTER_TYPE_F;
1286 case nir_type_float16:
1287 return BRW_REGISTER_TYPE_HF;
1288 case nir_type_float64:
1289 return BRW_REGISTER_TYPE_DF;
1290 case nir_type_int64:
1291 return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
1292 case nir_type_uint64:
1293 return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
1294 case nir_type_int16:
1295 return BRW_REGISTER_TYPE_W;
1296 case nir_type_uint16:
1297 return BRW_REGISTER_TYPE_UW;
1298 case nir_type_int8:
1299 return BRW_REGISTER_TYPE_B;
1300 case nir_type_uint8:
1301 return BRW_REGISTER_TYPE_UB;
1302 default:
1303 unreachable("unknown type");
1304 }
1305
1306 return BRW_REGISTER_TYPE_F;
1307 }
1308
1309 /* Returns the glsl_base_type corresponding to a nir_alu_type.
1310 * This is used by both brw_vec4_nir and brw_fs_nir.
1311 */
1312 enum glsl_base_type
1313 brw_glsl_base_type_for_nir_type(nir_alu_type type)
1314 {
1315 switch (type) {
1316 case nir_type_float:
1317 case nir_type_float32:
1318 return GLSL_TYPE_FLOAT;
1319
1320 case nir_type_float16:
1321 return GLSL_TYPE_FLOAT16;
1322
1323 case nir_type_float64:
1324 return GLSL_TYPE_DOUBLE;
1325
1326 case nir_type_int:
1327 case nir_type_int32:
1328 return GLSL_TYPE_INT;
1329
1330 case nir_type_uint:
1331 case nir_type_uint32:
1332 return GLSL_TYPE_UINT;
1333
1334 case nir_type_int16:
1335 return GLSL_TYPE_INT16;
1336
1337 case nir_type_uint16:
1338 return GLSL_TYPE_UINT16;
1339
1340 default:
1341 unreachable("bad type");
1342 }
1343 }
1344
1345 nir_shader *
1346 brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
1347 const nir_shader_compiler_options *options,
1348 const struct brw_tcs_prog_key *key)
1349 {
1350 nir_builder b;
1351 nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
1352 options);
1353 nir_shader *nir = b.shader;
1354 nir_variable *var;
1355 nir_intrinsic_instr *load;
1356 nir_intrinsic_instr *store;
1357 nir_ssa_def *zero = nir_imm_int(&b, 0);
1358 nir_ssa_def *invoc_id = nir_load_invocation_id(&b);
1359
1360 nir->info.inputs_read = key->outputs_written &
1361 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
1362 nir->info.outputs_written = key->outputs_written;
1363 nir->info.tess.tcs_vertices_out = key->input_vertices;
1364 nir->info.name = ralloc_strdup(nir, "passthrough");
1365 nir->num_uniforms = 8 * sizeof(uint32_t);
1366
1367 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
1368 var->data.location = 0;
1369 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
1370 var->data.location = 1;
1371
1372 /* Write the patch URB header. */
1373 for (int i = 0; i <= 1; i++) {
1374 load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
1375 load->num_components = 4;
1376 load->src[0] = nir_src_for_ssa(zero);
1377 nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
1378 nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
1379 nir_builder_instr_insert(&b, &load->instr);
1380
1381 store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
1382 store->num_components = 4;
1383 store->src[0] = nir_src_for_ssa(&load->dest.ssa);
1384 store->src[1] = nir_src_for_ssa(zero);
1385 nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
1386 nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
1387 nir_builder_instr_insert(&b, &store->instr);
1388 }
1389
1390 /* Copy inputs to outputs. */
1391 uint64_t varyings = nir->info.inputs_read;
1392
1393 while (varyings != 0) {
1394 const int varying = ffsll(varyings) - 1;
1395
1396 load = nir_intrinsic_instr_create(nir,
1397 nir_intrinsic_load_per_vertex_input);
1398 load->num_components = 4;
1399 load->src[0] = nir_src_for_ssa(invoc_id);
1400 load->src[1] = nir_src_for_ssa(zero);
1401 nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
1402 nir_intrinsic_set_base(load, varying);
1403 nir_builder_instr_insert(&b, &load->instr);
1404
1405 store = nir_intrinsic_instr_create(nir,
1406 nir_intrinsic_store_per_vertex_output);
1407 store->num_components = 4;
1408 store->src[0] = nir_src_for_ssa(&load->dest.ssa);
1409 store->src[1] = nir_src_for_ssa(invoc_id);
1410 store->src[2] = nir_src_for_ssa(zero);
1411 nir_intrinsic_set_base(store, varying);
1412 nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
1413 nir_builder_instr_insert(&b, &store->instr);
1414
1415 varyings &= ~BITFIELD64_BIT(varying);
1416 }
1417
1418 nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
1419
1420 brw_preprocess_nir(compiler, nir, NULL);
1421
1422 return nir;
1423 }