nir: Get rid of *_indirect variants of input/output load/store intrinsics
[mesa.git] src/mesa/drivers/dri/i965/brw_nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/nir/glsl_to_nir.h"
#include "glsl/nir/nir_builder.h"
#include "program/prog_to_nir.h"

struct remap_vs_attrs_state {
   nir_builder b;
   uint64_t inputs_read;
};

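/*
 * Block callback for nir_foreach_block(): remaps scalar-VS load_input
 * intrinsics from VERT_ATTRIB_*-based locations to packed attribute slots.
 */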
static bool
remap_vs_attrs(nir_block *block, void *void_state)
{
   struct remap_vs_attrs_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value. That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
         nir_const_value *const_offset = nir_src_as_const_value(intrin->src[0]);

         /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
         assert(const_offset);

         int attr = intrin->const_index[0] + const_offset->u[0];
         int slot = _mesa_bitcount_64(state->inputs_read &
                                      BITFIELD64_MASK(attr));
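         /* Worked example (illustrative values only): if inputs_read has
          * bits 0, 1, and 4 set and attr == 4, then BITFIELD64_MASK(4)
          * keeps only bits 0-3, so the popcount yields slot == 2.
          */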

         /* The NIR -> FS pass will just add the base and offset together, so
          * there's no reason to keep them separate. Just put it all in
          * const_index[0] and set the offset src[0] to load_const(0).
          */
         intrin->const_index[0] = 4 * slot;

         state->b.cursor = nir_before_instr(&intrin->instr);
         nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                               nir_src_for_ssa(nir_imm_int(&state->b, 0)));
      }
   }
   return true;
}

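/*
 * Assigns driver_location for every shader input and, where needed, lowers
 * input dereferences to load_input intrinsics.  The layout is stage
 * specific: VS attributes, GS VUE slots, and scalar FS inputs are all
 * handled differently below.
 */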
static void
brw_nir_lower_inputs(nir_shader *nir,
                     const struct brw_device_info *devinfo,
                     bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains. Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
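      /* For example (illustrative), an "in mat3" attribute counts as three
       * vec4 slots (one per column), while a lone "in float" still occupies
       * a full slot.
       */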

      if (is_scalar) {
         /* Finally, translate VERT_ATTRIB_* values into the actual registers.
          *
          * Note that we can use nir->info.inputs_read instead of
          * key->inputs_read since the two are identical aside from Gen4-5
          * edge flag differences.
          */
         struct remap_vs_attrs_state remap_state = {
            .inputs_read = nir->info.inputs_read,
         };

         /* This pass needs actual constants */
         nir_opt_constant_folding(nir);

         nir_foreach_overload(nir, overload) {
            if (overload->impl) {
               nir_builder_init(&remap_state.b, overload->impl);
               nir_foreach_block(overload->impl, remap_vs_attrs, &remap_state);
            }
         }
      }
      break;
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages. The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offer geometry shader support. So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on variable
          * locations, so we can rely on rendezvous-by-location to make this
          * work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
          * written by previous stages and shows up via payload magic.
          */
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader);

         /* Start with the slot for the variable's base. */
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
            var->data.driver_location =
               input_vue_map.varying_to_slot[var->data.location];
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      assert(is_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

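/*
 * Assigns driver_location for shader outputs.  Scalar VS/GS outputs are
 * packed and lowered to store_output intrinsics; the vec4 backends keep
 * output variables keyed by varying location; FS outputs just get compact
 * scalar locations.
 */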
static void
brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

static int
type_size_scalar_bytes(const struct glsl_type *type)
{
   return type_size_scalar(type) * 4;
}

static int
type_size_vec4_bytes(const struct glsl_type *type)
{
   return type_size_vec4(type) * 16;
}

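/*
 * Uniform locations are assigned in bytes rather than in components or
 * vec4 slots.  As an illustrative example, a vec3 uniform takes
 * 3 * 4 = 12 bytes with type_size_scalar_bytes but a full 16-byte slot
 * with type_size_vec4_bytes.
 */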
static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}

#include "util/debug.h"

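/*
 * Debug aid: when the NIR_TEST_CLONE environment variable is set to true,
 * every pass run through the OPT/OPT_V macros below is followed by a
 * round-trip through nir_shader_clone, which exercises the cloning code.
 * Example invocation (illustrative): NIR_TEST_CLONE=true ./my_gl_app
 */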
static bool
should_clone_nir()
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

#define _OPT(do_pass) (({     \
   bool this_progress = true; \
   do_pass                    \
   nir_validate_shader(nir);  \
   if (should_clone_nir()) {  \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);       \
      nir = clone;            \
   }                          \
   this_progress;             \
}))

#define OPT(pass, ...) _OPT(                   \
   nir_metadata_set_validation_flag(nir);      \
   this_progress = pass(nir, ##__VA_ARGS__);   \
   if (this_progress) {                        \
      progress = true;                         \
      nir_metadata_check_validation_flag(nir); \
   }                                           \
)

#define OPT_V(pass, ...) _OPT( \
   pass(nir, ##__VA_ARGS__);   \
)
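/*
 * Usage sketch: OPT(nir_opt_dce) runs a bool-returning pass, validates the
 * result, and folds the pass's return value into a local `progress` flag,
 * while OPT_V(nir_lower_var_copies) does the same for passes without a
 * useful return value.  Both expect a local `nir` variable (and, for OPT,
 * a local `progress` flag) to be in scope.
 */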

static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
   } while (progress);

   return nir;
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source. While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag. Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(nir_shader *nir, bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);

   OPT(nir_remove_dead_variables);

   return nir;
}

/* Lowers inputs, outputs, uniforms, and samplers for i965
 *
 * This function does all of the standard lowering prior to post-processing.
 * The lowering done is highly gen, stage, and backend-specific. The
 * shader_prog parameter is optional and is used only for lowering sampler
 * derefs and atomics for GLSL shaders.
 */
nir_shader *
brw_lower_nir(nir_shader *nir,
              const struct brw_device_info *devinfo,
              const struct gl_shader_program *shader_prog,
              bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
   OPT_V(brw_nir_lower_outputs, is_scalar);
   OPT_V(brw_nir_lower_uniforms, is_scalar);
   OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);

   if (shader_prog) {
      OPT_V(nir_lower_samplers, shader_prog);
   }

   OPT(nir_lower_system_values);

   if (shader_prog) {
      OPT_V(nir_lower_atomics, shader_prog);
   }

   return nir_optimize(nir, is_scalar);
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific. Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir,
                    const struct brw_device_info *devinfo,
                    bool is_scalar)
{
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff. It
    * determines when we need to insert boolean resolves on Gen <= 5. We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

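/*
 * Builds a NIR shader for the given stage, either from GLSL IR via
 * glsl_to_nir (when shader_prog is non-NULL) or from Mesa IR via
 * prog_to_nir, and then runs the stage-independent preprocessing and the
 * i965-specific lowering above.  The backend is expected to run
 * brw_postprocess_nir separately, right before code generation.
 */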
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
   } else {
      nir = prog_to_nir(prog, options);
      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   (void)progress;

   nir = brw_preprocess_nir(nir, is_scalar);
   nir = brw_lower_nir(nir, devinfo, shader_prog, is_scalar);

   return nir;
}

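/*
 * Applies the hardware workarounds encoded in the sampler program key:
 * rectangle-texture lowering on Gen < 6, faked GL_CLAMP on Gen < 8, and
 * faked texture swizzles.  If nir_lower_tex changes anything, the shader
 * is re-optimized.
 */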
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_device_info *devinfo,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }
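   /* Each gl_clamp_mask[] entry is a per-sampler bitmask, so the saturate
    * masks above only affect samplers that actually use GL_CLAMP on that
    * coordinate.
    */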

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, is_scalar);
   }

   return nir;
}

enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_uint:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}