/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"

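/* Helpers to classify NIR IO intrinsics: is_input() matches the load
 * intrinsics generated for shader inputs, and is_output() matches both
 * loads and stores of shader outputs.
 */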
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate. Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0. Non-constant offsets remain unchanged; since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
   return true;
}

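/* Runs add_const_offset_to_base_block() on every block in the shader.
 * For example, an input load with base=16 and a constant offset source
 * of 8 becomes a load with base=24 and a constant offset source of 0.
 */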
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}

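/* Translate VERT_ATTRIB_* locations on vertex input loads into the packed
 * slot offsets the scalar backend expects.
 */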
static bool
remap_vs_attrs(nir_block *block, shader_info *nir_info)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value. That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
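         /* For example, if inputs_read has three bits set below attr and
          * no double-precision inputs are in use, slot is 3, dslot is 0,
          * and the attribute's new base is 4 * 3 = 12.
          */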
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(nir_info->inputs_read &
                                      BITFIELD64_MASK(attr));
         int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
                                       BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * (slot + dslot);
      }
   }
   return true;
}

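/* Rewrite VARYING_SLOT_* locations on input loads to slots in the given
 * VUE map, which describes the layout of a vertex URB entry.
 */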
static bool
remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;
      }
   }
   return true;
}

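/* Remap tessellation I/O (TCS outputs and TES inputs) from VARYING_SLOT_*
 * values to offsets within the patch URB entry, folding in the vertex
 * index: a constant vertex index is folded directly into the base, while
 * a dynamic one is multiplied by the number of per-vertex slots and added
 * to the indirect offset source.
 */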
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset. */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}

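/* Lower VS input variables to explicit IO intrinsics and remap their
 * locations. Attribute workarounds (such as the legacy SNORM formula)
 * are applied here as well, and for the scalar backend the VERT_ATTRIB_*
 * locations are translated into packed register offsets.
 */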
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        bool is_scalar,
                        bool use_legacy_snorm_formula,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending
    * on whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0);

   /* This pass needs actual constants. */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
                                       vs_attrib_wa_flags);

   if (is_scalar) {
      /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
      nir_foreach_function(function, nir) {
         if (function->impl) {
            nir_foreach_block(block, function->impl) {
               remap_vs_attrs(block, nir->info);
            }
         }
      }
   }
}

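/* Lower VUE-based input variables to explicit IO intrinsics. The inputs
 * are then remapped through the VUE map, except in the vec4 geometry
 * shader backend.
 */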
void
brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
      /* This pass needs actual constants. */
      nir_opt_constant_folding(nir);

      add_const_offset_to_base(nir, nir_var_shader_in);

      nir_foreach_function(function, nir) {
         if (function->impl) {
            nir_foreach_block(block, function->impl) {
               remap_inputs_with_vue_map(block, vue_map);
            }
         }
      }
   }
}

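/* Lower TES input variables to explicit IO intrinsics and remap them to
 * patch URB offsets via remap_patch_urb_offsets().
 */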
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants. */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map);
         }
      }
   }
}

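/* Lower FS input variables to explicit IO intrinsics, resolving each
 * input's interpolation mode from API state where the shader left it
 * unspecified.
 */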
void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware;
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = 0;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);

   /* This pass needs actual constants. */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);
}

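/* Lower VUE-based output variables to explicit IO intrinsics, using one
 * vec4 slot per location.
 */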
void
brw_nir_lower_vue_outputs(nir_shader *nir,
                          bool is_scalar)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}

void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);

   /* This pass needs actual constants. */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map);
         }
      }
   }
}

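/* Lower FS output variables, packing both the output index (used for
 * dual-source blending) and the location into driver_location.
 */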
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}

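/* Assign byte offsets to compute shader shared variables and lower their
 * access to explicit IO intrinsics.
 */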
void
brw_nir_lower_cs_shared(nir_shader *nir)
{
   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
                            type_size_scalar_bytes);
   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
}

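/* OPT() runs a NIR pass and records whether it made progress in the local
 * "progress" variable, which must be in scope at the call site; the macro
 * also evaluates to that pass's progress. OPT_V() runs a pass that reports
 * no progress information.
 */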
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

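/* Runs the standard suite of NIR optimizations to a fixed point, with
 * scalarization passes mixed in when compiling for the scalar backend.
 */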
static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select, 0);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT_V(nir_lower_doubles, nir_lower_drcp |
                               nir_lower_dsqrt |
                               nir_lower_drsq |
                               nir_lower_dtrunc |
                               nir_lower_dfloor |
                               nir_lower_dceil |
                               nir_lower_dfract |
                               nir_lower_dround_even |
                               nir_lower_dmod);
      OPT_V(nir_lower_double_pack);
   } while (progress);

   return nir;
}

/* Does some simple lowering and runs the standard suite of optimizations.
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source. While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag. Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   const bool is_scalar = compiler->scalar_stage[nir->stage];

   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   if (compiler->precise_trig)
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   if (is_scalar) {
      OPT_V(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff. */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies. */
   nir = nir_optimize(nir, is_scalar);

   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}

/* Prepare the given shader for codegen.
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific. Also, once this function has been
 * called on a shader, it will no longer be in SSA form, so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir,
                    const struct gen_device_info *devinfo,
                    bool is_scalar)
{
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   nir = nir_optimize(nir, is_scalar);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds. */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff. It
    * determines when we need to insert boolean resolves on Gen <= 5. We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

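/* Apply sampler-key-driven texture lowering: rectangle texture lowering on
 * Gen < 6, GL_CLAMP emulation on Gen < 8, faked texture swizzles, and
 * planar YUV lowering. Re-optimizes the shader if any lowering took place.
 */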
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct gen_device_info *devinfo,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   nir_lower_tex_options tex_options = { 0 };

   /* Ironlake and prior require lowering of all rectangle textures. */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP. */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle. */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, is_scalar);
   }

   return nir;
}

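/* Returns the BRW register type corresponding to a nir_alu_type. */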
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
   case nir_type_uint64:
      /* TODO: we should only see these in moves, so for now it's OK, but
       * when we add actual 64-bit integer support we should fix this.
       */
      return BRW_REGISTER_TYPE_DF;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}