i965: Move the back-end compiler to src/intel/compiler
[mesa.git] / src / intel / compiler / brw_nir.h
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include "brw_reg.h"
#include "compiler/nir/nir.h"
#include "brw_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

int type_size_scalar(const struct glsl_type *type);
int type_size_vec4(const struct glsl_type *type);
int type_size_dvec4(const struct glsl_type *type);

static inline int
type_size_scalar_bytes(const struct glsl_type *type)
{
   return type_size_scalar(type) * 4;
}

static inline int
type_size_vec4_bytes(const struct glsl_type *type)
{
   return type_size_vec4(type) * 16;
}
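
/* Illustrative note (editor's sketch, not part of the upstream header): the
 * *_bytes wrappers above simply scale the element counts returned by
 * type_size_scalar() (4-byte scalar components) and type_size_vec4()
 * (16-byte vec4 slots) into byte sizes. For example, assuming a plain vec4
 * counts as 4 scalar components and 1 vec4 slot:
 *
 *    type_size_scalar_bytes(vec4) == 4 * 4  == 16
 *    type_size_vec4_bytes(vec4)   == 1 * 16 == 16
 */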

/* Flags set in the instr->pass_flags field by i965 analysis passes */
enum {
   BRW_NIR_NON_BOOLEAN = 0x0,

   /* Indicates that the given instruction's destination is a boolean
    * value but that it needs to be resolved before it can be used.
    * On Gen <= 5, CMP instructions return a 32-bit value where the bottom
    * bit represents the actual true/false value of the compare and the top
    * 31 bits are undefined. In order to use this value, we have to do a
    * "resolve" operation by replacing the value of the CMP with -(x & 1)
    * to sign-extend the bottom bit to 0/~0.
    */
   BRW_NIR_BOOLEAN_NEEDS_RESOLVE = 0x1,

   /* Indicates that the given instruction's destination is a boolean
    * value that has intentionally been left unresolved. Not all boolean
    * values need to be resolved immediately. For instance, if we have
    *
    *    CMP r1 r2 r3
    *    CMP r4 r5 r6
    *    AND r7 r1 r4
    *
    * We don't have to resolve the result of the two CMP instructions
    * immediately because the AND still does an AND of the bottom bits.
    * Instead, we can save ourselves instructions by delaying the resolve
    * until after the AND. The result of the two CMP instructions is left
    * as BRW_NIR_BOOLEAN_UNRESOLVED.
    */
   BRW_NIR_BOOLEAN_UNRESOLVED = 0x2,

   /* Indicates that the given instruction's destination is a boolean
    * value that does not need a resolve. For instance, if you AND two
    * values that are BRW_NIR_BOOLEAN_NO_RESOLVE then we know that both
    * values will be 0/~0 before we get them and the result of the AND is
    * also guaranteed to be 0/~0 and does not need a resolve.
    */
   BRW_NIR_BOOLEAN_NO_RESOLVE = 0x3,

   /* A mask to mask the boolean status values off of instr->pass_flags */
   BRW_NIR_BOOLEAN_MASK = 0x3,
};
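
/* Editor's sketch (hypothetical helpers, not upstream API): how the flags
 * above are meant to be read, and what the "resolve" described for
 * BRW_NIR_BOOLEAN_NEEDS_RESOLVE does. The boolean status lives in the low
 * bits of nir_instr::pass_flags and is extracted with BRW_NIR_BOOLEAN_MASK;
 * the resolve sign-extends the bottom bit of a compare result into a full
 * 0/~0 boolean.
 */
static inline unsigned
brw_nir_example_boolean_status(const nir_instr *instr)
{
   /* One of BRW_NIR_NON_BOOLEAN, *_NEEDS_RESOLVE, *_UNRESOLVED, *_NO_RESOLVE */
   return instr->pass_flags & BRW_NIR_BOOLEAN_MASK;
}

static inline uint32_t
brw_nir_example_resolve(uint32_t cmp_result)
{
   /* -(x & 1): keep only the bottom bit, then negate to get 0 or ~0 */
   return -(cmp_result & 1);
}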

void brw_nir_analyze_boolean_resolves(nir_shader *nir);

nir_shader *brw_preprocess_nir(const struct brw_compiler *compiler,
                               nir_shader *nir);

bool brw_nir_lower_intrinsics(nir_shader *nir,
                              struct brw_stage_prog_data *prog_data);
void brw_nir_lower_vs_inputs(nir_shader *nir,
                             bool is_scalar,
                             bool use_legacy_snorm_formula,
                             const uint8_t *vs_attrib_wa_flags);
void brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
                              const struct brw_vue_map *vue_map);
void brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_inputs(nir_shader *nir,
                             const struct gen_device_info *devinfo,
                             const struct brw_wm_prog_key *key);
void brw_nir_lower_vue_outputs(nir_shader *nir, bool is_scalar);
void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue,
                               GLenum tes_primitive_mode);
void brw_nir_lower_fs_outputs(nir_shader *nir);
void brw_nir_lower_cs_shared(nir_shader *nir);

nir_shader *brw_postprocess_nir(nir_shader *nir,
                                const struct brw_compiler *compiler,
                                bool is_scalar);

bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
                                         bool use_legacy_snorm_formula,
                                         const uint8_t *attrib_wa_flags);

bool brw_nir_apply_trig_workarounds(nir_shader *nir);

void brw_nir_apply_tcs_quads_workaround(nir_shader *nir);

nir_shader *brw_nir_apply_sampler_key(nir_shader *nir,
                                      const struct brw_compiler *compiler,
                                      const struct brw_sampler_prog_key_data *key,
                                      bool is_scalar);

enum brw_reg_type brw_type_for_nir_type(const struct gen_device_info *devinfo,
                                        nir_alu_type type);

enum glsl_base_type brw_glsl_base_type_for_nir_type(nir_alu_type type);

void brw_nir_setup_glsl_uniforms(nir_shader *shader,
                                 const struct gl_program *prog,
                                 struct brw_stage_prog_data *stage_prog_data,
                                 bool is_scalar);

void brw_nir_setup_arb_uniforms(nir_shader *shader, struct gl_program *prog,
                                struct brw_stage_prog_data *stage_prog_data);

bool brw_nir_opt_peephole_ffma(nir_shader *shader);

#define BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define BRW_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)
#define BRW_NIR_FRAG_OUTPUT_LOCATION_SHIFT 1
#define BRW_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)
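
/* Editor's sketch (hypothetical helpers, not upstream API): the defines
 * above pack a fragment output's index (bit 0) and its location
 * (bits 1..31) into a single word. This sketch assumes the SET_FIELD()/
 * GET_FIELD() macros from "brw_reg.h".
 */
static inline uint32_t
brw_nir_example_pack_frag_output(unsigned location, unsigned index)
{
   return SET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION) |
          SET_FIELD(index, BRW_NIR_FRAG_OUTPUT_INDEX);
}

static inline unsigned
brw_nir_example_frag_output_location(uint32_t packed)
{
   return GET_FIELD(packed, BRW_NIR_FRAG_OUTPUT_LOCATION);
}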

#ifdef __cplusplus
}
#endif