i965: Don't use _mesa_ir_link_shader to do our dirty work
[mesa.git] / src/mesa/drivers/dri/i965/brw_shader.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

extern "C" {
#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
}
#include "brw_fs.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

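/**
 * Allocate and initialize the driver's subclass of gl_shader.
 */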
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

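/**
 * Allocate and initialize the driver's subclass of gl_shader_program.
 */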
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;
   prog = rzalloc(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }
   return &prog->base;
}

/**
 * Performs a compile of the shader stages even though we don't yet know
 * what non-orthogonal state (NOS) will be set, in the hope that the guess
 * reflects the NOS eventually used, which allows us to produce failures at
 * link time rather than at draw time.
 */
bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->precompile && !brw_fs_precompile(ctx, prog))
      return false;

   if (!brw_vs_precompile(ctx, prog))
      return false;

   return true;
}

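/**
 * Link-time entry point for the driver: clones each linked stage's IR, runs
 * the lowering passes the i965 backends rely on, gathers uniform and sampler
 * usage into a fresh gl_program, and finally attempts a precompile of the
 * stages so that failures can be reported at link time.
 */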
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];
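      /* Indexed by linked-shader stage: vertex, fragment, geometry. */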
      static const GLenum targets[] = {
         GL_VERTEX_PROGRAM_ARB,
         GL_FRAGMENT_PROGRAM_ARB,
         GL_GEOMETRY_PROGRAM_NV
      };

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_generate_parameters_list_for_uniforms(shProg, &shader->base,
                                                  prog->Parameters);

      if (stage == MESA_SHADER_VERTEX) {
         struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
         vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
      }

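      /* Scan the fragment shader's IR for discard statements so the
       * generated gl_fragment_program records whether KIL is used.
       */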
      if (stage == MESA_SHADER_FRAGMENT) {
         class uses_kill_visitor : public ir_hierarchical_visitor {
         public:
            uses_kill_visitor() : uses_kill(false)
            {
               /* empty */
            }

            virtual ir_visitor_status visit_enter(class ir_discard *ir)
            {
               this->uses_kill = true;
               return visit_stop;
            }

            bool uses_kill;
         };

         uses_kill_visitor v;

         v.run(shader->base.ir);

         struct gl_fragment_program *fp = (struct gl_fragment_program *) prog;
         fp->UsesKill = v.uses_kill;
      }

      void *mem_ctx = ralloc_context(NULL);
      bool progress;

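      /* Clone the linked IR into the brw_shader so the backend-specific
       * lowering below doesn't disturb the GLSL core's copy of the program.
       */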
      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

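      /* Lower IR constructs the i965 backends don't handle directly:
       * matrix operations become vector operations, and mod/div/sub/exp/log
       * are rewritten in terms of operations the EU supports natively.
       */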
      do_mat_op_to_vec(shader->ir);
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2);

      /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

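      /* Lower variable array indexing to conditional assignments: inputs
       * for every stage, plus outputs, temporaries, and uniforms for the
       * fragment shader, whose backend doesn't handle them directly.
       */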
      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = stage == MESA_SHADER_FRAGMENT;

      lower_variable_index_to_cond_assign(shader->ir,
                                          input, output, temp, uniform);

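      /* Iterate the fragment shader's channel-expression and vector-splitting
       * passes together with the common GLSL optimizations until nothing
       * makes further progress.
       */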
      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32)
            || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used. This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering. Waiting until then to generate the parameters is
       * too late. At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->ir);

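      /* The cloned IR was allocated out of mem_ctx; reparent whatever
       * survived the passes above onto the shader's IR list before freeing
       * that context.
       */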
      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

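      /* Record in the gl_program which inputs this stage reads and which
       * outputs it writes.
       */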
      do_set_program_inouts(shader->ir, prog,
                            shader->base.Type == GL_FRAGMENT_SHADER);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      /* This has to be done last. Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}

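/**
 * Translate a GLSL base type into the register type used to hold values of
 * that type in the generated code.
 */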
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}

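/**
 * Translate a GLSL IR comparison opcode into the corresponding hardware
 * conditional modifier.
 */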
uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}

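/**
 * Map one of the virtual math opcodes onto the corresponding
 * BRW_MATH_FUNCTION_* encoding used by the hardware's extended math unit.
 */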
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      assert(!"not reached: unknown math function");
      return 0;
   }
}

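/**
 * Pack the constant texel offsets of a texture operation into the
 * 4-bit-per-component dword layout described below.
 */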
uint32_t
brw_texture_offset(ir_constant *offset)
{
   assert(offset != NULL);

   signed char offsets[3];
   for (unsigned i = 0; i < offset->type->vector_elements; i++)
      offsets[i] = (signed char) offset->value.i[i];

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
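   /* For example, a constant offset of ivec2(1, -2) packs to 0x1E0:
    * U = 0x1 and V = 0xE (-2 in 4-bit two's complement).
    */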
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < offset->type->vector_elements; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}