i965/vs: Run the shader backend at link time and return compile failures.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs.cpp
 *
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */

extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "../glsl/glsl_types.h"
#include "../glsl/ir_print_visitor.h"

#define MAX_INSTRUCTION (1 << 30)

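/**
 * Returns how many scalar slots (each the size of a float) a variable of
 * the given GLSL type takes up in the register file.
 */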
int
fs_visitor::type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}

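/**
 * Marks the compile as failed and records the first failure message, which
 * is what gets reported back to the caller (rather than asserting) when
 * the backend can't handle something.
 */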
void
fs_visitor::fail(const char *format, ...)
{
   va_list va;
   char *msg;

   if (failed)
      return;

   failed = true;

   va_start(va, format);
   msg = ralloc_vasprintf(mem_ctx, format, va);
   va_end(va);
   msg = ralloc_asprintf(mem_ctx, "FS compile failed: %s\n", msg);

   this->fail_msg = msg;

   if (INTEL_DEBUG & DEBUG_WM) {
      fprintf(stderr, "%s", msg);
   }
}

void
fs_visitor::push_force_uncompressed()
{
   force_uncompressed_stack++;
}

void
fs_visitor::pop_force_uncompressed()
{
   force_uncompressed_stack--;
   assert(force_uncompressed_stack >= 0);
}

void
fs_visitor::push_force_sechalf()
{
   force_sechalf_stack++;
}

void
fs_visitor::pop_force_sechalf()
{
   force_sechalf_stack--;
   assert(force_sechalf_stack >= 0);
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * c->dispatch_width / 8;
   case SHADER_OPCODE_POW:
      return 2 * c->dispatch_width / 8;
   case FS_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case FS_OPCODE_TXD:
   case FS_OPCODE_TXL:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNSPILL:
      return 1;
   case FS_OPCODE_SPILL:
      return 2;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}

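/**
 * Allocates a new virtual GRF of the given size (in full registers),
 * growing the size-tracking array as needed, and returns its index.
 */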
int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = BRW_REGISTER_TYPE_F;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg, uint32_t type)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = type;
}

/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(v->type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}

fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}

void
import_uniforms_callback(const void *key,
                         void *data,
                         void *closure)
{
   struct hash_table *dst_ht = (struct hash_table *)closure;
   const fs_reg *reg = (const fs_reg *)data;

   if (reg->file != UNIFORM)
      return;

   hash_table_insert(dst_ht, data, key);
}

/* For 16-wide, we need to re-use the uniform setup from the 8-wide
 * dispatch pass.  This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   hash_table_call_foreach(v->variable_ht,
                           import_uniforms_callback,
                           variable_ht);
   this->params_remap = v->params_remap;
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         unsigned int param = c->prog_data.nr_params++;

         assert(param < ARRAY_SIZE(c->prog_data.param));

         switch (type->base_type) {
         case GLSL_TYPE_FLOAT:
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         case GLSL_TYPE_UINT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
            break;
         case GLSL_TYPE_INT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
            break;
         case GLSL_TYPE_BOOL:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
            break;
         default:
            assert(!"not reached");
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         }
         this->param_index[param] = loc;
         this->param_offset[param] = i;
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been set up by ir_to_mesa, but
       * we'll get the same index back here.
       */
      int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         c->prog_data.param_convert[c->prog_data.nr_params] =
            PARAM_NO_CONVERT;
         this->param_index[c->prog_data.nr_params] = index;
         this->param_offset[c->prog_data.nr_params] = swiz;
         c->prog_data.nr_params++;
      }
   }
}

fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_x);
   } else {
      emit(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (!flip && ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_y);
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_MOV, wpos,
           fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
   } else {
      emit(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
           interp_reg(FRAG_ATTRIB_WPOS, 2));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in the interpolation setup code. */
   emit(BRW_OPCODE_MOV, wpos, this->wpos_w);

   return reg;
}

fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         fail("dereferenced array '%s' has length 0\n", ir->name);
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         bool is_gl_Color =
            location == FRAG_ATTRIB_COL0 || location == FRAG_ATTRIB_COL1;

         if (c->key.flat_shade && is_gl_Color) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
               emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr.reg_offset++;
            }
         } else {
            /* Perspective interpolation case. */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               /* FINISHME: At some point we probably want to push
                * this farther by giving similar treatment to the
                * other potentially constant components of the
                * attribute, as well as making brw_vs_constval.c
                * handle varyings other than gl_TexCoord.
                */
               if (location >= FRAG_ATTRIB_TEX0 &&
                   location <= FRAG_ATTRIB_TEX7 &&
                   k == 3 && !(c->key.proj_attrib_mask & (1 << location))) {
                  emit(BRW_OPCODE_MOV, attr, fs_reg(1.0f));
               } else {
                  struct brw_reg interp = interp_reg(location, k);
                  emit(FS_OPCODE_LINTERP, attr,
                       this->delta_x, this->delta_y, fs_reg(interp));
               }
               attr.reg_offset++;
            }

            if (intel->gen < 6) {
               attr.reg_offset -= type->vector_elements;
               for (unsigned int k = 0; k < type->vector_elements; k++) {
                  emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
                  attr.reg_offset++;
               }
            }
         }
         location++;
      }
   }

   return reg;
}

fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_ASR, *reg,
           fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
           fs_reg(15));
      emit(BRW_OPCODE_NOT, *reg, *reg);
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front face
       */
      fs_inst *inst = emit(BRW_OPCODE_CMP, *reg,
                           fs_reg(r1_6ud),
                           fs_reg(1u << 31));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u));
   }

   return reg;
}

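/**
 * Emits a math instruction, papering over the difference between the
 * send-style math of gen4/5 (which uses MRFs) and the ALU-style math of
 * gen6+ (which has restrictions on its source operands).
 */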
fs_inst *
fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out. We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    *
    * The hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
   if (intel->gen >= 6 && (src.file == UNIFORM ||
                           src.abs ||
                           src.negate)) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(BRW_OPCODE_MOV, expanded, src);
      src = expanded;
   }

   fs_inst *inst = emit(opcode, dst, src);

   if (intel->gen < 6) {
      inst->base_mrf = 2;
      inst->mlen = c->dispatch_width / 8;
   }

   return inst;
}

fs_inst *
fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   int base_mrf = 2;
   fs_inst *inst;

   assert(opcode == SHADER_OPCODE_POW);

   if (intel->gen >= 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out.
       *
       * The hardware ignores source modifiers (negate and abs) on math
       * instructions, so we also move to a temp to set those up.
       */
      if (src0.file == UNIFORM || src0.abs || src0.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(BRW_OPCODE_MOV, expanded, src0);
         src0 = expanded;
      }

      if (src1.file == UNIFORM || src1.abs || src1.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(BRW_OPCODE_MOV, expanded, src1);
         src1 = expanded;
      }

      inst = emit(opcode, dst, src0, src1);
   } else {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1);
      inst = emit(opcode, dst, src0, reg_null_f);

      inst->base_mrf = base_mrf;
      inst->mlen = 2 * c->dispatch_width / 8;
   }
   return inst;
}

/**
 * To be called after the last _mesa_add_state_reference() call, to
 * set up prog_data.param[] for assign_curb_setup() and
 * setup_pull_constants().
 */
void
fs_visitor::setup_paramvalues_refs()
{
   if (c->dispatch_width != 8)
      return;

   /* Set up the pointers to ParamValues now that that array is finalized. */
   for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
      c->prog_data.param[i] =
         (const float *)fp->Base.Parameters->ParameterValues[this->param_index[i]] +
         this->param_offset[i];
   }
}

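/**
 * Maps accesses to the UNIFORM register file onto the fixed payload
 * registers where the CURBE (push constant) data gets loaded.
 */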
void
fs_visitor::assign_curb_setup()
{
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
   if (c->dispatch_width == 8) {
      c->prog_data.first_curbe_grf = c->nr_payload_regs;
   } else {
      c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
   }

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
         }
      }
   }
}

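/**
 * Decides which URB setup slot each used fragment shader input attribute
 * will be read from, and computes the resulting urb_read_length.
 */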
void
fs_visitor::calculate_urb_setup()
{
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      urb_setup[i] = -1;
   }

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (intel->gen >= 6) {
      for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
         if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
         }
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
         if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
            int fp_index;

            if (i >= VERT_RESULT_VAR0)
               fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
            else if (i <= VERT_RESULT_TEX7)
               fp_index = i;
            else
               fp_index = -1;

            if (fp_index >= 0)
               urb_setup[fp_index] = urb_next++;
         }
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   c->prog_data.urb_read_length = urb_next * 2;
}

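/**
 * Points the interpolation instructions' setup-register references at
 * their final locations, now that the payload and CURBE sizes are known.
 */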
void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;

   /* Offset all the urb_setup[] indices by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[2].file == FIXED_HW_REG);
         inst->src[2].fixed_hw_reg.nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_HW_REG);
         inst->src[0].fixed_hw_reg.nr += urb_start;
      }
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}

/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This mostly duplicates what brw_fs_vector_splitting does, but that
 * pass is really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization.  We can do it once here, safely.  This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs.  If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
void
fs_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_next;
   bool split_grf[num_vars];
   int new_virtual_grf[num_vars];

   /* Try to split anything > 1 sized. */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] != 1)
         split_grf[i] = true;
      else
         split_grf[i] = false;
   }

   if (brw->has_pln) {
      /* PLN opcodes rely on the delta_xy being contiguous. */
      split_grf[this->delta_x.reg] = false;
   }

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      /* Texturing produces 4 contiguous registers, so no splitting. */
      if (inst->is_tex()) {
         split_grf[inst->dst.reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (split_grf[i]) {
         new_virtual_grf[i] = virtual_grf_alloc(1);
         for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
            int reg = virtual_grf_alloc(1);
            assert(reg == new_virtual_grf[i] + j - 1);
            (void) reg;
         }
         this->virtual_grf_sizes[i] = 1;
      }
   }

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->dst.file == GRF &&
          split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   this->live_intervals_valid = false;
}

bool
fs_visitor::remove_dead_constants()
{
   if (c->dispatch_width == 8) {
      this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);

      for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
         this->params_remap[i] = -1;

      /* Find which params are still in use. */
      foreach_list(node, &this->instructions) {
         fs_inst *inst = (fs_inst *)node;

         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file != UNIFORM)
               continue;

            int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;

            assert(constant_nr < (int)c->prog_data.nr_params);

            /* For now, set this to non-negative.  We'll give it the
             * actual new number in a moment, in order to keep the
             * register numbers nicely ordered.
             */
            this->params_remap[constant_nr] = 0;
         }
      }

      /* Figure out what the new numbers for the params will be.  At some
       * point when we're doing uniform array access, we're going to want
       * to keep the distinction between .reg and .reg_offset, but for
       * now we don't care.
       */
      unsigned int new_nr_params = 0;
      for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
         if (this->params_remap[i] != -1) {
            this->params_remap[i] = new_nr_params++;
         }
      }

      /* Update the list of params to be uploaded to match our new numbering. */
      for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
         int remapped = this->params_remap[i];

         if (remapped == -1)
            continue;

         /* We've already done setup_paramvalues_refs() so no need to worry
          * about param_index and param_offset.
          */
         c->prog_data.param[remapped] = c->prog_data.param[i];
         c->prog_data.param_convert[remapped] = c->prog_data.param_convert[i];
      }

      c->prog_data.nr_params = new_nr_params;
   } else {
      /* This should have been generated in the 8-wide pass already. */
      assert(this->params_remap);
   }

   /* Now do the renumbering of the shader to remove unused params. */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;

         assert(this->params_remap[constant_nr] != -1);
         inst->src[i].reg = this->params_remap[constant_nr];
         inst->src[i].reg_offset = 0;
      }
   }

   return true;
}

/**
 * Choose accesses from the UNIFORM file to demote to using the pull
 * constant buffer.
 *
 * We allow a fragment shader to use more than the GL-required minimum of
 * 64 fragment shader uniform components.  If there are too many of these,
 * they'd fill up all of register space.  So, this will push some of them
 * out to the pull constant buffer and update the program to load them
 * from there.
 */
void
fs_visitor::setup_pull_constants()
{
   /* Only allow 16 registers (128 uniform components) as push constants. */
   unsigned int max_uniform_components = 16 * 8;
   if (c->prog_data.nr_params <= max_uniform_components)
      return;

   if (c->dispatch_width == 16) {
      fail("Pull constants not supported in 16-wide\n");
      return;
   }

   /* Just demote the end of the list.  We could probably do better
    * here, demoting things that are rarely used in the program first.
    */
   int pull_uniform_base = max_uniform_components;
   int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
         if (uniform_nr < pull_uniform_base)
            continue;

         fs_reg dst = fs_reg(this, glsl_type::float_type);
         fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
                                              dst);
         pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
         pull->ir = inst->ir;
         pull->annotation = inst->annotation;
         pull->base_mrf = 14;
         pull->mlen = 1;

         inst->insert_before(pull);

         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;
         inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
      }
   }

   for (int i = 0; i < pull_uniform_count; i++) {
      c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
      c->prog_data.pull_param_convert[i] =
         c->prog_data.param_convert[pull_uniform_base + i];
   }
   c->prog_data.nr_params -= pull_uniform_count;
   c->prog_data.nr_pull_params = pull_uniform_count;
}

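/**
 * Computes a conservative [def, use] interval for each virtual GRF:
 * anything written or read inside a loop is treated as live across the
 * entire loop, since we don't track control flow precisely.
 */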
void
fs_visitor::calculate_live_intervals()
{
   int num_vars = this->virtual_grf_next;
   int *def = ralloc_array(mem_ctx, int, num_vars);
   int *use = ralloc_array(mem_ctx, int, num_vars);
   int loop_depth = 0;
   int loop_start = 0;

   if (this->live_intervals_valid)
      return;

   for (int i = 0; i < num_vars; i++) {
      def[i] = MAX_INSTRUCTION;
      use[i] = -1;
   }

   int ip = 0;
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->opcode == BRW_OPCODE_DO) {
         if (loop_depth++ == 0)
            loop_start = ip;
      } else if (inst->opcode == BRW_OPCODE_WHILE) {
         loop_depth--;

         if (loop_depth == 0) {
            /* Patches up the use of vars marked for being live across
             * the whole loop.
             */
            for (int i = 0; i < num_vars; i++) {
               if (use[i] == loop_start) {
                  use[i] = ip;
               }
            }
         }
      } else {
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF) {
               int reg = inst->src[i].reg;

               if (!loop_depth) {
                  use[reg] = ip;
               } else {
                  def[reg] = MIN2(loop_start, def[reg]);
                  use[reg] = loop_start;

                  /* Nobody else is going to go smash our start to
                   * later in the loop now, because def[reg] now
                   * points before the bb header.
                   */
               }
            }
         }
         if (inst->dst.file == GRF) {
            int reg = inst->dst.reg;

            if (!loop_depth) {
               def[reg] = MIN2(def[reg], ip);
            } else {
               def[reg] = MIN2(def[reg], loop_start);
            }
         }
      }

      ip++;
   }

   ralloc_free(this->virtual_grf_def);
   ralloc_free(this->virtual_grf_use);
   this->virtual_grf_def = def;
   this->virtual_grf_use = use;

   this->live_intervals_valid = true;
}

/**
 * Attempts to move immediate constants into the immediate
 * constant slot of following instructions.
 *
 * Immediate constants are a bit tricky -- they have to be in the last
 * operand slot, and you can't do abs/negate on them.
 */
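/* For example (a rough sketch of the IR, not actual generated code):
 *
 *    mov vgrf1, 2.0f
 *    mul vgrf2, vgrf0, vgrf1
 *
 * can become
 *
 *    mov vgrf1, 2.0f
 *    mul vgrf2, vgrf0, 2.0f
 *
 * leaving the now-unused MOV for dead_code_eliminate() to clean up.
 */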

bool
fs_visitor::propagate_constants()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != GRF || inst->src[0].file != IMM ||
          inst->dst.type != inst->src[0].type ||
          (c->dispatch_width == 16 &&
           (inst->force_uncompressed || inst->force_sechalf)))
         continue;

      /* Don't bother with cases where we should have had the
       * operation on the constant folded in GLSL already.
       */
      if (inst->saturate)
         continue;

      /* Found a move of a constant to a GRF.  Find anything else using the GRF
       * before it's written, and replace it with the constant if we can.
       */
      for (fs_inst *scan_inst = (fs_inst *)inst->next;
           !scan_inst->is_tail_sentinel();
           scan_inst = (fs_inst *)scan_inst->next) {
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         for (int i = 2; i >= 0; i--) {
            if (scan_inst->src[i].file != GRF ||
                scan_inst->src[i].reg != inst->dst.reg ||
                scan_inst->src[i].reg_offset != inst->dst.reg_offset)
               continue;

            /* Don't bother with cases where we should have had the
             * operation on the constant folded in GLSL already.
             */
            if (scan_inst->src[i].negate || scan_inst->src[i].abs)
               continue;

            switch (scan_inst->opcode) {
            case BRW_OPCODE_MOV:
               scan_inst->src[i] = inst->src[0];
               progress = true;
               break;

            case BRW_OPCODE_MUL:
            case BRW_OPCODE_ADD:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  /* Fit this constant in by commuting the operands */
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];
                  progress = true;
               }
               break;

            case BRW_OPCODE_CMP:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  uint32_t new_cmod;

                  new_cmod = brw_swap_cmod(scan_inst->conditional_mod);
                  if (new_cmod != ~0u) {
                     /* Fit this constant in by swapping the operands and
                      * flipping the test
                      */
                     scan_inst->src[0] = scan_inst->src[1];
                     scan_inst->src[1] = inst->src[0];
                     scan_inst->conditional_mod = new_cmod;
                     progress = true;
                  }
               }
               break;

            case BRW_OPCODE_SEL:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];

                  /* If this was predicated, flipping operands means
                   * we also need to flip the predicate.
                   */
                  if (scan_inst->conditional_mod == BRW_CONDITIONAL_NONE) {
                     scan_inst->predicate_inverse =
                        !scan_inst->predicate_inverse;
                  }
                  progress = true;
               }
               break;

            case SHADER_OPCODE_RCP:
               /* The hardware doesn't do math on immediate values
                * (because why are you doing that, seriously?), but
                * the correct answer is to just constant fold it
                * anyway.
                */
               assert(i == 0);
               if (inst->src[0].imm.f != 0.0f) {
                  scan_inst->opcode = BRW_OPCODE_MOV;
                  scan_inst->src[0] = inst->src[0];
                  scan_inst->src[0].imm.f = 1.0f / scan_inst->src[0].imm.f;
                  progress = true;
               }
               break;

            default:
               break;
            }
         }

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->dst.reg &&
             (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
              scan_inst->is_tex())) {
            break;
         }
      }
   }

   if (progress)
      this->live_intervals_valid = false;

   return progress;
}

/**
 * Looks for algebraic simplifications of instructions involving
 * immediate operands, such as multiplication by 1.0, and rewrites them
 * into cheaper equivalents.
 */
bool
fs_visitor::opt_algebraic()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_MUL:
         if (inst->src[1].file != IMM)
            continue;

         /* a * 1.0 = a */
         if (inst->src[1].type == BRW_REGISTER_TYPE_F &&
             inst->src[1].imm.f == 1.0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         break;
      default:
         break;
      }
   }

   return progress;
}

/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something def'd but not used won't be considered to
 * interfere with other regs.
 */
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

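/**
 * Removes redundant GRF-to-GRF (or uniform-to-GRF) MOVs by rewriting
 * later readers of the MOV's destination to read its source directly.
 * Roughly (a sketch of the IR, not actual generated code):
 *
 *    mov vgrf1, vgrf0
 *    add vgrf2, vgrf1, vgrf3
 *
 * becomes
 *
 *    add vgrf2, vgrf0, vgrf3
 */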
bool
fs_visitor::register_coalesce()
{
   bool progress = false;
   int if_depth = 0;
   int loop_depth = 0;

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      /* Make sure that we dominate the instructions we're going to
       * scan for interfering with our coalescing, or we won't have
       * scanned enough to see if anything interferes with our
       * coalescing.  We don't dominate the following instructions if
       * we're in a loop or an if block.
       */
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      case BRW_OPCODE_IF:
         if_depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if_depth--;
         break;
      default:
         break;
      }
      if (loop_depth || if_depth)
         continue;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->saturate ||
          inst->dst.file != GRF || (inst->src[0].file != GRF &&
                                    inst->src[0].file != UNIFORM) ||
          inst->dst.type != inst->src[0].type)
         continue;

      bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;

      /* Found a move of a GRF to a GRF.  Let's see if we can coalesce
       * them: check for no writes to either one until the exit of the
       * program.
       */
      bool interfered = false;

      for (fs_inst *scan_inst = (fs_inst *)inst->next;
           !scan_inst->is_tail_sentinel();
           scan_inst = (fs_inst *)scan_inst->next) {
         if (scan_inst->dst.file == GRF) {
            if (scan_inst->dst.reg == inst->dst.reg &&
                (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
            if (inst->src[0].file == GRF &&
                scan_inst->dst.reg == inst->src[0].reg &&
                (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
         }

         /* The gen6 MATH instruction can't handle source modifiers or
          * unusual register regions, so avoid coalescing those for
          * now.  We should do something more specific.
          */
         if (intel->gen >= 6 &&
             scan_inst->is_math() &&
             (has_source_modifiers || inst->src[0].file == UNIFORM)) {
            interfered = true;
            break;
         }
      }
      if (interfered) {
         continue;
      }

      /* Rewrite the later usage to point at the source of the move to
       * be removed.
       */
      for (fs_inst *scan_inst = inst;
           !scan_inst->is_tail_sentinel();
           scan_inst = (fs_inst *)scan_inst->next) {
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->dst.reg &&
                scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
               fs_reg new_src = inst->src[0];
               new_src.negate ^= scan_inst->src[i].negate;
               new_src.abs |= scan_inst->src[i].abs;
               scan_inst->src[i] = new_src;
            }
         }
      }

      inst->remove();
      progress = true;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

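/**
 * Tries to make the instruction that computed a value write it directly
 * into the MRF that a later MOV copies it to, so the copy can be removed.
 */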
bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
         continue;

      /* Work out which hardware MRF registers are written by this
       * instruction.
       */
      int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
      int mrf_high;
      if (inst->dst.reg & BRW_MRF_COMPR4) {
         mrf_high = mrf_low + 4;
      } else if (c->dispatch_width == 16 &&
                 (!inst->force_uncompressed && !inst->force_sechalf)) {
         mrf_high = mrf_low + 1;
      } else {
         mrf_high = mrf_low;
      }

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      fs_inst *scan_inst;
      for (scan_inst = (fs_inst *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (fs_inst *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            if (scan_inst->is_tex()) {
               /* texturing writes several contiguous regs, so we can't
                * compute-to-mrf that.
                */
               break;
            }

            /* If it's predicated, it (probably) didn't populate all
             * the channels.  We might be able to rewrite everything
             * that writes that reg, but it would require smarter
             * tracking to delay the rewriting until complete success.
             */
            if (scan_inst->predicated)
               break;

            /* If it's half of register setup and not the same half as
             * our MOV we're trying to remove, bail for now.
             */
            if (scan_inst->force_uncompressed != inst->force_uncompressed ||
                scan_inst->force_sechalf != inst->force_sechalf) {
               break;
            }

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value. */
               scan_inst->dst.file = MRF;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove();
               progress = true;
            }
            break;
         }

         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF) {
            /* If somebody else writes our MRF here, we can't
             * compute-to-MRF before that.
             */
            int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
            int scan_mrf_high;

            if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
               scan_mrf_high = scan_mrf_low + 4;
            } else if (c->dispatch_width == 16 &&
                       (!scan_inst->force_uncompressed &&
                        !scan_inst->force_sechalf)) {
               scan_mrf_high = scan_mrf_low + 1;
            } else {
               scan_mrf_high = scan_mrf_low;
            }

            if (mrf_low == scan_mrf_low ||
                mrf_low == scan_mrf_high ||
                mrf_high == scan_mrf_low ||
                mrf_high == scan_mrf_high) {
               break;
            }
         }

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (mrf_low >= scan_inst->base_mrf &&
                mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
            if (mrf_high >= scan_inst->base_mrf &&
                mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }
   }

   return progress;
}

/**
 * Walks through basic blocks, looking for repeated MRF writes and
 * removing the later ones.
 */
bool
fs_visitor::remove_duplicate_mrf_writes()
{
   fs_inst *last_mrf_move[16];
   bool progress = false;

   /* Need to update the MRF tracking for compressed instructions. */
   if (c->dispatch_width == 16)
      return false;

   memset(last_mrf_move, 0, sizeof(last_mrf_move));

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
      case BRW_OPCODE_WHILE:
      case BRW_OPCODE_IF:
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_ENDIF:
         memset(last_mrf_move, 0, sizeof(last_mrf_move));
         continue;
      default:
         break;
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF) {
         fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
         if (prev_inst && inst->equals(prev_inst)) {
            inst->remove();
            progress = true;
            continue;
         }
      }

      /* Clear out the last-write records for MRFs that were overwritten. */
      if (inst->dst.file == MRF) {
         last_mrf_move[inst->dst.reg] = NULL;
      }

      if (inst->mlen > 0) {
         /* Found a SEND instruction, which will include two or fewer
          * implied MRF writes.  We could do better here.
          */
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            last_mrf_move[inst->base_mrf + i] = NULL;
         }
      }

      /* Clear out any MRF move records whose sources got overwritten. */
      if (inst->dst.file == GRF) {
         for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
            if (last_mrf_move[i] &&
                last_mrf_move[i]->src[0].reg == inst->dst.reg) {
               last_mrf_move[i] = NULL;
            }
         }
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF &&
          inst->src[0].file == GRF &&
          !inst->predicated) {
         last_mrf_move[inst->dst.reg] = inst;
      }
   }

   return progress;
}

bool
fs_visitor::virtual_grf_interferes(int a, int b)
{
   int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
   int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);

   /* We can't handle dead register writes here, without iterating
    * over the whole instruction stream to find every single dead
    * write to that register to compare to the live interval of the
    * other register.  Just assert that dead_code_eliminate() has been
    * called.
    */
   assert((this->virtual_grf_use[a] != -1 ||
           this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
          (this->virtual_grf_use[b] != -1 ||
           this->virtual_grf_def[b] == MAX_INSTRUCTION));

   /* If the register is used to store 16 values of less than float
    * size (only the case for pixel_[xy]), then we can't allocate
    * another dword-sized thing to that register that would be used in
    * the same instruction.  This is because when the GPU decodes (for
    * example):
    *
    * (declare (in ) vec4 gl_FragCoord@0x97766a0)
    * add(16)         g6<1>F          g6<8,8,1>UW     0.5F { align1 compr };
    *
    * it's actually processed as:
    * add(8)          g6<1>F          g6<8,8,1>UW     0.5F { align1 };
    * add(8)          g7<1>F          g6.8<8,8,1>UW   0.5F { align1 sechalf };
    *
    * so our second half values in g6 got overwritten in the first
    * half.
    */
   if (c->dispatch_width == 16 && (this->pixel_x.reg == a ||
                                   this->pixel_x.reg == b ||
                                   this->pixel_y.reg == a ||
                                   this->pixel_y.reg == b)) {
      return start <= end;
   }

   return start < end;
}

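/**
 * Runs the whole backend for one dispatch width: translates the GLSL IR
 * to FS IR, runs the optimization loop, allocates registers, and
 * generates native code.  Returns false (with fail_msg set) on failure.
 */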
bool
fs_visitor::run()
{
   uint32_t prog_offset_16 = 0;
   uint32_t orig_nr_params = c->prog_data.nr_params;

   brw_wm_payload_setup(brw, c);

   if (c->dispatch_width == 16) {
      /* Align to a 64 byte boundary. */
      while ((c->func.nr_insn * sizeof(struct brw_instruction)) % 64) {
         brw_NOP(p);
      }

      /* Save off the start of this 16-wide program in case we succeed. */
      prog_offset_16 = c->func.nr_insn * sizeof(struct brw_instruction);

      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
   }

   if (0) {
      emit_dummy_fs();
   } else {
      calculate_urb_setup();
      if (intel->gen < 6)
         emit_interpolation_setup_gen4();
      else
         emit_interpolation_setup_gen6();

      /* Generate FS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      foreach_list(node, &*shader->ir) {
         ir_instruction *ir = (ir_instruction *)node;
         base_ir = ir;
         this->result = reg_undef;
         ir->accept(this);
      }
      if (failed)
         return false;

      emit_fb_writes();

      split_virtual_grfs();

      setup_paramvalues_refs();
      setup_pull_constants();

      bool progress;
      do {
         progress = false;

         progress = remove_duplicate_mrf_writes() || progress;

         progress = propagate_constants() || progress;
         progress = opt_algebraic() || progress;
         progress = register_coalesce() || progress;
         progress = compute_to_mrf() || progress;
         progress = dead_code_eliminate() || progress;
      } while (progress);

      remove_dead_constants();

      schedule_instructions();

      assign_curb_setup();
      assign_urb_setup();

      if (0) {
         /* Debug of register spilling: Go spill everything. */
         int virtual_grf_count = virtual_grf_next;
         for (int i = 0; i < virtual_grf_count; i++) {
            spill_reg(i);
         }
      }

      if (0)
         assign_regs_trivial();
      else {
         while (!assign_regs()) {
            if (failed)
               break;
         }
      }
   }
   assert(force_uncompressed_stack == 0);
   assert(force_sechalf_stack == 0);

   if (failed)
      return false;

   generate_code();

   if (c->dispatch_width == 8) {
      c->prog_data.reg_blocks = brw_register_blocks(grf_used);
   } else {
      c->prog_data.reg_blocks_16 = brw_register_blocks(grf_used);
      c->prog_data.prog_offset_16 = prog_offset_16;

      /* Make sure we didn't try to sneak in an extra uniform */
      assert(orig_nr_params == c->prog_data.nr_params);
   }

   return !failed;
}

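/**
 * Compiles the 8-wide fragment shader, and the 16-wide variant when
 * possible.  On failure, the compile error is handed back through the
 * program's LinkStatus and InfoLog instead of crashing the driver.
 */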
bool
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
               struct gl_shader_program *prog)
{
   struct intel_context *intel = &brw->intel;

   if (!prog)
      return false;

   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (!shader)
      return false;

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   c->dispatch_width = 8;

   fs_visitor v(c, prog, shader);
   if (!v.run()) {
      prog->LinkStatus = GL_FALSE;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);

      return false;
   }

   if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0) {
      c->dispatch_width = 16;
      fs_visitor v2(c, prog, shader);
      v2.import_uniforms(&v);
      v2.run();
   }

   c->prog_data.dispatch_width = 8;

   return true;
}

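/**
 * Does a compile of the fragment shader at link time with a plausible
 * guess at the non-orthogonal state that will be in effect at draw time,
 * so that compile failures can be reported at link time and the real
 * draw-time compile is likely to hit the program cache.
 */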
bool
brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_wm_prog_key key;
   struct gl_fragment_program *fp = prog->FragmentProgram;
   struct brw_fragment_program *bfp = brw_fragment_program(fp);

   if (!fp)
      return true;

   memset(&key, 0, sizeof(key));

   if (fp->UsesKill)
      key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

   if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
      key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

   /* Just assume depth testing. */
   key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
   key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

   key.vp_outputs_written |= BITFIELD64_BIT(FRAG_ATTRIB_WPOS);
   for (int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      int vp_index = -1;

      if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
         continue;

      key.proj_attrib_mask |= 1 << i;

      if (i <= FRAG_ATTRIB_TEX7)
         vp_index = i;
      else if (i >= FRAG_ATTRIB_VAR0)
         vp_index = i - FRAG_ATTRIB_VAR0 + VERT_RESULT_VAR0;

      if (vp_index >= 0)
         key.vp_outputs_written |= BITFIELD64_BIT(vp_index);
   }

   key.clamp_fragment_color = true;

   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (fp->Base.ShadowSamplers & (1 << i))
         key.compare_funcs[i] = GL_LESS;

      /* FINISHME: depth compares might use (0,0,0,W) for example */
      key.tex_swizzles[i] = SWIZZLE_XYZW;
   }

   if (fp->Base.InputsRead & FRAG_BIT_WPOS) {
      key.drawable_height = ctx->DrawBuffer->Height;
      key.render_to_fbo = ctx->DrawBuffer->Name != 0;
   }

   key.nr_color_regions = 1;

   key.program_string_id = bfp->id;

   uint32_t old_prog_offset = brw->wm.prog_offset;
   struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;

   bool success = do_wm_prog(brw, prog, bfp, &key);

   brw->wm.prog_offset = old_prog_offset;
   brw->wm.prog_data = old_prog_data;

   return success;
}