i965/vec4/gen8: Handle the MUL dest hazard exception
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_vec4.h"
25 #include "brw_cfg.h"
26 #include "brw_vs.h"
27 #include "brw_dead_control_flow.h"
28
29 extern "C" {
30 #include "main/macros.h"
31 #include "main/shaderobj.h"
32 #include "program/prog_print.h"
33 #include "program/prog_parameter.h"
34 }
35
36 #define MAX_INSTRUCTION (1 << 30)
37
38 using namespace brw;
39
40 namespace brw {
41
42 /**
43 * Common helper for constructing swizzles. When only a subset of
44 * channels of a vec4 are used, we don't want to reference the other
45 * channels, as that will tell optimization passes that those other
46 * channels are used.
47 */
48 unsigned
49 swizzle_for_size(int size)
50 {
51 static const unsigned size_swizzles[4] = {
52 BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
53 BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
54 BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
55 BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
56 };
57
58 assert((size >= 1) && (size <= 4));
59 return size_swizzles[size - 1];
60 }
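/* For example, swizzle_for_size(2) returns BRW_SWIZZLE4(X, Y, Y, Y): a
 * source built from a vec2 repeats .y into the unused channels and never
 * references .z or .w, so later passes can treat those channels as dead.
 */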
61
62 void
63 src_reg::init()
64 {
65 memset(this, 0, sizeof(*this));
66
67 this->file = BAD_FILE;
68 }
69
70 src_reg::src_reg(register_file file, int reg, const glsl_type *type)
71 {
72 init();
73
74 this->file = file;
75 this->reg = reg;
76 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
77 this->swizzle = swizzle_for_size(type->vector_elements);
78 else
79 this->swizzle = BRW_SWIZZLE_XYZW;
80 }
81
82 /** Generic unset register constructor. */
83 src_reg::src_reg()
84 {
85 init();
86 }
87
88 src_reg::src_reg(float f)
89 {
90 init();
91
92 this->file = IMM;
93 this->type = BRW_REGISTER_TYPE_F;
94 this->fixed_hw_reg.dw1.f = f;
95 }
96
97 src_reg::src_reg(uint32_t u)
98 {
99 init();
100
101 this->file = IMM;
102 this->type = BRW_REGISTER_TYPE_UD;
103 this->fixed_hw_reg.dw1.ud = u;
104 }
105
106 src_reg::src_reg(int32_t i)
107 {
108 init();
109
110 this->file = IMM;
111 this->type = BRW_REGISTER_TYPE_D;
112 this->fixed_hw_reg.dw1.d = i;
113 }
114
115 src_reg::src_reg(struct brw_reg reg)
116 {
117 init();
118
119 this->file = HW_REG;
120 this->fixed_hw_reg = reg;
121 this->type = reg.type;
122 }
123
124 src_reg::src_reg(dst_reg reg)
125 {
126 init();
127
128 this->file = reg.file;
129 this->reg = reg.reg;
130 this->reg_offset = reg.reg_offset;
131 this->type = reg.type;
132 this->reladdr = reg.reladdr;
133 this->fixed_hw_reg = reg.fixed_hw_reg;
134
135 int swizzles[4];
136 int next_chan = 0;
137 int last = 0;
138
139 for (int i = 0; i < 4; i++) {
140 if (!(reg.writemask & (1 << i)))
141 continue;
142
143 swizzles[next_chan++] = last = i;
144 }
145
146 for (; next_chan < 4; next_chan++) {
147 swizzles[next_chan] = last;
148 }
149
150 this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
151 swizzles[2], swizzles[3]);
152 }
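/* For example, a dst_reg with writemask .yw converts to a src_reg with
 * swizzle .ywww: only channels the destination actually wrote are ever
 * referenced, and the last written channel pads out the remainder.
 */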
153
154 void
155 dst_reg::init()
156 {
157 memset(this, 0, sizeof(*this));
158 this->file = BAD_FILE;
159 this->writemask = WRITEMASK_XYZW;
160 }
161
162 dst_reg::dst_reg()
163 {
164 init();
165 }
166
167 dst_reg::dst_reg(register_file file, int reg)
168 {
169 init();
170
171 this->file = file;
172 this->reg = reg;
173 }
174
175 dst_reg::dst_reg(register_file file, int reg, const glsl_type *type,
176 int writemask)
177 {
178 init();
179
180 this->file = file;
181 this->reg = reg;
182 this->type = brw_type_for_base_type(type);
183 this->writemask = writemask;
184 }
185
186 dst_reg::dst_reg(struct brw_reg reg)
187 {
188 init();
189
190 this->file = HW_REG;
191 this->fixed_hw_reg = reg;
192 this->type = reg.type;
193 }
194
195 dst_reg::dst_reg(src_reg reg)
196 {
197 init();
198
199 this->file = reg.file;
200 this->reg = reg.reg;
201 this->reg_offset = reg.reg_offset;
202 this->type = reg.type;
203 /* How should we do writemasking when converting from a src_reg? It seems
204 * pretty obvious that for src.xxxx the caller wants to write to src.x, but
205 * what about for src.wx? Just special-case src.xxxx for now.
206 */
207 if (reg.swizzle == BRW_SWIZZLE_XXXX)
208 this->writemask = WRITEMASK_X;
209 else
210 this->writemask = WRITEMASK_XYZW;
211 this->reladdr = reg.reladdr;
212 this->fixed_hw_reg = reg.fixed_hw_reg;
213 }
214
215 bool
216 vec4_instruction::is_send_from_grf()
217 {
218 switch (opcode) {
219 case SHADER_OPCODE_SHADER_TIME_ADD:
220 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
221 return true;
222 default:
223 return false;
224 }
225 }
226
227 bool
228 vec4_instruction::can_do_source_mods(struct brw_context *brw)
229 {
230 if (brw->gen == 6 && is_math())
231 return false;
232
233 if (is_send_from_grf())
234 return false;
235
236 if (!backend_instruction::can_do_source_mods())
237 return false;
238
239 return true;
240 }
241
242 /**
243 * Returns how many MRFs an opcode will write over.
244 *
245 * Note that this is not the 0 or 1 implied writes in an actual gen
246 * instruction -- the generate_* functions generate additional MOVs
247 * for setup.
248 */
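/* Roughly speaking: pre-Gen6 math messages take their operands from the MRF
 * file, so a one-operand SIN counts one MRF write and a two-operand POW
 * counts two, while sampler messages only count the header MRF when
 * header_present is set.
 */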
249 int
250 vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
251 {
252 if (inst->mlen == 0)
253 return 0;
254
255 switch (inst->opcode) {
256 case SHADER_OPCODE_RCP:
257 case SHADER_OPCODE_RSQ:
258 case SHADER_OPCODE_SQRT:
259 case SHADER_OPCODE_EXP2:
260 case SHADER_OPCODE_LOG2:
261 case SHADER_OPCODE_SIN:
262 case SHADER_OPCODE_COS:
263 return 1;
264 case SHADER_OPCODE_INT_QUOTIENT:
265 case SHADER_OPCODE_INT_REMAINDER:
266 case SHADER_OPCODE_POW:
267 return 2;
268 case VS_OPCODE_URB_WRITE:
269 return 1;
270 case VS_OPCODE_PULL_CONSTANT_LOAD:
271 return 2;
272 case SHADER_OPCODE_GEN4_SCRATCH_READ:
273 return 2;
274 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
275 return 3;
276 case GS_OPCODE_URB_WRITE:
277 case GS_OPCODE_URB_WRITE_ALLOCATE:
278 case GS_OPCODE_THREAD_END:
279 return 0;
280 case GS_OPCODE_FF_SYNC:
281 return 1;
282 case SHADER_OPCODE_SHADER_TIME_ADD:
283 return 0;
284 case SHADER_OPCODE_TEX:
285 case SHADER_OPCODE_TXL:
286 case SHADER_OPCODE_TXD:
287 case SHADER_OPCODE_TXF:
288 case SHADER_OPCODE_TXF_CMS:
289 case SHADER_OPCODE_TXF_MCS:
290 case SHADER_OPCODE_TXS:
291 case SHADER_OPCODE_TG4:
292 case SHADER_OPCODE_TG4_OFFSET:
293 return inst->header_present ? 1 : 0;
294 case SHADER_OPCODE_UNTYPED_ATOMIC:
295 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
296 return 0;
297 default:
298 unreachable("not reached");
299 }
300 }
301
302 bool
303 src_reg::equals(const src_reg &r) const
304 {
305 return (file == r.file &&
306 reg == r.reg &&
307 reg_offset == r.reg_offset &&
308 type == r.type &&
309 negate == r.negate &&
310 abs == r.abs &&
311 swizzle == r.swizzle &&
312 !reladdr && !r.reladdr &&
313 memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
314 sizeof(fixed_hw_reg)) == 0);
315 }
316
317 /* Replaces unused channels of a swizzle with channels that are used.
318 *
319 * For instance, this pass transforms
320 *
321 * mov vgrf4.yz, vgrf5.wxzy
322 *
323 * into
324 *
325 * mov vgrf4.yz, vgrf5.xxzx
326 *
327 * This eliminates false uses of some channels, letting dead code elimination
328 * remove the instructions that wrote them.
329 */
330 bool
331 vec4_visitor::opt_reduce_swizzle()
332 {
333 bool progress = false;
334
335 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
336 if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG)
337 continue;
338
339 int swizzle[4];
340
341 /* Determine which channels of the sources are read. */
342 switch (inst->opcode) {
343 case BRW_OPCODE_DP4:
344 case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
345 * but all four of src1.
346 */
347 swizzle[0] = 0;
348 swizzle[1] = 1;
349 swizzle[2] = 2;
350 swizzle[3] = 3;
351 break;
352 case BRW_OPCODE_DP3:
353 swizzle[0] = 0;
354 swizzle[1] = 1;
355 swizzle[2] = 2;
356 swizzle[3] = -1;
357 break;
358 case BRW_OPCODE_DP2:
359 swizzle[0] = 0;
360 swizzle[1] = 1;
361 swizzle[2] = -1;
362 swizzle[3] = -1;
363 break;
364 default:
365 swizzle[0] = inst->dst.writemask & WRITEMASK_X ? 0 : -1;
366 swizzle[1] = inst->dst.writemask & WRITEMASK_Y ? 1 : -1;
367 swizzle[2] = inst->dst.writemask & WRITEMASK_Z ? 2 : -1;
368 swizzle[3] = inst->dst.writemask & WRITEMASK_W ? 3 : -1;
369 break;
370 }
371
372 /* Resolve unread channels (-1) by assigning them the swizzle of the
373 * first channel that is used.
374 */
375 int first_used_channel = 0;
376 for (int i = 0; i < 4; i++) {
377 if (swizzle[i] != -1) {
378 first_used_channel = swizzle[i];
379 break;
380 }
381 }
382 for (int i = 0; i < 4; i++) {
383 if (swizzle[i] == -1) {
384 swizzle[i] = first_used_channel;
385 }
386 }
387
388 /* Update sources' swizzles. */
389 for (int i = 0; i < 3; i++) {
390 if (inst->src[i].file != GRF &&
391 inst->src[i].file != ATTR &&
392 inst->src[i].file != UNIFORM)
393 continue;
394
395 int swiz[4];
396 for (int j = 0; j < 4; j++) {
397 swiz[j] = BRW_GET_SWZ(inst->src[i].swizzle, swizzle[j]);
398 }
399
400 unsigned new_swizzle = BRW_SWIZZLE4(swiz[0], swiz[1], swiz[2], swiz[3]);
401 if (inst->src[i].swizzle != new_swizzle) {
402 inst->src[i].swizzle = new_swizzle;
403 progress = true;
404 }
405 }
406 }
407
408 if (progress)
409 invalidate_live_intervals();
410
411 return progress;
412 }
413
414 static bool
415 try_eliminate_instruction(vec4_instruction *inst, int new_writemask,
416 const struct brw_context *brw)
417 {
418 if (inst->has_side_effects())
419 return false;
420
421 if (new_writemask == 0) {
422 /* Don't dead code eliminate instructions that write to the
423 * accumulator as a side-effect. Instead just set the destination
424 * to the null register to free it.
425 */
426 if (inst->writes_accumulator || inst->writes_flag()) {
427 inst->dst = dst_reg(retype(brw_null_reg(), inst->dst.type));
428 } else {
429 inst->opcode = BRW_OPCODE_NOP;
430 }
431
432 return true;
433 } else if (inst->dst.writemask != new_writemask) {
434 switch (inst->opcode) {
435 case SHADER_OPCODE_TXF_CMS:
436 case SHADER_OPCODE_GEN4_SCRATCH_READ:
437 case VS_OPCODE_PULL_CONSTANT_LOAD:
438 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
439 break;
440 default:
441 /* Do not set a writemask on Gen6 for math instructions, those are
442 * executed using align1 mode that does not support a destination mask.
443 */
444 if (!(brw->gen == 6 && inst->is_math()) && !inst->is_tex()) {
445 inst->dst.writemask = new_writemask;
446 return true;
447 }
448 }
449 }
450
451 return false;
452 }
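/* For example, if every channel of an ADD turns out to be dead but the ADD
 * also writes the flag register, its destination is simply retargeted to the
 * null register; if only .zw are dead, the writemask is trimmed to .xy
 * (except for Gen6 math and texture results, which cannot take a
 * destination mask).
 */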
453
454 /**
455 * Must be called after calculate_live_intervals() to remove unused
456 * writes to registers -- register allocation will fail otherwise
457 * because something that is written (def'd) but never used won't be
458 * considered to interfere with other regs.
459 */
460 bool
461 vec4_visitor::dead_code_eliminate()
462 {
463 bool progress = false;
464 int pc = -1;
465
466 calculate_live_intervals();
467
468 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
469 pc++;
470
471 bool inst_writes_flag = false;
472 if (inst->dst.file != GRF) {
473 if (inst->dst.is_null() && inst->writes_flag()) {
474 inst_writes_flag = true;
475 } else {
476 continue;
477 }
478 }
479
480 if (inst->dst.file == GRF) {
481 int write_mask = inst->dst.writemask;
482
483 for (int c = 0; c < 4; c++) {
484 if (write_mask & (1 << c)) {
485 assert(this->virtual_grf_end[inst->dst.reg * 4 + c] >= pc);
486 if (this->virtual_grf_end[inst->dst.reg * 4 + c] == pc) {
487 write_mask &= ~(1 << c);
488 }
489 }
490 }
491
492 progress = try_eliminate_instruction(inst, write_mask, brw) ||
493 progress;
494 }
495
496 if (inst->predicate || inst->prev == NULL)
497 continue;
498
499 int dead_channels;
500 if (inst_writes_flag) {
501 /* Arbitrarily chosen, other than not being an xyzw writemask. */
502 #define FLAG_WRITEMASK (1 << 5)
503 dead_channels = inst->reads_flag() ? 0 : FLAG_WRITEMASK;
504 } else {
505 dead_channels = inst->dst.writemask;
506
507 for (int i = 0; i < 3; i++) {
508 if (inst->src[i].file != GRF ||
509 inst->src[i].reg != inst->dst.reg)
510 continue;
511
512 for (int j = 0; j < 4; j++) {
513 int swiz = BRW_GET_SWZ(inst->src[i].swizzle, j);
514 dead_channels &= ~(1 << swiz);
515 }
516 }
517 }
518
519 foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
520 inst, block) {
521 if (dead_channels == 0)
522 break;
523
524 if (inst_writes_flag) {
525 if (scan_inst->dst.is_null() && scan_inst->writes_flag()) {
526 scan_inst->opcode = BRW_OPCODE_NOP;
527 progress = true;
528 continue;
529 } else if (scan_inst->reads_flag()) {
530 break;
531 }
532 }
533
534 if (inst->dst.file == scan_inst->dst.file &&
535 inst->dst.reg == scan_inst->dst.reg &&
536 inst->dst.reg_offset == scan_inst->dst.reg_offset) {
537 int new_writemask = scan_inst->dst.writemask & ~dead_channels;
538
539 progress = try_eliminate_instruction(scan_inst, new_writemask, brw) ||
540 progress;
541 }
542
543 for (int i = 0; i < 3; i++) {
544 if (scan_inst->src[i].file != inst->dst.file ||
545 scan_inst->src[i].reg != inst->dst.reg)
546 continue;
547
548 for (int j = 0; j < 4; j++) {
549 int swiz = BRW_GET_SWZ(scan_inst->src[i].swizzle, j);
550 dead_channels &= ~(1 << swiz);
551 }
552 }
553 }
554 }
555
556 if (progress) {
557 foreach_block_and_inst_safe (block, backend_instruction, inst, cfg) {
558 if (inst->opcode == BRW_OPCODE_NOP) {
559 inst->remove(block);
560 }
561 }
562
563 invalidate_live_intervals();
564 }
565
566 return progress;
567 }
568
569 void
570 vec4_visitor::split_uniform_registers()
571 {
572 /* Prior to this, uniforms have been in an array sized according to
573 * the number of vector uniforms present, sparsely filled (so an
574 * aggregate results in reg indices being skipped over). Now we're
575 * going to cut those aggregates up so each .reg index is one
576 * vector. The goal is to make elimination of unused uniform
577 * components easier later.
578 */
579 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
580 for (int i = 0 ; i < 3; i++) {
581 if (inst->src[i].file != UNIFORM)
582 continue;
583
584 assert(!inst->src[i].reladdr);
585
586 inst->src[i].reg += inst->src[i].reg_offset;
587 inst->src[i].reg_offset = 0;
588 }
589 }
590
591 /* Update that everything is now vector-sized. */
592 for (int i = 0; i < this->uniforms; i++) {
593 this->uniform_size[i] = 1;
594 }
595 }
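/* For example (hypothetical numbering), a reference to the second column of
 * a mat2 uniform living at uniform reg 3 arrives here as reg 3 with
 * reg_offset 1; after this pass it is simply uniform reg 4 with reg_offset 0,
 * and every uniform reg refers to exactly one vec4.
 */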
596
597 void
598 vec4_visitor::pack_uniform_registers()
599 {
600 bool uniform_used[this->uniforms];
601 int new_loc[this->uniforms];
602 int new_chan[this->uniforms];
603
604 memset(uniform_used, 0, sizeof(uniform_used));
605 memset(new_loc, 0, sizeof(new_loc));
606 memset(new_chan, 0, sizeof(new_chan));
607
608 /* Find which uniform vectors are actually used by the program. We
609 * expect unused vector elements when we've moved array access out
610 * to pull constants, and from some GLSL code generators like wine.
611 */
612 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
613 for (int i = 0 ; i < 3; i++) {
614 if (inst->src[i].file != UNIFORM)
615 continue;
616
617 uniform_used[inst->src[i].reg] = true;
618 }
619 }
620
621 int new_uniform_count = 0;
622
623 /* Now, figure out a packing of the live uniform vectors into our
624 * push constants.
625 */
626 for (int src = 0; src < uniforms; src++) {
627 assert(src < uniform_array_size);
628 int size = this->uniform_vector_size[src];
629
630 if (!uniform_used[src]) {
631 this->uniform_vector_size[src] = 0;
632 continue;
633 }
634
635 int dst;
636 /* Find the lowest place we can slot this uniform in. */
637 for (dst = 0; dst < src; dst++) {
638 if (this->uniform_vector_size[dst] + size <= 4)
639 break;
640 }
641
642 if (src == dst) {
643 new_loc[src] = dst;
644 new_chan[src] = 0;
645 } else {
646 new_loc[src] = dst;
647 new_chan[src] = this->uniform_vector_size[dst];
648
649 /* Move the references to the data */
650 for (int j = 0; j < size; j++) {
651 stage_prog_data->param[dst * 4 + new_chan[src] + j] =
652 stage_prog_data->param[src * 4 + j];
653 }
654
655 this->uniform_vector_size[dst] += size;
656 this->uniform_vector_size[src] = 0;
657 }
658
659 new_uniform_count = MAX2(new_uniform_count, dst + 1);
660 }
661
662 this->uniforms = new_uniform_count;
663
664 /* Now, update the instructions for our repacked uniforms. */
665 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
666 for (int i = 0 ; i < 3; i++) {
667 int src = inst->src[i].reg;
668
669 if (inst->src[i].file != UNIFORM)
670 continue;
671
672 inst->src[i].reg = new_loc[src];
673
674 int sx = BRW_GET_SWZ(inst->src[i].swizzle, 0) + new_chan[src];
675 int sy = BRW_GET_SWZ(inst->src[i].swizzle, 1) + new_chan[src];
676 int sz = BRW_GET_SWZ(inst->src[i].swizzle, 2) + new_chan[src];
677 int sw = BRW_GET_SWZ(inst->src[i].swizzle, 3) + new_chan[src];
678 inst->src[i].swizzle = BRW_SWIZZLE4(sx, sy, sz, sw);
679 }
680 }
681 }
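/* For example (hypothetical layout): if uniform 0 is a live vec3, uniform 1
 * is unused and uniform 2 is a live float, the float is packed into the free
 * .w channel of slot 0 (new_loc = 0, new_chan = 3), its param pointers move
 * to param[3], its swizzle is shifted up by three channels, and the shader
 * ends up with a single push-constant vec4.
 */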
682
683 /**
684 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
685 *
686 * While GLSL IR also performs this optimization, we end up with it in
687 * our instruction stream for a couple of reasons. One is that we
688 * sometimes generate silly instructions, for example in array access
689 * where we'll generate "ADD offset, index, base" even if base is 0.
690 * The other is that GLSL IR's constant propagation doesn't track the
691 * components of aggregates, so some VS patterns (initialize matrix to
692 * 0, accumulate in vertex blending factors) end up breaking down to
693 * instructions involving 0.
694 */
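/* Concretely (register numbers are only illustrative):
 *
 *    add vgrf1, vgrf2, 0.0f   ->   mov vgrf1, vgrf2
 *    mul vgrf1, vgrf2, 1.0f   ->   mov vgrf1, vgrf2
 *    mul vgrf1, vgrf2, 0.0f   ->   mov vgrf1, 0.0f
 */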
695 bool
696 vec4_visitor::opt_algebraic()
697 {
698 bool progress = false;
699
700 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
701 switch (inst->opcode) {
702 case BRW_OPCODE_ADD:
703 if (inst->src[1].is_zero()) {
704 inst->opcode = BRW_OPCODE_MOV;
705 inst->src[1] = src_reg();
706 progress = true;
707 }
708 break;
709
710 case BRW_OPCODE_MUL:
711 if (inst->src[1].is_zero()) {
712 inst->opcode = BRW_OPCODE_MOV;
713 switch (inst->src[0].type) {
714 case BRW_REGISTER_TYPE_F:
715 inst->src[0] = src_reg(0.0f);
716 break;
717 case BRW_REGISTER_TYPE_D:
718 inst->src[0] = src_reg(0);
719 break;
720 case BRW_REGISTER_TYPE_UD:
721 inst->src[0] = src_reg(0u);
722 break;
723 default:
724 unreachable("not reached");
725 }
726 inst->src[1] = src_reg();
727 progress = true;
728 } else if (inst->src[1].is_one()) {
729 inst->opcode = BRW_OPCODE_MOV;
730 inst->src[1] = src_reg();
731 progress = true;
732 }
733 break;
734 case SHADER_OPCODE_RCP: {
735 vec4_instruction *prev = (vec4_instruction *)inst->prev;
736 if (prev->opcode == SHADER_OPCODE_SQRT) {
737 if (inst->src[0].equals(src_reg(prev->dst))) {
738 inst->opcode = SHADER_OPCODE_RSQ;
739 inst->src[0] = prev->src[0];
740 progress = true;
741 }
742 }
743 break;
744 }
745 default:
746 break;
747 }
748 }
749
750 if (progress)
751 invalidate_live_intervals();
752
753 return progress;
754 }
755
756 /**
757 * Only a limited number of hardware registers may be used for push
758 * constants, so this turns access to the overflowed constants into
759 * pull constants.
760 */
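/* For example, with the Gen6 limit of 32 push-constant registers (64 vec4s),
 * a shader using 70 vec4 uniforms keeps the first 64 in push constants;
 * uniforms 64..69 each get a pull-constant slot (reusing an existing slot if
 * the same four params were already pulled for an array access), and every
 * read of them is rewritten to load into a temporary vec4 first.
 */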
761 void
762 vec4_visitor::move_push_constants_to_pull_constants()
763 {
764 int pull_constant_loc[this->uniforms];
765
766 /* Only allow 32 registers (256 uniform components) as push constants,
767 * which is the limit on gen6.
768 *
769 * If changing this value, note the limitation about total_regs in
770 * brw_curbe.c.
771 */
772 int max_uniform_components = 32 * 8;
773 if (this->uniforms * 4 <= max_uniform_components)
774 return;
775
776 /* Make some sort of choice as to which uniforms get sent to pull
777 * constants. We could potentially do something clever here like
778 * look for the most infrequently used uniform vec4s, but leave
779 * that for later.
780 */
781 for (int i = 0; i < this->uniforms * 4; i += 4) {
782 pull_constant_loc[i / 4] = -1;
783
784 if (i >= max_uniform_components) {
785 const gl_constant_value **values = &stage_prog_data->param[i];
786
787 /* Try to find an existing copy of this uniform in the pull
788 * constants if it was part of an array access already.
789 */
790 for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
791 int matches;
792
793 for (matches = 0; matches < 4; matches++) {
794 if (stage_prog_data->pull_param[j + matches] != values[matches])
795 break;
796 }
797
798 if (matches == 4) {
799 pull_constant_loc[i / 4] = j / 4;
800 break;
801 }
802 }
803
804 if (pull_constant_loc[i / 4] == -1) {
805 assert(stage_prog_data->nr_pull_params % 4 == 0);
806 pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;
807
808 for (int j = 0; j < 4; j++) {
809 stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
810 values[j];
811 }
812 }
813 }
814 }
815
816 /* Now actually rewrite usage of the things we've moved to pull
817 * constants.
818 */
819 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
820 for (int i = 0 ; i < 3; i++) {
821 if (inst->src[i].file != UNIFORM ||
822 pull_constant_loc[inst->src[i].reg] == -1)
823 continue;
824
825 int uniform = inst->src[i].reg;
826
827 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
828
829 emit_pull_constant_load(block, inst, temp, inst->src[i],
830 pull_constant_loc[uniform]);
831
832 inst->src[i].file = temp.file;
833 inst->src[i].reg = temp.reg;
834 inst->src[i].reg_offset = temp.reg_offset;
835 inst->src[i].reladdr = NULL;
836 }
837 }
838
839 /* Repack push constants to remove the now-unused ones. */
840 pack_uniform_registers();
841 }
842
843 /* Conditions for which we want to avoid setting the dependency control bits */
844 bool
845 vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
846 {
847 #define IS_DWORD(reg) \
848 (reg.type == BRW_REGISTER_TYPE_UD || \
849 reg.type == BRW_REGISTER_TYPE_D)
850
851 /* From the destination hazard section of the spec:
852 * > Instructions other than send, may use this control as long as operations
853 * > that have different pipeline latencies are not mixed.
854 */
855 if (brw->gen >= 8) {
856 if (inst->opcode == BRW_OPCODE_MUL &&
857 IS_DWORD(inst->src[0]) &&
858 IS_DWORD(inst->src[1]))
859 return true;
860 }
861 #undef IS_DWORD
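/* Illustration (hypothetical registers): on Gen8+ the check above treats
 *
 *    mul vgrf1.x:D, vgrf2:D, vgrf3:D
 *
 * as unsafe for dependency control, since a DWord integer multiply
 * presumably does not share the pipeline latency of the surrounding ALU
 * work, and chaining it into a NoDDClr/NoDDChk sequence would violate the
 * rule quoted above.
 */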
862
863 /*
864 * mlen:
865 * In the presence of send messages, totally interrupt dependency
866 * control. They're long enough that the chance of dependency
867 * control around them just doesn't matter.
868 *
869 * predicate:
870 * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
871 * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
872 * completes the scoreboard clear must have a non-zero execution mask. This
873 * means, if any kind of predication can change the execution mask or channel
874 * enable of the last instruction, the optimization must be avoided. This is
875 * to avoid instructions being shot down the pipeline when no writes are
876 * required.
877 *
878 * math:
879 * Dependency control does not work well over math instructions.
880 * NB: Discovered empirically
881 */
882 return (inst->mlen || inst->predicate || inst->is_math());
883 }
884
885 /**
886 * Sets the dependency control fields on instructions after register
887 * allocation and before the generator is run.
888 *
889 * When you have a sequence of instructions like:
890 *
891 * DP4 temp.x vertex uniform[0]
892 * DP4 temp.y vertex uniform[0]
893 * DP4 temp.z vertex uniform[0]
894 * DP4 temp.w vertex uniform[0]
895 *
896 * The hardware doesn't know that it can actually run the later instructions
897 * while the previous ones are in flight, producing stalls. However, we have
898 * manual fields we can set in the instructions that let it do so.
899 */
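/* With the dependency control set by this pass, the sequence above becomes:
 *
 *    DP4 temp.x vertex uniform[0]   { NoDDClr }
 *    DP4 temp.y vertex uniform[0]   { NoDDClr, NoDDChk }
 *    DP4 temp.z vertex uniform[0]   { NoDDClr, NoDDChk }
 *    DP4 temp.w vertex uniform[0]   { NoDDChk }
 *
 * so the destination dependency is neither set nor checked between the
 * writes to disjoint channels of temp.
 */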
900 void
901 vec4_visitor::opt_set_dependency_control()
902 {
903 vec4_instruction *last_grf_write[BRW_MAX_GRF];
904 uint8_t grf_channels_written[BRW_MAX_GRF];
905 vec4_instruction *last_mrf_write[BRW_MAX_GRF];
906 uint8_t mrf_channels_written[BRW_MAX_GRF];
907
908 assert(prog_data->total_grf ||
909 !"Must be called after register allocation");
910
911 foreach_block (block, cfg) {
912 memset(last_grf_write, 0, sizeof(last_grf_write));
913 memset(last_mrf_write, 0, sizeof(last_mrf_write));
914
915 foreach_inst_in_block (vec4_instruction, inst, block) {
916 /* If we read from a register that we were doing dependency control
917 * on, don't do dependency control across the read.
918 */
919 for (int i = 0; i < 3; i++) {
920 int reg = inst->src[i].reg + inst->src[i].reg_offset;
921 if (inst->src[i].file == GRF) {
922 last_grf_write[reg] = NULL;
923 } else if (inst->src[i].file == HW_REG) {
924 memset(last_grf_write, 0, sizeof(last_grf_write));
925 break;
926 }
927 assert(inst->src[i].file != MRF);
928 }
929
930 if (is_dep_ctrl_unsafe(inst)) {
931 memset(last_grf_write, 0, sizeof(last_grf_write));
932 memset(last_mrf_write, 0, sizeof(last_mrf_write));
933 continue;
934 }
935
936 /* Now, see if we can do dependency control for this instruction
937 * against a previous one writing to its destination.
938 */
939 int reg = inst->dst.reg + inst->dst.reg_offset;
940 if (inst->dst.file == GRF) {
941 if (last_grf_write[reg] &&
942 !(inst->dst.writemask & grf_channels_written[reg])) {
943 last_grf_write[reg]->no_dd_clear = true;
944 inst->no_dd_check = true;
945 } else {
946 grf_channels_written[reg] = 0;
947 }
948
949 last_grf_write[reg] = inst;
950 grf_channels_written[reg] |= inst->dst.writemask;
951 } else if (inst->dst.file == MRF) {
952 if (last_mrf_write[reg] &&
953 !(inst->dst.writemask & mrf_channels_written[reg])) {
954 last_mrf_write[reg]->no_dd_clear = true;
955 inst->no_dd_check = true;
956 } else {
957 mrf_channels_written[reg] = 0;
958 }
959
960 last_mrf_write[reg] = inst;
961 mrf_channels_written[reg] |= inst->dst.writemask;
962 } else if (inst->dst.file == HW_REG) {
963 if (inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)
964 memset(last_grf_write, 0, sizeof(last_grf_write));
965 if (inst->dst.fixed_hw_reg.file == BRW_MESSAGE_REGISTER_FILE)
966 memset(last_mrf_write, 0, sizeof(last_mrf_write));
967 }
968 }
969 }
970 }
971
972 bool
973 vec4_instruction::can_reswizzle(int dst_writemask,
974 int swizzle,
975 int swizzle_mask)
976 {
977 /* If this instruction sets anything not referenced by swizzle, then we'd
978 * totally break it when we reswizzle.
979 */
980 if (dst.writemask & ~swizzle_mask)
981 return false;
982
983 if (mlen > 0)
984 return false;
985
986 return true;
987 }
988
989 /**
990 * For any channels in the swizzle's source that were populated by this
991 * instruction, rewrite the instruction to put the appropriate result directly
992 * in those channels.
993 *
994 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy c.yy_x
995 */
996 void
997 vec4_instruction::reswizzle(int dst_writemask, int swizzle)
998 {
999 int new_writemask = 0;
1000 int new_swizzle[4] = { 0 };
1001
1002 /* Dot product instructions write a single result into all channels. */
1003 if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
1004 opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2) {
1005 for (int i = 0; i < 3; i++) {
1006 if (src[i].file == BAD_FILE || src[i].file == IMM)
1007 continue;
1008
1009 for (int c = 0; c < 4; c++) {
1010 new_swizzle[c] = BRW_GET_SWZ(src[i].swizzle, BRW_GET_SWZ(swizzle, c));
1011 }
1012
1013 src[i].swizzle = BRW_SWIZZLE4(new_swizzle[0], new_swizzle[1],
1014 new_swizzle[2], new_swizzle[3]);
1015 }
1016 }
1017
1018 for (int c = 0; c < 4; c++) {
1019 int bit = 1 << BRW_GET_SWZ(swizzle, c);
1020 /* Skip components of the swizzle not used by the dst. */
1021 if (!(dst_writemask & (1 << c)))
1022 continue;
1023 /* If we were populating this component, then populate the
1024 * corresponding channel of the new dst.
1025 */
1026 if (dst.writemask & bit)
1027 new_writemask |= (1 << c);
1028 }
1029 dst.writemask = new_writemask;
1030 }
1031
1032 /*
1033 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
1034 * just written and then MOVed into another reg and making the original write
1035 * of the GRF write directly to the final destination instead.
1036 */
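/* For example (hypothetical registers), assuming vgrf3 is not read again:
 *
 *    mul vgrf3.xy, vgrf1, vgrf2
 *    mov m4.xy, vgrf3.xyyy
 *
 * becomes
 *
 *    mul m4.xy, vgrf1.xyyy, vgrf2.xyyy
 *
 * after the MUL is reswizzled and retargeted and the MOV is removed.
 */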
1037 bool
1038 vec4_visitor::opt_register_coalesce()
1039 {
1040 bool progress = false;
1041 int next_ip = 0;
1042
1043 calculate_live_intervals();
1044
1045 foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
1046 int ip = next_ip;
1047 next_ip++;
1048
1049 if (inst->opcode != BRW_OPCODE_MOV ||
1050 (inst->dst.file != GRF && inst->dst.file != MRF) ||
1051 inst->predicate ||
1052 inst->src[0].file != GRF ||
1053 inst->dst.type != inst->src[0].type ||
1054 inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
1055 continue;
1056
1057 bool to_mrf = (inst->dst.file == MRF);
1058
1059 /* Can't coalesce this GRF if someone else was going to
1060 * read it later.
1061 */
1062 if (this->virtual_grf_end[inst->src[0].reg * 4 + 0] > ip ||
1063 this->virtual_grf_end[inst->src[0].reg * 4 + 1] > ip ||
1064 this->virtual_grf_end[inst->src[0].reg * 4 + 2] > ip ||
1065 this->virtual_grf_end[inst->src[0].reg * 4 + 3] > ip)
1066 continue;
1067
1068 /* We need to check interference with the final destination between this
1069 * instruction and the earliest instruction involved in writing the GRF
1070 * we're eliminating. To do that, keep track of which of our source
1071 * channels we've seen initialized.
1072 */
1073 bool chans_needed[4] = {false, false, false, false};
1074 int chans_remaining = 0;
1075 int swizzle_mask = 0;
1076 for (int i = 0; i < 4; i++) {
1077 int chan = BRW_GET_SWZ(inst->src[0].swizzle, i);
1078
1079 if (!(inst->dst.writemask & (1 << i)))
1080 continue;
1081
1082 swizzle_mask |= (1 << chan);
1083
1084 if (!chans_needed[chan]) {
1085 chans_needed[chan] = true;
1086 chans_remaining++;
1087 }
1088 }
1089
1090 /* Now walk up the instruction stream trying to see if we can rewrite
1091 * everything writing to the temporary to write into the destination
1092 * instead.
1093 */
1094 vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
1095 foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
1096 inst, block) {
1097 _scan_inst = scan_inst;
1098
1099 if (scan_inst->dst.file == GRF &&
1100 scan_inst->dst.reg == inst->src[0].reg &&
1101 scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
1102 /* Found something writing to the reg we want to coalesce away. */
1103 if (to_mrf) {
1104 /* SEND instructions can't have MRF as a destination. */
1105 if (scan_inst->mlen)
1106 break;
1107
1108 if (brw->gen == 6) {
1109 /* gen6 math instructions must have the destination be
1110 * GRF, so no compute-to-MRF for them.
1111 */
1112 if (scan_inst->is_math()) {
1113 break;
1114 }
1115 }
1116 }
1117
1118 /* If we can't handle the swizzle, bail. */
1119 if (!scan_inst->can_reswizzle(inst->dst.writemask,
1120 inst->src[0].swizzle,
1121 swizzle_mask)) {
1122 break;
1123 }
1124
1125 /* Mark which channels we found unconditional writes for. */
1126 if (!scan_inst->predicate) {
1127 for (int i = 0; i < 4; i++) {
1128 if (scan_inst->dst.writemask & (1 << i) &&
1129 chans_needed[i]) {
1130 chans_needed[i] = false;
1131 chans_remaining--;
1132 }
1133 }
1134 }
1135
1136 if (chans_remaining == 0)
1137 break;
1138 }
1139
1140 /* You can't read from an MRF, so if someone else reads our MRF's
1141 * source GRF that we wanted to rewrite, that stops us. If it's a
1142 * GRF we're trying to coalesce to, we don't actually handle
1143 * rewriting sources so bail in that case as well.
1144 */
1145 bool interfered = false;
1146 for (int i = 0; i < 3; i++) {
1147 if (scan_inst->src[i].file == GRF &&
1148 scan_inst->src[i].reg == inst->src[0].reg &&
1149 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
1150 interfered = true;
1151 }
1152 }
1153 if (interfered)
1154 break;
1155
1156 /* If somebody else writes our destination here, we can't coalesce
1157 * before that.
1158 */
1159 if (scan_inst->dst.file == inst->dst.file &&
1160 scan_inst->dst.reg == inst->dst.reg) {
1161 break;
1162 }
1163
1164 /* Check for reads of the register we're trying to coalesce into. We
1165 * can't go rewriting instructions above that to put some other value
1166 * in the register instead.
1167 */
1168 if (to_mrf && scan_inst->mlen > 0) {
1169 if (inst->dst.reg >= scan_inst->base_mrf &&
1170 inst->dst.reg < scan_inst->base_mrf + scan_inst->mlen) {
1171 break;
1172 }
1173 } else {
1174 for (int i = 0; i < 3; i++) {
1175 if (scan_inst->src[i].file == inst->dst.file &&
1176 scan_inst->src[i].reg == inst->dst.reg &&
1177 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
1178 interfered = true;
1179 }
1180 }
1181 if (interfered)
1182 break;
1183 }
1184 }
1185
1186 if (chans_remaining == 0) {
1187 /* If we've made it here, we have an MOV we want to coalesce out, and
1188 * a scan_inst pointing to the earliest instruction involved in
1189 * computing the value. Now go rewrite the instruction stream
1190 * between the two.
1191 */
1192 vec4_instruction *scan_inst = _scan_inst;
1193 while (scan_inst != inst) {
1194 if (scan_inst->dst.file == GRF &&
1195 scan_inst->dst.reg == inst->src[0].reg &&
1196 scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
1197 scan_inst->reswizzle(inst->dst.writemask,
1198 inst->src[0].swizzle);
1199 scan_inst->dst.file = inst->dst.file;
1200 scan_inst->dst.reg = inst->dst.reg;
1201 scan_inst->dst.reg_offset = inst->dst.reg_offset;
1202 scan_inst->saturate |= inst->saturate;
1203 }
1204 scan_inst = (vec4_instruction *)scan_inst->next;
1205 }
1206 inst->remove(block);
1207 progress = true;
1208 }
1209 }
1210
1211 if (progress)
1212 invalidate_live_intervals();
1213
1214 return progress;
1215 }
1216
1217 /**
1218 * Splits virtual GRFs requesting more than one contiguous physical register.
1219 *
1220 * We initially create large virtual GRFs for temporary structures, arrays,
1221 * and matrices, so that the dereference visitor functions can add reg_offsets
1222 * to work their way down to the actual member being accessed. But when it
1223 * comes to optimization, we'd like to treat each register as individual
1224 * storage if possible.
1225 *
1226 * So far, the only thing that might prevent splitting is a send message from
1227 * a GRF on IVB.
1228 */
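/* For example (hypothetical numbering), a vec4[2] temporary allocated as one
 * two-register VGRF keeps its first register for reg_offset 0, while every
 * access at reg_offset 1 is rewritten to a freshly allocated size-1 VGRF with
 * reg_offset 0, so each register can then be tracked independently.
 */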
1229 void
1230 vec4_visitor::split_virtual_grfs()
1231 {
1232 int num_vars = this->virtual_grf_count;
1233 int new_virtual_grf[num_vars];
1234 bool split_grf[num_vars];
1235
1236 memset(new_virtual_grf, 0, sizeof(new_virtual_grf));
1237
1238 /* Try to split anything > 0 sized. */
1239 for (int i = 0; i < num_vars; i++) {
1240 split_grf[i] = this->virtual_grf_sizes[i] != 1;
1241 }
1242
1243 /* Check that the instructions are compatible with the registers we're trying
1244 * to split.
1245 */
1246 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1247 /* If there's a SEND message loading from a GRF on gen7+, it needs to be
1248 * contiguous.
1249 */
1250 if (inst->is_send_from_grf()) {
1251 for (int i = 0; i < 3; i++) {
1252 if (inst->src[i].file == GRF) {
1253 split_grf[inst->src[i].reg] = false;
1254 }
1255 }
1256 }
1257 }
1258
1259 /* Allocate new space for split regs. Note that the virtual
1260 * numbers will be contiguous.
1261 */
1262 for (int i = 0; i < num_vars; i++) {
1263 if (!split_grf[i])
1264 continue;
1265
1266 new_virtual_grf[i] = virtual_grf_alloc(1);
1267 for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
1268 int reg = virtual_grf_alloc(1);
1269 assert(reg == new_virtual_grf[i] + j - 1);
1270 (void) reg;
1271 }
1272 this->virtual_grf_sizes[i] = 1;
1273 }
1274
1275 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1276 if (inst->dst.file == GRF && split_grf[inst->dst.reg] &&
1277 inst->dst.reg_offset != 0) {
1278 inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
1279 inst->dst.reg_offset - 1);
1280 inst->dst.reg_offset = 0;
1281 }
1282 for (int i = 0; i < 3; i++) {
1283 if (inst->src[i].file == GRF && split_grf[inst->src[i].reg] &&
1284 inst->src[i].reg_offset != 0) {
1285 inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
1286 inst->src[i].reg_offset - 1);
1287 inst->src[i].reg_offset = 0;
1288 }
1289 }
1290 }
1291 invalidate_live_intervals();
1292 }
1293
1294 void
1295 vec4_visitor::dump_instruction(backend_instruction *be_inst)
1296 {
1297 dump_instruction(be_inst, stderr);
1298 }
1299
1300 void
1301 vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
1302 {
1303 vec4_instruction *inst = (vec4_instruction *)be_inst;
1304
1305 if (inst->predicate) {
1306 fprintf(file, "(%cf0) ",
1307 inst->predicate_inverse ? '-' : '+');
1308 }
1309
1310 fprintf(file, "%s", brw_instruction_name(inst->opcode));
1311 if (inst->conditional_mod) {
1312 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
1313 }
1314 fprintf(file, " ");
1315
1316 switch (inst->dst.file) {
1317 case GRF:
1318 fprintf(file, "vgrf%d.%d", inst->dst.reg, inst->dst.reg_offset);
1319 break;
1320 case MRF:
1321 fprintf(file, "m%d", inst->dst.reg);
1322 break;
1323 case HW_REG:
1324 if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
1325 switch (inst->dst.fixed_hw_reg.nr) {
1326 case BRW_ARF_NULL:
1327 fprintf(file, "null");
1328 break;
1329 case BRW_ARF_ADDRESS:
1330 fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
1331 break;
1332 case BRW_ARF_ACCUMULATOR:
1333 fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
1334 break;
1335 case BRW_ARF_FLAG:
1336 fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
1337 inst->dst.fixed_hw_reg.subnr);
1338 break;
1339 default:
1340 fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
1341 inst->dst.fixed_hw_reg.subnr);
1342 break;
1343 }
1344 } else {
1345 fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
1346 }
1347 if (inst->dst.fixed_hw_reg.subnr)
1348 fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
1349 break;
1350 case BAD_FILE:
1351 fprintf(file, "(null)");
1352 break;
1353 default:
1354 fprintf(file, "???");
1355 break;
1356 }
1357 if (inst->dst.writemask != WRITEMASK_XYZW) {
1358 fprintf(file, ".");
1359 if (inst->dst.writemask & 1)
1360 fprintf(file, "x");
1361 if (inst->dst.writemask & 2)
1362 fprintf(file, "y");
1363 if (inst->dst.writemask & 4)
1364 fprintf(file, "z");
1365 if (inst->dst.writemask & 8)
1366 fprintf(file, "w");
1367 }
1368 fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));
1369
1370 if (inst->src[0].file != BAD_FILE)
1371 fprintf(file, ", ");
1372
1373 for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
1374 if (inst->src[i].negate)
1375 fprintf(file, "-");
1376 if (inst->src[i].abs)
1377 fprintf(file, "|");
1378 switch (inst->src[i].file) {
1379 case GRF:
1380 fprintf(file, "vgrf%d", inst->src[i].reg);
1381 break;
1382 case ATTR:
1383 fprintf(file, "attr%d", inst->src[i].reg);
1384 break;
1385 case UNIFORM:
1386 fprintf(file, "u%d", inst->src[i].reg);
1387 break;
1388 case IMM:
1389 switch (inst->src[i].type) {
1390 case BRW_REGISTER_TYPE_F:
1391 fprintf(file, "%fF", inst->src[i].fixed_hw_reg.dw1.f);
1392 break;
1393 case BRW_REGISTER_TYPE_D:
1394 fprintf(file, "%dD", inst->src[i].fixed_hw_reg.dw1.d);
1395 break;
1396 case BRW_REGISTER_TYPE_UD:
1397 fprintf(file, "%uU", inst->src[i].fixed_hw_reg.dw1.ud);
1398 break;
1399 default:
1400 fprintf(file, "???");
1401 break;
1402 }
1403 break;
1404 case HW_REG:
1405 if (inst->src[i].fixed_hw_reg.negate)
1406 fprintf(file, "-");
1407 if (inst->src[i].fixed_hw_reg.abs)
1408 fprintf(file, "|");
1409 if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
1410 switch (inst->src[i].fixed_hw_reg.nr) {
1411 case BRW_ARF_NULL:
1412 fprintf(file, "null");
1413 break;
1414 case BRW_ARF_ADDRESS:
1415 fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
1416 break;
1417 case BRW_ARF_ACCUMULATOR:
1418 fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
1419 break;
1420 case BRW_ARF_FLAG:
1421 fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
1422 inst->src[i].fixed_hw_reg.subnr);
1423 break;
1424 default:
1425 fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
1426 inst->src[i].fixed_hw_reg.subnr);
1427 break;
1428 }
1429 } else {
1430 fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
1431 }
1432 if (inst->src[i].fixed_hw_reg.subnr)
1433 fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
1434 if (inst->src[i].fixed_hw_reg.abs)
1435 fprintf(file, "|");
1436 break;
1437 case BAD_FILE:
1438 fprintf(file, "(null)");
1439 break;
1440 default:
1441 fprintf(file, "???");
1442 break;
1443 }
1444
1445 /* Don't print .0; and only VGRFs have reg_offsets and sizes */
1446 if (inst->src[i].reg_offset != 0 &&
1447 inst->src[i].file == GRF &&
1448 virtual_grf_sizes[inst->src[i].reg] != 1)
1449 fprintf(file, ".%d", inst->src[i].reg_offset);
1450
1451 if (inst->src[i].file != IMM) {
1452 static const char *chans[4] = {"x", "y", "z", "w"};
1453 fprintf(file, ".");
1454 for (int c = 0; c < 4; c++) {
1455 fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
1456 }
1457 }
1458
1459 if (inst->src[i].abs)
1460 fprintf(file, "|");
1461
1462 if (inst->src[i].file != IMM) {
1463 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
1464 }
1465
1466 if (i < 2 && inst->src[i + 1].file != BAD_FILE)
1467 fprintf(file, ", ");
1468 }
1469
1470 fprintf(file, "\n");
1471 }
1472
1473
1474 static inline struct brw_reg
1475 attribute_to_hw_reg(int attr, bool interleaved)
1476 {
1477 if (interleaved)
1478 return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
1479 else
1480 return brw_vec8_grf(attr, 0);
1481 }
1482
1483
1484 /**
1485 * Replace each register of type ATTR in this->instructions with a reference
1486 * to a fixed HW register.
1487 *
1488 * If interleaved is true, then each attribute takes up half a register, with
1489 * register N containing attribute 2*N in its first half and attribute 2*N+1
1490 * in its second half (this corresponds to the payload setup used by geometry
1491 * shaders in "single" or "dual instanced" dispatch mode). If interleaved is
1492 * false, then each attribute takes up a whole register, with register N
1493 * containing attribute N (this corresponds to the payload setup used by
1494 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
1495 */
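/* For example, an ATTR register whose attribute_map value is 5 is read from
 * the second half of g2 when interleaved (register 5 / 2, subregister offset
 * (5 % 2) * 4 floats), and from all of g5 when not interleaved.
 */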
1496 void
1497 vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
1498 bool interleaved)
1499 {
1500 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1501 /* We have to support ATTR as a destination for GL_FIXED fixup. */
1502 if (inst->dst.file == ATTR) {
1503 int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];
1504
1505 /* All attributes used in the shader need to have been assigned a
1506 * hardware register by the caller
1507 */
1508 assert(grf != 0);
1509
1510 struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
1511 reg.type = inst->dst.type;
1512 reg.dw1.bits.writemask = inst->dst.writemask;
1513
1514 inst->dst.file = HW_REG;
1515 inst->dst.fixed_hw_reg = reg;
1516 }
1517
1518 for (int i = 0; i < 3; i++) {
1519 if (inst->src[i].file != ATTR)
1520 continue;
1521
1522 int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];
1523
1524 /* All attributes used in the shader need to have been assigned a
1525 * hardware register by the caller
1526 */
1527 assert(grf != 0);
1528
1529 struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
1530 reg.dw1.bits.swizzle = inst->src[i].swizzle;
1531 reg.type = inst->src[i].type;
1532 if (inst->src[i].abs)
1533 reg = brw_abs(reg);
1534 if (inst->src[i].negate)
1535 reg = negate(reg);
1536
1537 inst->src[i].file = HW_REG;
1538 inst->src[i].fixed_hw_reg = reg;
1539 }
1540 }
1541 }
1542
1543 int
1544 vec4_vs_visitor::setup_attributes(int payload_reg)
1545 {
1546 int nr_attributes;
1547 int attribute_map[VERT_ATTRIB_MAX + 1];
1548 memset(attribute_map, 0, sizeof(attribute_map));
1549
1550 nr_attributes = 0;
1551 for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
1552 if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
1553 attribute_map[i] = payload_reg + nr_attributes;
1554 nr_attributes++;
1555 }
1556 }
1557
1558 /* VertexID is stored by the VF as the last vertex element, but we
1559 * don't represent it with a flag in inputs_read, so we call it
1560 * VERT_ATTRIB_MAX.
1561 */
1562 if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
1563 attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
1564 nr_attributes++;
1565 }
1566
1567 lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);
1568
1569 /* The BSpec says we always have to read at least one thing from
1570 * the VF, and it appears that the hardware wedges otherwise.
1571 */
1572 if (nr_attributes == 0)
1573 nr_attributes = 1;
1574
1575 prog_data->urb_read_length = (nr_attributes + 1) / 2;
1576
1577 unsigned vue_entries =
1578 MAX2(nr_attributes, prog_data->vue_map.num_slots);
1579
1580 if (brw->gen == 6)
1581 prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8;
1582 else
1583 prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4;
1584
1585 return payload_reg + nr_attributes;
1586 }
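/* For example (hypothetical counts), a VS reading 5 attributes gets
 * urb_read_length = (5 + 1) / 2 = 3, since each unit of read length covers
 * two attribute vec4s; with a 9-slot VUE map on Gen7 the URB entry size then
 * rounds up to ALIGN(9, 4) / 4 = 3.
 */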
1587
1588 int
1589 vec4_visitor::setup_uniforms(int reg)
1590 {
1591 prog_data->base.dispatch_grf_start_reg = reg;
1592
1593 /* The pre-gen6 VS requires that some push constants get loaded no
1594 * matter what, or the GPU would hang.
1595 */
1596 if (brw->gen < 6 && this->uniforms == 0) {
1597 assert(this->uniforms < this->uniform_array_size);
1598 this->uniform_vector_size[this->uniforms] = 1;
1599
1600 stage_prog_data->param =
1601 reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
1602 for (unsigned int i = 0; i < 4; i++) {
1603 unsigned int slot = this->uniforms * 4 + i;
1604 static gl_constant_value zero = { 0.0 };
1605 stage_prog_data->param[slot] = &zero;
1606 }
1607
1608 this->uniforms++;
1609 reg++;
1610 } else {
1611 reg += ALIGN(uniforms, 2) / 2;
1612 }
1613
1614 stage_prog_data->nr_params = this->uniforms * 4;
1615
1616 prog_data->base.curb_read_length =
1617 reg - prog_data->base.dispatch_grf_start_reg;
1618
1619 return reg;
1620 }
1621
1622 void
1623 vec4_vs_visitor::setup_payload(void)
1624 {
1625 int reg = 0;
1626
1627 /* The payload always contains important data in g0, which contains
1628 * the URB handles that are passed on to the URB write at the end
1629 * of the thread. So, we always start push constants at g1.
1630 */
1631 reg++;
1632
1633 reg = setup_uniforms(reg);
1634
1635 reg = setup_attributes(reg);
1636
1637 this->first_non_payload_grf = reg;
1638 }
1639
1640 void
1641 vec4_visitor::assign_binding_table_offsets()
1642 {
1643 assign_common_binding_table_offsets(0);
1644 }
1645
1646 src_reg
1647 vec4_visitor::get_timestamp()
1648 {
1649 assert(brw->gen >= 7);
1650
1651 src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
1652 BRW_ARF_TIMESTAMP,
1653 0,
1654 BRW_REGISTER_TYPE_UD,
1655 BRW_VERTICAL_STRIDE_0,
1656 BRW_WIDTH_4,
1657 BRW_HORIZONTAL_STRIDE_4,
1658 BRW_SWIZZLE_XYZW,
1659 WRITEMASK_XYZW));
1660
1661 dst_reg dst = dst_reg(this, glsl_type::uvec4_type);
1662
1663 vec4_instruction *mov = emit(MOV(dst, ts));
1664 /* We want to read the 3 fields we care about (mostly field 0, but also 2)
1665 * even if it's not enabled in the dispatch.
1666 */
1667 mov->force_writemask_all = true;
1668
1669 return src_reg(dst);
1670 }
1671
1672 void
1673 vec4_visitor::emit_shader_time_begin()
1674 {
1675 current_annotation = "shader time start";
1676 shader_start_time = get_timestamp();
1677 }
1678
1679 void
1680 vec4_visitor::emit_shader_time_end()
1681 {
1682 current_annotation = "shader time end";
1683 src_reg shader_end_time = get_timestamp();
1684
1685
1686 /* Check that there weren't any timestamp reset events (assuming these
1687 * were the only two timestamp reads that happened).
1688 */
1689 src_reg reset_end = shader_end_time;
1690 reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
1691 vec4_instruction *test = emit(AND(dst_null_d(), reset_end, src_reg(1u)));
1692 test->conditional_mod = BRW_CONDITIONAL_Z;
1693
1694 emit(IF(BRW_PREDICATE_NORMAL));
1695
1696 /* Take the current timestamp and get the delta. */
1697 shader_start_time.negate = true;
1698 dst_reg diff = dst_reg(this, glsl_type::uint_type);
1699 emit(ADD(diff, shader_start_time, shader_end_time));
1700
1701 /* If there were no instructions between the two timestamp gets, the diff
1702 * is 2 cycles. Remove that overhead, so I can forget about that when
1703 * trying to determine the time taken for single instructions.
1704 */
1705 emit(ADD(diff, src_reg(diff), src_reg(-2u)));
1706
1707 emit_shader_time_write(st_base, src_reg(diff));
1708 emit_shader_time_write(st_written, src_reg(1u));
1709 emit(BRW_OPCODE_ELSE);
1710 emit_shader_time_write(st_reset, src_reg(1u));
1711 emit(BRW_OPCODE_ENDIF);
1712 }
1713
1714 void
1715 vec4_visitor::emit_shader_time_write(enum shader_time_shader_type type,
1716 src_reg value)
1717 {
1718 int shader_time_index =
1719 brw_get_shader_time_index(brw, shader_prog, prog, type);
1720
1721 dst_reg dst =
1722 dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));
1723
1724 dst_reg offset = dst;
1725 dst_reg time = dst;
1726 time.reg_offset++;
1727
1728 offset.type = BRW_REGISTER_TYPE_UD;
1729 emit(MOV(offset, src_reg(shader_time_index * SHADER_TIME_STRIDE)));
1730
1731 time.type = BRW_REGISTER_TYPE_UD;
1732 emit(MOV(time, src_reg(value)));
1733
1734 emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
1735 }
1736
1737 bool
1738 vec4_visitor::run()
1739 {
1740 sanity_param_count = prog->Parameters->NumParameters;
1741
1742 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
1743 emit_shader_time_begin();
1744
1745 assign_binding_table_offsets();
1746
1747 emit_prolog();
1748
1749 /* Generate VS IR for main(). (the visitor only descends into
1750 * functions called "main").
1751 */
1752 if (shader) {
1753 visit_instructions(shader->base.ir);
1754 } else {
1755 emit_program_code();
1756 }
1757 base_ir = NULL;
1758
1759 if (key->userclip_active && !prog->UsesClipDistanceOut)
1760 setup_uniform_clipplane_values();
1761
1762 emit_thread_end();
1763
1764 calculate_cfg();
1765
1766 /* Before any optimization, push array accesses out to scratch
1767 * space where we need them to be. This pass may allocate new
1768 * virtual GRFs, so we want to do it early. It also makes sure
1769 * that we have reladdr computations available for CSE, since we'll
1770 * often do repeated subexpressions for those.
1771 */
1772 if (shader) {
1773 move_grf_array_access_to_scratch();
1774 move_uniform_array_access_to_pull_constants();
1775 } else {
1776 /* The ARB_vertex_program frontend emits pull constant loads directly
1777 * rather than using reladdr, so we don't need to walk through all the
1778 * instructions looking for things to move. There isn't anything.
1779 *
1780 * We do still need to split things to vec4 size.
1781 */
1782 split_uniform_registers();
1783 }
1784 pack_uniform_registers();
1785 move_push_constants_to_pull_constants();
1786 split_virtual_grfs();
1787
1788 const char *stage_name = stage == MESA_SHADER_GEOMETRY ? "gs" : "vs";
1789
1790 #define OPT(pass, args...) do { \
1791 pass_num++; \
1792 bool this_progress = pass(args); \
1793 \
1794 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
1795 char filename[64]; \
1796 snprintf(filename, 64, "%s-%04d-%02d-%02d-" #pass, \
1797 stage_name, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
1798 \
1799 backend_visitor::dump_instructions(filename); \
1800 } \
1801 \
1802 progress = progress || this_progress; \
1803 } while (false)
1804
1805
1806 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
1807 char filename[64];
1808 snprintf(filename, 64, "%s-%04d-00-start",
1809 stage_name, shader_prog ? shader_prog->Name : 0);
1810
1811 backend_visitor::dump_instructions(filename);
1812 }
1813
1814 bool progress;
1815 int iteration = 0;
1816 do {
1817 progress = false;
1818 iteration++;
1819 int pass_num = 0;
1820
1821 OPT(opt_reduce_swizzle);
1822 OPT(dead_code_eliminate);
1823 OPT(dead_control_flow_eliminate, this);
1824 OPT(opt_copy_propagation);
1825 OPT(opt_cse);
1826 OPT(opt_algebraic);
1827 OPT(opt_register_coalesce);
1828 } while (progress);
1829
1830
1831 if (failed)
1832 return false;
1833
1834 setup_payload();
1835
1836 if (false) {
1837 /* Debug of register spilling: Go spill everything. */
1838 const int grf_count = virtual_grf_count;
1839 float spill_costs[virtual_grf_count];
1840 bool no_spill[virtual_grf_count];
1841 evaluate_spill_costs(spill_costs, no_spill);
1842 for (int i = 0; i < grf_count; i++) {
1843 if (no_spill[i])
1844 continue;
1845 spill_reg(i);
1846 }
1847 }
1848
1849 while (!reg_allocate()) {
1850 if (failed)
1851 return false;
1852 }
1853
1854 opt_schedule_instructions();
1855
1856 opt_set_dependency_control();
1857
1858 /* If any state parameters were appended, then ParameterValues could have
1859 * been realloced, in which case the driver uniform storage set up by
1860 * _mesa_associate_uniform_storage() would point to freed memory. Make
1861 * sure that didn't happen.
1862 */
1863 assert(sanity_param_count == prog->Parameters->NumParameters);
1864
1865 return !failed;
1866 }
1867
1868 } /* namespace brw */
1869
1870 extern "C" {
1871
1872 /**
1873 * Compile a vertex shader.
1874 *
1875 * Returns the final assembly and the program's size.
1876 */
1877 const unsigned *
1878 brw_vs_emit(struct brw_context *brw,
1879 struct gl_shader_program *prog,
1880 struct brw_vs_compile *c,
1881 struct brw_vs_prog_data *prog_data,
1882 void *mem_ctx,
1883 unsigned *final_assembly_size)
1884 {
1885 bool start_busy = false;
1886 double start_time = 0;
1887
1888 if (unlikely(brw->perf_debug)) {
1889 start_busy = (brw->batch.last_bo &&
1890 drm_intel_bo_busy(brw->batch.last_bo));
1891 start_time = get_time();
1892 }
1893
1894 struct brw_shader *shader = NULL;
1895 if (prog)
1896 shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];
1897
1898 if (unlikely(INTEL_DEBUG & DEBUG_VS))
1899 brw_dump_ir("vertex", prog, &shader->base, &c->vp->program.Base);
1900
1901 vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx);
1902 if (!v.run()) {
1903 if (prog) {
1904 prog->LinkStatus = false;
1905 ralloc_strcat(&prog->InfoLog, v.fail_msg);
1906 }
1907
1908 _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
1909 v.fail_msg);
1910
1911 return NULL;
1912 }
1913
1914 const unsigned *assembly = NULL;
1915 vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base,
1916 mem_ctx, INTEL_DEBUG & DEBUG_VS);
1917 assembly = g.generate_assembly(v.cfg, final_assembly_size);
1918
1919 if (unlikely(brw->perf_debug) && shader) {
1920 if (shader->compiled_once) {
1921 brw_vs_debug_recompile(brw, prog, &c->key);
1922 }
1923 if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
1924 perf_debug("VS compile took %.03f ms and stalled the GPU\n",
1925 (get_time() - start_time) * 1000);
1926 }
1927 shader->compiled_once = true;
1928 }
1929
1930 return assembly;
1931 }
1932
1933
1934 void
1935 brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
1936 struct brw_vec4_prog_key *key,
1937 GLuint id, struct gl_program *prog)
1938 {
1939 key->program_string_id = id;
1940 key->clamp_vertex_color = ctx->API == API_OPENGL_COMPAT;
1941
1942 unsigned sampler_count = _mesa_fls(prog->SamplersUsed);
1943 for (unsigned i = 0; i < sampler_count; i++) {
1944 if (prog->ShadowSamplers & (1 << i)) {
1945 /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
1946 key->tex.swizzles[i] =
1947 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
1948 } else {
1949 /* Color sampler: assume no swizzling. */
1950 key->tex.swizzles[i] = SWIZZLE_XYZW;
1951 }
1952 }
1953 }
1954
1955 } /* extern "C" */