/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_vs.h"
#include "brw_dead_control_flow.h"

extern "C" {
#include "main/macros.h"
#include "main/shaderobj.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
}

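/* An instruction index that is effectively "infinity"; used e.g. as the
 * initial bound when computing ranges over the instruction stream.
 */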
#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {

/**
 * Common helper for constructing swizzles. When only a subset of
 * channels of a vec4 are used, we don't want to reference the other
 * channels, as that will tell optimization passes that those other
 * channels are used.
 */
unsigned
swizzle_for_size(int size)
{
   static const unsigned size_swizzles[4] = {
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
   };

   assert((size >= 1) && (size <= 4));
   return size_swizzles[size - 1];
}

void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}

src_reg::src_reg(register_file file, int reg, const glsl_type *type)
{
   init();

   this->file = file;
   this->reg = reg;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
}

/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(float f)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   this->fixed_hw_reg.dw1.f = f;
}

src_reg::src_reg(uint32_t u)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   this->fixed_hw_reg.dw1.ud = u;
}

src_reg::src_reg(int32_t i)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   this->fixed_hw_reg.dw1.d = i;
}

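/* Construct a vector-float (VF) immediate: four 8-bit restricted floats
 * packed into a single dword, with vf0 in the low byte.
 */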
src_reg::src_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_VF;
   this->fixed_hw_reg.dw1.ud = (vf0 <<  0) |
                               (vf1 <<  8) |
                               (vf2 << 16) |
                               (vf3 << 24);
}

src_reg::src_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

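   /* Translate the destination writemask into a swizzle: each enabled
    * channel reads itself, and any trailing disabled channels replicate the
    * last enabled channel so no unwritten channel is referenced.
    */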
   int swizzles[4];
   int next_chan = 0;
   int last = 0;

   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}

void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}

dst_reg::dst_reg(register_file file, int reg)
{
   init();

   this->file = file;
   this->reg = reg;
}

dst_reg::dst_reg(register_file file, int reg, const glsl_type *type,
                 int writemask)
{
   init();

   this->file = file;
   this->reg = reg;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   /* How should we do writemasking when converting from a src_reg? It seems
    * pretty obvious that for src.xxxx the caller wants to write to src.x, but
    * what about for src.wx? Just special-case src.xxxx for now.
    */
   if (reg.swizzle == BRW_SWIZZLE_XXXX)
      this->writemask = WRITEMASK_X;
   else
      this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}

bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return true;
   default:
      return false;
   }
}

bool
vec4_instruction::can_do_source_mods(struct brw_context *brw)
{
   if (brw->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      return inst->header_present ? 1 : 0;
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return 0;
   default:
      unreachable("not reached");
   }
}

bool
src_reg::equals(const src_reg &r) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset == r.reg_offset &&
           type == r.type &&
           negate == r.negate &&
           abs == r.abs &&
           swizzle == r.swizzle &&
           !reladdr && !r.reladdr &&
           memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0);
}

/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG)
         continue;

      int swizzle[4];

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = 2;
         swizzle[3] = 3;
         break;
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = 2;
         swizzle[3] = 3;
         break;
      case BRW_OPCODE_DP3:
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = 2;
         swizzle[3] = -1;
         break;
      case BRW_OPCODE_DP2:
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = -1;
         swizzle[3] = -1;
         break;
      default:
         swizzle[0] = inst->dst.writemask & WRITEMASK_X ? 0 : -1;
         swizzle[1] = inst->dst.writemask & WRITEMASK_Y ? 1 : -1;
         swizzle[2] = inst->dst.writemask & WRITEMASK_Z ? 2 : -1;
         swizzle[3] = inst->dst.writemask & WRITEMASK_W ? 3 : -1;
         break;
      }

      /* Resolve unread channels (-1) by assigning them the swizzle of the
       * first channel that is used.
       */
      int first_used_channel = 0;
      for (int i = 0; i < 4; i++) {
         if (swizzle[i] != -1) {
            first_used_channel = swizzle[i];
            break;
         }
      }
      for (int i = 0; i < 4; i++) {
         if (swizzle[i] == -1) {
            swizzle[i] = first_used_channel;
         }
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != GRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         int swiz[4];
         for (int j = 0; j < 4; j++) {
            swiz[j] = BRW_GET_SWZ(inst->src[i].swizzle, swizzle[j]);
         }

         unsigned new_swizzle = BRW_SWIZZLE4(swiz[0], swiz[1], swiz[2], swiz[3]);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over). Now we're
    * going to cut those aggregates up so each .reg index is one
    * vector. The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].reg += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}

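/**
 * Compacts the set of uniform vectors that are actually used: unused
 * uniforms are dropped, live ones are packed together channel-wise, and
 * every UNIFORM source is rewritten (register index and swizzle) to point
 * at its new location.
 */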
void
vec4_visitor::pack_uniform_registers()
{
   bool uniform_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(uniform_used, 0, sizeof(uniform_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program. We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         uniform_used[inst->src[i].reg] = true;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      assert(src < uniform_array_size);
      int size = this->uniform_vector_size[src];

      if (!uniform_used[src]) {
         this->uniform_vector_size[src] = 0;
         continue;
      }

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (this->uniform_vector_size[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = this->uniform_vector_size[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         this->uniform_vector_size[dst] += size;
         this->uniform_vector_size[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         int src = inst->src[i].reg;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].reg = new_loc[src];

         int sx = BRW_GET_SWZ(inst->src[i].swizzle, 0) + new_chan[src];
         int sy = BRW_GET_SWZ(inst->src[i].swizzle, 1) + new_chan[src];
         int sz = BRW_GET_SWZ(inst->src[i].swizzle, 2) + new_chan[src];
         int sw = BRW_GET_SWZ(inst->src[i].swizzle, 3) + new_chan[src];
         inst->src[i].swizzle = BRW_SWIZZLE4(sx, sy, sz, sw);
      }
   }
}

/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons. One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = src_reg(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = src_reg(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = src_reg(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case SHADER_OPCODE_RCP: {
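         /* rcp(sqrt(x)) == rsq(x): fuse an RCP that consumes the result of
          * the immediately preceding SQRT into a single RSQ.
          */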
         vec4_instruction *prev = (vec4_instruction *)inst->prev;
         if (prev->opcode == SHADER_OPCODE_SQRT) {
            if (inst->src[0].equals(src_reg(prev->dst))) {
               inst->opcode = SHADER_OPCODE_RSQ;
               inst->src[0] = prev->src[0];
               progress = true;
            }
         }
         break;
      }
      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants. We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const gl_constant_value **values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].reg] == -1)
            continue;

         int uniform = inst->src[i].reg;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}

/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

   /* From the destination hazard section of the spec:
    * > Instructions other than send, may use this control as long as operations
    * > that have different pipeline latencies are not mixed.
    */
   if (brw->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }
#undef IS_DWORD

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control. They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}

/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls. However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].reg + inst->src[i].reg_offset;
            if (inst->src[i].file == GRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == HW_REG) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.reg + inst->dst.reg_offset;
         if (inst->dst.file == GRF) {
            if (last_grf_write[reg] &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == HW_REG) {
            if (inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)
               memset(last_grf_write, 0, sizeof(last_grf_write));
            if (inst->dst.fixed_hw_reg.file == BRW_MESSAGE_REGISTER_FILE)
               memset(last_mrf_write, 0, sizeof(last_mrf_write));
         }
      }
   }
}

bool
vec4_instruction::can_reswizzle(int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   return true;
}

/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy_x c.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   int new_writemask = 0;
   int new_swizzle[4] = { 0 };

   /* Dot product instructions write a single result into all channels. */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         /* Destination write mask doesn't correspond to source swizzle for the
          * pack_bytes instruction.
          */
         if (opcode == VEC4_OPCODE_PACK_BYTES)
            continue;

         for (int c = 0; c < 4; c++) {
            new_swizzle[c] = BRW_GET_SWZ(src[i].swizzle, BRW_GET_SWZ(swizzle, c));
         }

         src[i].swizzle = BRW_SWIZZLE4(new_swizzle[0], new_swizzle[1],
                                       new_swizzle[2], new_swizzle[3]);
      }
   }

   for (int c = 0; c < 4; c++) {
      int bit = 1 << BRW_GET_SWZ(swizzle, c);
      /* Skip components of the swizzle not used by the dst. */
      if (!(dst_writemask & (1 << c)))
         continue;
      /* If we were populating this component, then populate the
       * corresponding channel of the new dst.
       */
      if (dst.writemask & bit)
         new_writemask |= (1 << c);
   }
   dst.writemask = new_writemask;
}

/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != GRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_end[inst->src[0].reg * 4 + 0] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 1] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 2] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 3] > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating. To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      bool chans_needed[4] = {false, false, false, false};
      int chans_remaining = 0;
      int swizzle_mask = 0;
      for (int i = 0; i < 4; i++) {
         int chan = BRW_GET_SWZ(inst->src[0].swizzle, i);

         if (!(inst->dst.writemask & (1 << i)))
            continue;

         swizzle_mask |= (1 << chan);

         if (!chans_needed[chan]) {
            chans_needed[chan] = true;
            chans_remaining++;
         }
      }

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst, block) {
         _scan_inst = scan_inst;

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg &&
             scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (brw->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * GRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          swizzle_mask)) {
               break;
            }

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate) {
               for (int i = 0; i < 4; i++) {
                  if (scan_inst->dst.writemask & (1 << i) &&
                      chans_needed[i]) {
                     chans_needed[i] = false;
                     chans_remaining--;
                  }
               }
            }

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us. If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         /* If somebody else writes our destination here, we can't coalesce
          * before that.
          */
         if (scan_inst->dst.file == inst->dst.file &&
             scan_inst->dst.reg == inst->dst.reg) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into. We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.reg >= scan_inst->base_mrf &&
                inst->dst.reg < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (scan_inst->src[i].file == inst->dst.file &&
                   scan_inst->src[i].reg == inst->dst.reg &&
                   scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
                  interfered = true;
               }
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value. Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == GRF &&
                scan_inst->dst.reg == inst->src[0].reg &&
                scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->dst.reg_offset = inst->dst.reg_offset;
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the dereference visitor functions can add reg_offsets
 * to work their way down to the actual member being accessed. But when it
 * comes to optimization, we'd like to treat each register as individual
 * storage if possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF on IVB.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->virtual_grf_sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      /* If there's a SEND message loading from a GRF on gen7+, it needs to be
       * contiguous.
       */
      if (inst->is_send_from_grf()) {
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF) {
               split_grf[inst->src[i].reg] = false;
            }
         }
      }
   }

   /* Allocate new space for split regs. Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = virtual_grf_alloc(1);
      for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
         int reg = virtual_grf_alloc(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->virtual_grf_sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF && split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   invalidate_live_intervals();
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0) ",
              inst->predicate_inverse ? '-' : '+');
   }

   fprintf(file, "%s", brw_instruction_name(inst->opcode));
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case GRF:
      fprintf(file, "vgrf%d.%d", inst->dst.reg, inst->dst.reg_offset);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.reg);
      break;
   case HW_REG:
      if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         switch (inst->dst.fixed_hw_reg.nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         }
      } else {
         fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
      }
      if (inst->dst.fixed_hw_reg.subnr)
         fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   default:
      fprintf(file, "???");
      break;
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case GRF:
         fprintf(file, "vgrf%d", inst->src[i].reg);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].reg);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].reg);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].fixed_hw_reg.dw1.f);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].fixed_hw_reg.dw1.d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].fixed_hw_reg.dw1.ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case HW_REG:
         if (inst->src[i].fixed_hw_reg.negate)
            fprintf(file, "-");
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            switch (inst->src[i].fixed_hw_reg.nr) {
            case BRW_ARF_NULL:
               fprintf(file, "null");
               break;
            case BRW_ARF_ADDRESS:
               fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_ACCUMULATOR:
               fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_FLAG:
               fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            default:
               fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            }
         } else {
            fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
         }
         if (inst->src[i].fixed_hw_reg.subnr)
            fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      default:
         fprintf(file, "???");
         break;
      }

      /* Don't print .0; and only VGRFs have reg_offsets and sizes */
      if (inst->src[i].reg_offset != 0 &&
          inst->src[i].file == GRF &&
          virtual_grf_sizes[inst->src[i].reg] != 1)
         fprintf(file, ".%d", inst->src[i].reg_offset);

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   fprintf(file, "\n");
}


static inline struct brw_reg
attribute_to_hw_reg(int attr, bool interleaved)
{
   if (interleaved)
      return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
   else
      return brw_vec8_grf(attr, 0);
}


/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode). If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.type = inst->dst.type;
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }
}

int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 1];
   memset(attribute_map, 0, sizeof(attribute_map));

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

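   /* The URB read length is measured in 256-bit units, i.e. pairs of vec4
    * attribute slots, so round up to a whole pair.
    */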
   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   unsigned vue_entries =
      MAX2(nr_attributes, prog_data->vue_map.num_slots);

   if (brw->gen == 6)
      prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8;
   else
      prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4;

   return payload_reg + nr_attributes;
}

int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (brw->gen < 6 && this->uniforms == 0) {
      assert(this->uniforms < this->uniform_array_size);
      this->uniform_vector_size[this->uniforms] = 1;

      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
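      /* Each payload register holds two vec4 uniforms, so push constants
       * take half a register per uniform vector, rounded up.
       */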
      reg += ALIGN(uniforms, 2) / 2;
   }

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}

void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread. So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}

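/* The vec4 stages need no stage-specific surfaces ahead of the common set,
 * so lay out the shared binding table entries starting at index 0.
 */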
void
vec4_visitor::assign_binding_table_offsets()
{
   assign_common_binding_table_offsets(0);
}

src_reg
vec4_visitor::get_timestamp()
{
   assert(brw->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}

void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();


   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_d(), reset_end, src_reg(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles. Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), src_reg(-2u)));

   emit_shader_time_write(st_base, src_reg(diff));
   emit_shader_time_write(st_written, src_reg(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(st_reset, src_reg(1u));
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_shader_time_write(enum shader_time_shader_type type,
                                     src_reg value)
{
   int shader_time_index =
      brw_get_shader_time_index(brw, shader_prog, prog, type);

   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.reg_offset++;

   offset.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(offset, src_reg(shader_time_index * SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, src_reg(value)));

   emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
}

bool
vec4_visitor::run()
{
   sanity_param_count = prog->Parameters->NumParameters;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_begin();

   assign_binding_table_offsets();

   emit_prolog();

   /* Generate VS IR for main(). (the visitor only descends into
    * functions called "main").
    */
   if (shader) {
      visit_instructions(shader->base.ir);
   } else {
      emit_program_code();
   }
   base_ir = NULL;

   if (key->userclip_active && !prog->UsesClipDistanceOut)
      setup_uniform_clipplane_values();

   emit_thread_end();

   calculate_cfg();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be. This pass may allocate new
    * virtual GRFs, so we want to do it early. It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   if (shader) {
      move_grf_array_access_to_scratch();
      move_uniform_array_access_to_pull_constants();
   } else {
      /* The ARB_vertex_program frontend emits pull constant loads directly
       * rather than using reladdr, so we don't need to walk through all the
       * instructions looking for things to move. There isn't anything.
       *
       * We do still need to split things to vec4 size.
       */
      split_uniform_registers();
   }
   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

   const char *stage_name = stage == MESA_SHADER_GEOMETRY ? "gs" : "vs";

#define OPT(pass, args...) do {                                         \
      pass_num++;                                                       \
      bool this_progress = pass(args);                                  \
                                                                        \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {   \
         char filename[64];                                             \
         snprintf(filename, 64, "%s-%04d-%02d-%02d-" #pass,             \
                  stage_name, shader_prog ? shader_prog->Name : 0,      \
                  iteration, pass_num);                                 \
                                                                        \
         backend_visitor::dump_instructions(filename);                  \
      }                                                                 \
                                                                        \
      progress = progress || this_progress;                             \
   } while (false)


   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%04d-00-start",
               stage_name, shader_prog ? shader_prog->Name : 0);

      backend_visitor::dump_instructions(filename);
   }

   bool progress;
   int iteration = 0;
   do {
      progress = false;
      iteration++;
      int pass_num = 0;

      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
   } while (progress);


   if (failed)
      return false;

   setup_payload();

   if (false) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = virtual_grf_count;
      float spill_costs[virtual_grf_count];
      bool no_spill[virtual_grf_count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (no_spill[i])
            continue;
         spill_reg(i);
      }
   }

   while (!reg_allocate()) {
      if (failed)
         return false;
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   /* If any state parameters were appended, then ParameterValues could have
    * been realloced, in which case the driver uniform storage set up by
    * _mesa_associate_uniform_storage() would point to freed memory. Make
    * sure that didn't happen.
    */
   assert(sanity_param_count == prog->Parameters->NumParameters);

   return !failed;
}

} /* namespace brw */

extern "C" {

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_vs_emit(struct brw_context *brw,
            struct gl_shader_program *prog,
            struct brw_vs_compile *c,
            struct brw_vs_prog_data *prog_data,
            void *mem_ctx,
            unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   if (unlikely(INTEL_DEBUG & DEBUG_VS))
      brw_dump_ir("vertex", prog, &shader->base, &c->vp->program.Base);

   vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx);
   if (!v.run()) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, v.fail_msg);
      }

      _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
                    v.fail_msg);

      return NULL;
   }

   const unsigned *assembly = NULL;
   vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base,
                    mem_ctx, INTEL_DEBUG & DEBUG_VS);
   assembly = g.generate_assembly(v.cfg, final_assembly_size);

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once) {
         brw_vs_debug_recompile(brw, prog, &c->key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      shader->compiled_once = true;
   }

   return assembly;
}


void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog)
{
   key->program_string_id = id;
   key->clamp_vertex_color = ctx->API == API_OPENGL_COMPAT;

   unsigned sampler_count = _mesa_fls(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (prog->ShadowSamplers & (1 << i)) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         key->tex.swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         key->tex.swizzles[i] = SWIZZLE_XYZW;
      }
   }
}

} /* extern "C" */