[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_vec4.h"
25 #include "brw_fs.h"
26 #include "brw_cfg.h"
27 #include "brw_vs.h"
28 #include "brw_nir.h"
29 #include "brw_vec4_builder.h"
30 #include "brw_vec4_live_variables.h"
31 #include "brw_dead_control_flow.h"
32 #include "program/prog_parameter.h"
33
34 #define MAX_INSTRUCTION (1 << 30)
35
36 using namespace brw;
37
38 namespace brw {
39
40 void
41 src_reg::init()
42 {
43 memset(this, 0, sizeof(*this));
44
45 this->file = BAD_FILE;
46 }
47
48 src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
49 {
50 init();
51
52 this->file = file;
53 this->nr = nr;
54 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
55 this->swizzle = brw_swizzle_for_size(type->vector_elements);
56 else
57 this->swizzle = BRW_SWIZZLE_XYZW;
58 if (type)
59 this->type = brw_type_for_base_type(type);
60 }
61
62 /** Generic unset register constructor. */
63 src_reg::src_reg()
64 {
65 init();
66 }
67
68 src_reg::src_reg(struct ::brw_reg reg) :
69 backend_reg(reg)
70 {
71 this->offset = 0;
72 this->reladdr = NULL;
73 }
74
75 src_reg::src_reg(const dst_reg &reg) :
76 backend_reg(reg)
77 {
78 this->reladdr = reg.reladdr;
79 this->swizzle = brw_swizzle_for_mask(reg.writemask);
80 }
81
82 void
83 dst_reg::init()
84 {
85 memset(this, 0, sizeof(*this));
86 this->file = BAD_FILE;
87 this->writemask = WRITEMASK_XYZW;
88 }
89
90 dst_reg::dst_reg()
91 {
92 init();
93 }
94
95 dst_reg::dst_reg(enum brw_reg_file file, int nr)
96 {
97 init();
98
99 this->file = file;
100 this->nr = nr;
101 }
102
103 dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
104 unsigned writemask)
105 {
106 init();
107
108 this->file = file;
109 this->nr = nr;
110 this->type = brw_type_for_base_type(type);
111 this->writemask = writemask;
112 }
113
114 dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
115 unsigned writemask)
116 {
117 init();
118
119 this->file = file;
120 this->nr = nr;
121 this->type = type;
122 this->writemask = writemask;
123 }
124
125 dst_reg::dst_reg(struct ::brw_reg reg) :
126 backend_reg(reg)
127 {
128 this->offset = 0;
129 this->reladdr = NULL;
130 }
131
132 dst_reg::dst_reg(const src_reg &reg) :
133 backend_reg(reg)
134 {
135 this->writemask = brw_mask_for_swizzle(reg.swizzle);
136 this->reladdr = reg.reladdr;
137 }
138
139 bool
140 dst_reg::equals(const dst_reg &r) const
141 {
142 return (this->backend_reg::equals(r) &&
143 (reladdr == r.reladdr ||
144 (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
145 }
146
147 bool
148 vec4_instruction::is_send_from_grf()
149 {
150 switch (opcode) {
151 case SHADER_OPCODE_SHADER_TIME_ADD:
152 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
153 case SHADER_OPCODE_UNTYPED_ATOMIC:
154 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
155 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
156 case SHADER_OPCODE_TYPED_ATOMIC:
157 case SHADER_OPCODE_TYPED_SURFACE_READ:
158 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
159 case VEC4_OPCODE_URB_READ:
160 case TCS_OPCODE_URB_WRITE:
161 case TCS_OPCODE_RELEASE_INPUT:
162 case SHADER_OPCODE_BARRIER:
163 return true;
164 default:
165 return false;
166 }
167 }
168
169 /**
170 * Returns true if this instruction's sources and destinations cannot
171 * safely be the same register.
172 *
173 * In most cases, a register can be written over safely by the same
174 * instruction that is its last use. For a single instruction, the
175 * sources are dereferenced before writing of the destination starts
176 * (naturally).
177 *
178 * However, there are a few cases where this can be problematic:
179 *
180 * - Virtual opcodes that translate to multiple instructions in the
181 * code generator: if src == dst and one instruction writes the
182 * destination before a later instruction reads the source, then
183 * src will have been clobbered.
184 *
185 * The register allocator uses this information to set up conflicts between
186 * GRF sources and the destination.
187 */
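/* Illustrative sketch (hypothetical expansion, not one of the opcodes listed
 * below): a virtual opcode that the generator lowers to
 *
 *    mov dst.x, tmp
 *    mov dst.y, src.x
 *
 * would clobber src.x with the first MOV whenever dst and src end up in the
 * same GRF, which is exactly the hazard this predicate reports.
 */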
188 bool
189 vec4_instruction::has_source_and_destination_hazard() const
190 {
191 switch (opcode) {
192 case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
193 case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
194 case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
195 return true;
196 default:
197 return false;
198 }
199 }
200
201 unsigned
202 vec4_instruction::size_read(unsigned arg) const
203 {
204 switch (opcode) {
205 case SHADER_OPCODE_SHADER_TIME_ADD:
206 case SHADER_OPCODE_UNTYPED_ATOMIC:
207 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
208 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
209 case SHADER_OPCODE_TYPED_ATOMIC:
210 case SHADER_OPCODE_TYPED_SURFACE_READ:
211 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
212 case TCS_OPCODE_URB_WRITE:
213 if (arg == 0)
214 return mlen * REG_SIZE;
215 break;
216 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
217 if (arg == 1)
218 return mlen * REG_SIZE;
219 break;
220 default:
221 break;
222 }
223
224 switch (src[arg].file) {
225 case BAD_FILE:
226 return 0;
227 case IMM:
228 case UNIFORM:
229 return 4 * type_sz(src[arg].type);
230 default:
231 /* XXX - Represent actual vertical stride. */
232 return exec_size * type_sz(src[arg].type);
233 }
234 }
235
236 bool
237 vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
238 {
239 if (devinfo->gen == 6 && is_math())
240 return false;
241
242 if (is_send_from_grf())
243 return false;
244
245 if (!backend_instruction::can_do_source_mods())
246 return false;
247
248 return true;
249 }
250
251 bool
252 vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
253 {
254 switch (opcode) {
255 case SHADER_OPCODE_GEN4_SCRATCH_READ:
256 case VEC4_OPCODE_FROM_DOUBLE:
257 case VEC4_OPCODE_TO_DOUBLE:
258 case VEC4_OPCODE_PICK_LOW_32BIT:
259 case VEC4_OPCODE_PICK_HIGH_32BIT:
260 case VEC4_OPCODE_SET_LOW_32BIT:
261 case VEC4_OPCODE_SET_HIGH_32BIT:
262 case VS_OPCODE_PULL_CONSTANT_LOAD:
263 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
264 case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
265 case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
266 case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
267 case TES_OPCODE_CREATE_INPUT_READ_HEADER:
268 case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
269 case VEC4_OPCODE_URB_READ:
270 case SHADER_OPCODE_MOV_INDIRECT:
271 return false;
272 default:
273 /* The MATH instruction on Gen6 only executes in align1 mode, which does
274 * not support writemasking.
275 */
276 if (devinfo->gen == 6 && is_math())
277 return false;
278
279 if (is_tex())
280 return false;
281
282 return true;
283 }
284 }
285
286 bool
287 vec4_instruction::can_change_types() const
288 {
289 return dst.type == src[0].type &&
290 !src[0].abs && !src[0].negate && !saturate &&
291 (opcode == BRW_OPCODE_MOV ||
292 (opcode == BRW_OPCODE_SEL &&
293 dst.type == src[1].type &&
294 predicate != BRW_PREDICATE_NONE &&
295 !src[1].abs && !src[1].negate));
296 }
297
298 /**
299 * Returns how many MRFs an opcode will write over.
300 *
301 * Note that this does not count the 0 or 1 implied writes of an actual gen
302 * instruction -- the generate_* functions generate additional MOVs
303 * for setup.
304 */
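/* As a reading aid, the values returned below mirror the MOV-to-MRF setup
 * emitted by the corresponding generate_* functions: e.g. the single-operand
 * math opcodes stage one MRF of payload, while POW and INT_DIV stage two.
 */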
305 int
306 vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
307 {
308 if (inst->mlen == 0 || inst->is_send_from_grf())
309 return 0;
310
311 switch (inst->opcode) {
312 case SHADER_OPCODE_RCP:
313 case SHADER_OPCODE_RSQ:
314 case SHADER_OPCODE_SQRT:
315 case SHADER_OPCODE_EXP2:
316 case SHADER_OPCODE_LOG2:
317 case SHADER_OPCODE_SIN:
318 case SHADER_OPCODE_COS:
319 return 1;
320 case SHADER_OPCODE_INT_QUOTIENT:
321 case SHADER_OPCODE_INT_REMAINDER:
322 case SHADER_OPCODE_POW:
323 case TCS_OPCODE_THREAD_END:
324 return 2;
325 case VS_OPCODE_URB_WRITE:
326 return 1;
327 case VS_OPCODE_PULL_CONSTANT_LOAD:
328 return 2;
329 case SHADER_OPCODE_GEN4_SCRATCH_READ:
330 return 2;
331 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
332 return 3;
333 case GS_OPCODE_URB_WRITE:
334 case GS_OPCODE_URB_WRITE_ALLOCATE:
335 case GS_OPCODE_THREAD_END:
336 return 0;
337 case GS_OPCODE_FF_SYNC:
338 return 1;
339 case TCS_OPCODE_URB_WRITE:
340 return 0;
341 case SHADER_OPCODE_SHADER_TIME_ADD:
342 return 0;
343 case SHADER_OPCODE_TEX:
344 case SHADER_OPCODE_TXL:
345 case SHADER_OPCODE_TXD:
346 case SHADER_OPCODE_TXF:
347 case SHADER_OPCODE_TXF_CMS:
348 case SHADER_OPCODE_TXF_CMS_W:
349 case SHADER_OPCODE_TXF_MCS:
350 case SHADER_OPCODE_TXS:
351 case SHADER_OPCODE_TG4:
352 case SHADER_OPCODE_TG4_OFFSET:
353 case SHADER_OPCODE_SAMPLEINFO:
354 case VS_OPCODE_GET_BUFFER_SIZE:
355 return inst->header_size;
356 default:
357 unreachable("not reached");
358 }
359 }
360
361 bool
362 src_reg::equals(const src_reg &r) const
363 {
364 return (this->backend_reg::equals(r) &&
365 !reladdr && !r.reladdr);
366 }
367
368 bool
369 vec4_visitor::opt_vector_float()
370 {
371 bool progress = false;
372
373 foreach_block(block, cfg) {
374 int last_reg = -1, last_offset = -1;
375 enum brw_reg_file last_reg_file = BAD_FILE;
376
377 uint8_t imm[4] = { 0 };
378 int inst_count = 0;
379 vec4_instruction *imm_inst[4];
380 unsigned writemask = 0;
381 enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;
382
383 foreach_inst_in_block_safe(vec4_instruction, inst, block) {
384 int vf = -1;
385 enum brw_reg_type need_type;
386
387 /* Look for unconditional MOVs from an immediate with a partial
388 * writemask. Skip type-conversion MOVs other than integer 0,
389 * where the type doesn't matter. See if the immediate can be
390 * represented as a VF.
391 */
392 if (inst->opcode == BRW_OPCODE_MOV &&
393 inst->src[0].file == IMM &&
394 inst->predicate == BRW_PREDICATE_NONE &&
395 inst->dst.writemask != WRITEMASK_XYZW &&
396 type_sz(inst->src[0].type) < 8 &&
397 (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {
398
399 vf = brw_float_to_vf(inst->src[0].d);
400 need_type = BRW_REGISTER_TYPE_D;
401
402 if (vf == -1) {
403 vf = brw_float_to_vf(inst->src[0].f);
404 need_type = BRW_REGISTER_TYPE_F;
405 }
406 } else {
407 last_reg = -1;
408 }
409
410 /* If this wasn't a MOV, or the destination register doesn't match,
411 * or we have to switch destination types, then this breaks our
412 * sequence. Combine anything we've accumulated so far.
413 */
414 if (last_reg != inst->dst.nr ||
415 last_offset != inst->dst.offset ||
416 last_reg_file != inst->dst.file ||
417 (vf > 0 && dest_type != need_type)) {
418
419 if (inst_count > 1) {
420 unsigned vf;
421 memcpy(&vf, imm, sizeof(vf));
422 vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
423 mov->dst.type = dest_type;
424 mov->dst.writemask = writemask;
425 inst->insert_before(block, mov);
426
427 for (int i = 0; i < inst_count; i++) {
428 imm_inst[i]->remove(block);
429 }
430
431 progress = true;
432 }
433
434 inst_count = 0;
435 last_reg = -1;
436 writemask = 0;
437 dest_type = BRW_REGISTER_TYPE_F;
438
439 for (int i = 0; i < 4; i++) {
440 imm[i] = 0;
441 }
442 }
443
444 /* Record this instruction's value (if it was representable). */
445 if (vf != -1) {
446 if ((inst->dst.writemask & WRITEMASK_X) != 0)
447 imm[0] = vf;
448 if ((inst->dst.writemask & WRITEMASK_Y) != 0)
449 imm[1] = vf;
450 if ((inst->dst.writemask & WRITEMASK_Z) != 0)
451 imm[2] = vf;
452 if ((inst->dst.writemask & WRITEMASK_W) != 0)
453 imm[3] = vf;
454
455 writemask |= inst->dst.writemask;
456 imm_inst[inst_count++] = inst;
457
458 last_reg = inst->dst.nr;
459 last_offset = inst->dst.offset;
460 last_reg_file = inst->dst.file;
461 if (vf > 0)
462 dest_type = need_type;
463 }
464 }
465 }
466
467 if (progress)
468 invalidate_live_intervals();
469
470 return progress;
471 }
472
473 /* Replaces unused channels of a swizzle with channels that are used.
474 *
475 * For instance, this pass transforms
476 *
477 * mov vgrf4.yz, vgrf5.wxzy
478 *
479 * into
480 *
481 * mov vgrf4.yz, vgrf5.xxzx
482 *
483 * This eliminates false uses of some channels, letting dead code elimination
484 * remove the instructions that wrote them.
485 */
486 bool
487 vec4_visitor::opt_reduce_swizzle()
488 {
489 bool progress = false;
490
491 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
492 if (inst->dst.file == BAD_FILE ||
493 inst->dst.file == ARF ||
494 inst->dst.file == FIXED_GRF ||
495 inst->is_send_from_grf())
496 continue;
497
498 unsigned swizzle;
499
500 /* Determine which channels of the sources are read. */
501 switch (inst->opcode) {
502 case VEC4_OPCODE_PACK_BYTES:
503 case BRW_OPCODE_DP4:
504 case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
505 * but all four of src1.
506 */
507 swizzle = brw_swizzle_for_size(4);
508 break;
509 case BRW_OPCODE_DP3:
510 swizzle = brw_swizzle_for_size(3);
511 break;
512 case BRW_OPCODE_DP2:
513 swizzle = brw_swizzle_for_size(2);
514 break;
515
516 case VEC4_OPCODE_TO_DOUBLE:
517 case VEC4_OPCODE_FROM_DOUBLE:
518 case VEC4_OPCODE_PICK_LOW_32BIT:
519 case VEC4_OPCODE_PICK_HIGH_32BIT:
520 case VEC4_OPCODE_SET_LOW_32BIT:
521 case VEC4_OPCODE_SET_HIGH_32BIT:
522 swizzle = brw_swizzle_for_size(4);
523 break;
524
525 default:
526 swizzle = brw_swizzle_for_mask(inst->dst.writemask);
527 break;
528 }
529
530 /* Update sources' swizzles. */
531 for (int i = 0; i < 3; i++) {
532 if (inst->src[i].file != VGRF &&
533 inst->src[i].file != ATTR &&
534 inst->src[i].file != UNIFORM)
535 continue;
536
537 const unsigned new_swizzle =
538 brw_compose_swizzle(swizzle, inst->src[i].swizzle);
539 if (inst->src[i].swizzle != new_swizzle) {
540 inst->src[i].swizzle = new_swizzle;
541 progress = true;
542 }
543 }
544 }
545
546 if (progress)
547 invalidate_live_intervals();
548
549 return progress;
550 }
551
552 void
553 vec4_visitor::split_uniform_registers()
554 {
555 /* Prior to this, uniforms have been in an array sized according to
556 * the number of vector uniforms present, sparsely filled (so an
557 * aggregate results in reg indices being skipped over). Now we're
558 * going to cut those aggregates up so each .nr index is one
559 * vector. The goal is to make elimination of unused uniform
560 * components easier later.
561 */
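/* Worked example (illustrative values): a UNIFORM source with nr == 2 and
 * offset == 36 bytes refers to the third vec4 after uniform 2, so after this
 * pass it becomes nr == 2 + 36/16 == 4 with offset == 36 % 16 == 4.
 */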
562 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
563 for (int i = 0 ; i < 3; i++) {
564 if (inst->src[i].file != UNIFORM)
565 continue;
566
567 assert(!inst->src[i].reladdr);
568
569 inst->src[i].nr += inst->src[i].offset / 16;
570 inst->src[i].offset %= 16;
571 }
572 }
573 }
574
575 void
576 vec4_visitor::pack_uniform_registers()
577 {
578 uint8_t chans_used[this->uniforms];
579 int new_loc[this->uniforms];
580 int new_chan[this->uniforms];
581
582 memset(chans_used, 0, sizeof(chans_used));
583 memset(new_loc, 0, sizeof(new_loc));
584 memset(new_chan, 0, sizeof(new_chan));
585
586 /* Find which uniform vectors are actually used by the program. We
587 * expect unused vector elements when we've moved array access out
588 * to pull constants, and from some GLSL code generators like wine.
589 */
590 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
591 unsigned readmask;
592 switch (inst->opcode) {
593 case VEC4_OPCODE_PACK_BYTES:
594 case BRW_OPCODE_DP4:
595 case BRW_OPCODE_DPH:
596 readmask = 0xf;
597 break;
598 case BRW_OPCODE_DP3:
599 readmask = 0x7;
600 break;
601 case BRW_OPCODE_DP2:
602 readmask = 0x3;
603 break;
604 default:
605 readmask = inst->dst.writemask;
606 break;
607 }
608
609 for (int i = 0 ; i < 3; i++) {
610 if (inst->src[i].file != UNIFORM)
611 continue;
612
613 int reg = inst->src[i].nr;
614 for (int c = 0; c < 4; c++) {
615 if (!(readmask & (1 << c)))
616 continue;
617
618 chans_used[reg] = MAX2(chans_used[reg],
619 BRW_GET_SWZ(inst->src[i].swizzle, c) + 1);
620 }
621 }
622
623 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
624 inst->src[0].file == UNIFORM) {
625 assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
626 assert(inst->src[0].subnr == 0);
627
628 unsigned bytes_read = inst->src[2].ud;
629 assert(bytes_read % 4 == 0);
630 unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);
631
632 /* We just mark every register touched by a MOV_INDIRECT as being
633 * fully used. This ensures that it doesn't get broken up piecewise by
634 * the next part of our packing algorithm.
635 */
636 int reg = inst->src[0].nr;
637 for (unsigned i = 0; i < vec4s_read; i++)
638 chans_used[reg + i] = 4;
639 }
640 }
641
642 int new_uniform_count = 0;
643
644 /* Now, figure out a packing of the live uniform vectors into our
645 * push constants.
646 */
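/* Sketch of the packing below (made-up sizes): with chans_used == {3, 0, 2, 1},
 * uniform 2 cannot share slot 0 (3 + 2 > 4) and is packed into slot 1, while
 * uniform 3 fits after uniform 0 in slot 0 at channel 3, giving
 * new_loc == {0, -, 1, 0} and new_chan == {0, -, 0, 3}.
 */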
647 for (int src = 0; src < uniforms; src++) {
648 int size = chans_used[src];
649
650 if (size == 0)
651 continue;
652
653 int dst;
654 /* Find the lowest place we can slot this uniform in. */
655 for (dst = 0; dst < src; dst++) {
656 if (chans_used[dst] + size <= 4)
657 break;
658 }
659
660 if (src == dst) {
661 new_loc[src] = dst;
662 new_chan[src] = 0;
663 } else {
664 new_loc[src] = dst;
665 new_chan[src] = chans_used[dst];
666
667 /* Move the references to the data */
668 for (int j = 0; j < size; j++) {
669 stage_prog_data->param[dst * 4 + new_chan[src] + j] =
670 stage_prog_data->param[src * 4 + j];
671 }
672
673 chans_used[dst] += size;
674 chans_used[src] = 0;
675 }
676
677 new_uniform_count = MAX2(new_uniform_count, dst + 1);
678 }
679
680 this->uniforms = new_uniform_count;
681
682 /* Now, update the instructions for our repacked uniforms. */
683 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
684 for (int i = 0 ; i < 3; i++) {
685 int src = inst->src[i].nr;
686
687 if (inst->src[i].file != UNIFORM)
688 continue;
689
690 inst->src[i].nr = new_loc[src];
691 inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
692 new_chan[src], new_chan[src]);
693 }
694 }
695 }
696
697 /**
698 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
699 *
700 * While GLSL IR also performs this optimization, we end up with it in
701 * our instruction stream for a couple of reasons. One is that we
702 * sometimes generate silly instructions, for example in array access
703 * where we'll generate "ADD offset, index, base" even if base is 0.
704 * The other is that GLSL IR's constant propagation doesn't track the
705 * components of aggregates, so some VS patterns (initialize matrix to
706 * 0, accumulate in vertex blending factors) end up breaking down to
707 * instructions involving 0.
708 */
709 bool
710 vec4_visitor::opt_algebraic()
711 {
712 bool progress = false;
713
714 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
715 switch (inst->opcode) {
716 case BRW_OPCODE_MOV:
717 if (inst->src[0].file != IMM)
718 break;
719
720 if (inst->saturate) {
721 if (inst->dst.type != inst->src[0].type)
722 assert(!"unimplemented: saturate mixed types");
723
724 if (brw_saturate_immediate(inst->dst.type,
725 &inst->src[0].as_brw_reg())) {
726 inst->saturate = false;
727 progress = true;
728 }
729 }
730 break;
731
732 case VEC4_OPCODE_UNPACK_UNIFORM:
733 if (inst->src[0].file != UNIFORM) {
734 inst->opcode = BRW_OPCODE_MOV;
735 progress = true;
736 }
737 break;
738
739 case BRW_OPCODE_ADD:
740 if (inst->src[1].is_zero()) {
741 inst->opcode = BRW_OPCODE_MOV;
742 inst->src[1] = src_reg();
743 progress = true;
744 }
745 break;
746
747 case BRW_OPCODE_MUL:
748 if (inst->src[1].is_zero()) {
749 inst->opcode = BRW_OPCODE_MOV;
750 switch (inst->src[0].type) {
751 case BRW_REGISTER_TYPE_F:
752 inst->src[0] = brw_imm_f(0.0f);
753 break;
754 case BRW_REGISTER_TYPE_D:
755 inst->src[0] = brw_imm_d(0);
756 break;
757 case BRW_REGISTER_TYPE_UD:
758 inst->src[0] = brw_imm_ud(0u);
759 break;
760 default:
761 unreachable("not reached");
762 }
763 inst->src[1] = src_reg();
764 progress = true;
765 } else if (inst->src[1].is_one()) {
766 inst->opcode = BRW_OPCODE_MOV;
767 inst->src[1] = src_reg();
768 progress = true;
769 } else if (inst->src[1].is_negative_one()) {
770 inst->opcode = BRW_OPCODE_MOV;
771 inst->src[0].negate = !inst->src[0].negate;
772 inst->src[1] = src_reg();
773 progress = true;
774 }
775 break;
776 case BRW_OPCODE_CMP:
777 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
778 inst->src[0].abs &&
779 inst->src[0].negate &&
780 inst->src[1].is_zero()) {
781 inst->src[0].abs = false;
782 inst->src[0].negate = false;
783 inst->conditional_mod = BRW_CONDITIONAL_Z;
784 progress = true;
785 break;
786 }
787 break;
788 case SHADER_OPCODE_BROADCAST:
789 if (is_uniform(inst->src[0]) ||
790 inst->src[1].is_zero()) {
791 inst->opcode = BRW_OPCODE_MOV;
792 inst->src[1] = src_reg();
793 inst->force_writemask_all = true;
794 progress = true;
795 }
796 break;
797
798 default:
799 break;
800 }
801 }
802
803 if (progress)
804 invalidate_live_intervals();
805
806 return progress;
807 }
808
809 /**
810 * Only a limited number of hardware registers may be used for push
811 * constants, so this turns access to the overflowed constants into
812 * pull constants.
813 */
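/* Rough arithmetic behind the limit checked below: 32 push-constant GRFs at
 * 8 floats each give 256 scalar components, i.e. room for 64 vec4 uniforms;
 * any uniform addressed beyond that is demoted to a pull-constant load.
 */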
814 void
815 vec4_visitor::move_push_constants_to_pull_constants()
816 {
817 int pull_constant_loc[this->uniforms];
818
819 /* Only allow 32 registers (256 uniform components) as push constants,
820 * which is the limit on gen6.
821 *
822 * If changing this value, note the limitation about total_regs in
823 * brw_curbe.c.
824 */
825 int max_uniform_components = 32 * 8;
826 if (this->uniforms * 4 <= max_uniform_components)
827 return;
828
829 /* Make some sort of choice as to which uniforms get sent to pull
830 * constants. We could potentially do something clever here like
831 * look for the most infrequently used uniform vec4s, but leave
832 * that for later.
833 */
834 for (int i = 0; i < this->uniforms * 4; i += 4) {
835 pull_constant_loc[i / 4] = -1;
836
837 if (i >= max_uniform_components) {
838 const gl_constant_value **values = &stage_prog_data->param[i];
839
840 /* Try to find an existing copy of this uniform in the pull
841 * constants if it was part of an array access already.
842 */
843 for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
844 int matches;
845
846 for (matches = 0; matches < 4; matches++) {
847 if (stage_prog_data->pull_param[j + matches] != values[matches])
848 break;
849 }
850
851 if (matches == 4) {
852 pull_constant_loc[i / 4] = j / 4;
853 break;
854 }
855 }
856
857 if (pull_constant_loc[i / 4] == -1) {
858 assert(stage_prog_data->nr_pull_params % 4 == 0);
859 pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;
860
861 for (int j = 0; j < 4; j++) {
862 stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
863 values[j];
864 }
865 }
866 }
867 }
868
869 /* Now actually rewrite usage of the things we've moved to pull
870 * constants.
871 */
872 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
873 for (int i = 0 ; i < 3; i++) {
874 if (inst->src[i].file != UNIFORM ||
875 pull_constant_loc[inst->src[i].nr] == -1)
876 continue;
877
878 int uniform = inst->src[i].nr;
879
880 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
881
882 emit_pull_constant_load(block, inst, temp, inst->src[i],
883 pull_constant_loc[uniform], src_reg());
884
885 inst->src[i].file = temp.file;
886 inst->src[i].nr = temp.nr;
887 inst->src[i].offset %= 16;
888 inst->src[i].reladdr = NULL;
889 }
890 }
891
892 /* Repack push constants to remove the now-unused ones. */
893 pack_uniform_registers();
894 }
895
896 /* Conditions for which we want to avoid setting the dependency control bits */
897 bool
898 vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
899 {
900 #define IS_DWORD(reg) \
901 (reg.type == BRW_REGISTER_TYPE_UD || \
902 reg.type == BRW_REGISTER_TYPE_D)
903
904 /* "When source or destination datatype is 64b or operation is integer DWord
905 * multiply, DepCtrl must not be used."
906 * May apply to future SoCs as well.
907 */
908 if (devinfo->is_cherryview) {
909 if (inst->opcode == BRW_OPCODE_MUL &&
910 IS_DWORD(inst->src[0]) &&
911 IS_DWORD(inst->src[1]))
912 return true;
913 }
914 #undef IS_DWORD
915
916 if (devinfo->gen >= 8) {
917 if (inst->opcode == BRW_OPCODE_F32TO16)
918 return true;
919 }
920
921 /*
922 * mlen:
923 * In the presence of send messages, give up on dependency control
924 * entirely. Send messages are long enough that any benefit from
925 * dependency control around them just doesn't matter.
926 *
927 * predicate:
928 * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
929 * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
930 * completes the scoreboard clear must have a non-zero execution mask. This
931 * means, if any kind of predication can change the execution mask or channel
932 * enable of the last instruction, the optimization must be avoided. This is
933 * to avoid instructions being shot down the pipeline when no writes are
934 * required.
935 *
936 * math:
937 * Dependency control does not work well over math instructions.
938 * NB: Discovered empirically
939 */
940 return (inst->mlen || inst->predicate || inst->is_math());
941 }
942
943 /**
944 * Sets the dependency control fields on instructions after register
945 * allocation and before the generator is run.
946 *
947 * When you have a sequence of instructions like:
948 *
949 * DP4 temp.x vertex uniform[0]
950 * DP4 temp.y vertex uniform[0]
951 * DP4 temp.z vertex uniform[0]
952 * DP4 temp.w vertex uniform[0]
953 *
954 * The hardware doesn't know that it can actually run the later instructions
955 * while the previous ones are in flight, producing stalls. However, we have
956 * manual fields we can set in the instructions that let it do so.
957 */
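/* Roughly, after this pass the DP4 sequence above would carry dependency
 * control flags along the lines of:
 *
 *    DP4 temp.x vertex uniform[0]   { NoDDClr }
 *    DP4 temp.y vertex uniform[0]   { NoDDClr, NoDDChk }
 *    DP4 temp.z vertex uniform[0]   { NoDDClr, NoDDChk }
 *    DP4 temp.w vertex uniform[0]   { NoDDChk }
 *
 * so the scoreboard is only cleared once the full register has been written.
 */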
958 void
959 vec4_visitor::opt_set_dependency_control()
960 {
961 vec4_instruction *last_grf_write[BRW_MAX_GRF];
962 uint8_t grf_channels_written[BRW_MAX_GRF];
963 vec4_instruction *last_mrf_write[BRW_MAX_GRF];
964 uint8_t mrf_channels_written[BRW_MAX_GRF];
965
966 assert(prog_data->total_grf ||
967 !"Must be called after register allocation");
968
969 foreach_block (block, cfg) {
970 memset(last_grf_write, 0, sizeof(last_grf_write));
971 memset(last_mrf_write, 0, sizeof(last_mrf_write));
972
973 foreach_inst_in_block (vec4_instruction, inst, block) {
974 /* If we read from a register that we were doing dependency control
975 * on, don't do dependency control across the read.
976 */
977 for (int i = 0; i < 3; i++) {
978 int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
979 if (inst->src[i].file == VGRF) {
980 last_grf_write[reg] = NULL;
981 } else if (inst->src[i].file == FIXED_GRF) {
982 memset(last_grf_write, 0, sizeof(last_grf_write));
983 break;
984 }
985 assert(inst->src[i].file != MRF);
986 }
987
988 if (is_dep_ctrl_unsafe(inst)) {
989 memset(last_grf_write, 0, sizeof(last_grf_write));
990 memset(last_mrf_write, 0, sizeof(last_mrf_write));
991 continue;
992 }
993
994 /* Now, see if we can do dependency control for this instruction
995 * against a previous one writing to its destination.
996 */
997 int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
998 if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
999 if (last_grf_write[reg] &&
1000 last_grf_write[reg]->dst.offset == inst->dst.offset &&
1001 !(inst->dst.writemask & grf_channels_written[reg])) {
1002 last_grf_write[reg]->no_dd_clear = true;
1003 inst->no_dd_check = true;
1004 } else {
1005 grf_channels_written[reg] = 0;
1006 }
1007
1008 last_grf_write[reg] = inst;
1009 grf_channels_written[reg] |= inst->dst.writemask;
1010 } else if (inst->dst.file == MRF) {
1011 if (last_mrf_write[reg] &&
1012 last_mrf_write[reg]->dst.offset == inst->dst.offset &&
1013 !(inst->dst.writemask & mrf_channels_written[reg])) {
1014 last_mrf_write[reg]->no_dd_clear = true;
1015 inst->no_dd_check = true;
1016 } else {
1017 mrf_channels_written[reg] = 0;
1018 }
1019
1020 last_mrf_write[reg] = inst;
1021 mrf_channels_written[reg] |= inst->dst.writemask;
1022 }
1023 }
1024 }
1025 }
1026
1027 bool
1028 vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
1029 int dst_writemask,
1030 int swizzle,
1031 int swizzle_mask)
1032 {
1033 /* Gen6 MATH instructions cannot execute in align16 mode, so swizzles
1034 * are not allowed.
1035 */
1036 if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
1037 return false;
1038
1039 if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
1040 return false;
1041
1042 /* If this instruction sets anything not referenced by swizzle, then we'd
1043 * totally break it when we reswizzle.
1044 */
1045 if (dst.writemask & ~swizzle_mask)
1046 return false;
1047
1048 if (mlen > 0)
1049 return false;
1050
1051 for (int i = 0; i < 3; i++) {
1052 if (src[i].is_accumulator())
1053 return false;
1054 }
1055
1056 return true;
1057 }
1058
1059 /**
1060 * For any channels in the swizzle's source that were populated by this
1061 * instruction, rewrite the instruction to put the appropriate result directly
1062 * in those channels.
1063 *
1064 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy c.yy_x
1065 */
1066 void
1067 vec4_instruction::reswizzle(int dst_writemask, int swizzle)
1068 {
1069 /* Destination write mask doesn't correspond to source swizzle for the dot
1070 * product and pack_bytes instructions.
1071 */
1072 if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
1073 opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
1074 opcode != VEC4_OPCODE_PACK_BYTES) {
1075 for (int i = 0; i < 3; i++) {
1076 if (src[i].file == BAD_FILE || src[i].file == IMM)
1077 continue;
1078
1079 src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
1080 }
1081 }
1082
1083 /* Apply the specified swizzle and writemask to the original mask of
1084 * written components.
1085 */
1086 dst.writemask = dst_writemask &
1087 brw_apply_swizzle_to_mask(swizzle, dst.writemask);
1088 }
1089
1090 /*
1091 * Tries to reduce extra MOV instructions by taking a temporary GRF that is
1092 * written and then immediately MOVed into another register, and making the
1093 * original write of the GRF store directly to the final destination instead.
1094 */
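/* Minimal before/after sketch (hypothetical registers):
 *
 *    add vgrf3.x, vgrf1.x, vgrf2.x      add m4.x, vgrf1.x, vgrf2.x
 *    mov m4.x, vgrf3.x              ->
 *
 * provided vgrf3 has no later readers and the checks below all pass.
 */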
1095 bool
1096 vec4_visitor::opt_register_coalesce()
1097 {
1098 bool progress = false;
1099 int next_ip = 0;
1100
1101 calculate_live_intervals();
1102
1103 foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
1104 int ip = next_ip;
1105 next_ip++;
1106
1107 if (inst->opcode != BRW_OPCODE_MOV ||
1108 (inst->dst.file != VGRF && inst->dst.file != MRF) ||
1109 inst->predicate ||
1110 inst->src[0].file != VGRF ||
1111 inst->dst.type != inst->src[0].type ||
1112 inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
1113 continue;
1114
1115 /* Remove no-op MOVs */
1116 if (inst->dst.file == inst->src[0].file &&
1117 inst->dst.nr == inst->src[0].nr &&
1118 inst->dst.offset == inst->src[0].offset) {
1119 bool is_nop_mov = true;
1120
1121 for (unsigned c = 0; c < 4; c++) {
1122 if ((inst->dst.writemask & (1 << c)) == 0)
1123 continue;
1124
1125 if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
1126 is_nop_mov = false;
1127 break;
1128 }
1129 }
1130
1131 if (is_nop_mov) {
1132 inst->remove(block);
1133 progress = true;
1134 continue;
1135 }
1136 }
1137
1138 bool to_mrf = (inst->dst.file == MRF);
1139
1140 /* Can't coalesce this GRF if someone else was going to
1141 * read it later.
1142 */
1143 if (var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
1144 continue;
1145
1146 /* We need to check interference with the final destination between this
1147 * instruction and the earliest instruction involved in writing the GRF
1148 * we're eliminating. To do that, keep track of which of our source
1149 * channels we've seen initialized.
1150 */
1151 const unsigned chans_needed =
1152 brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
1153 inst->dst.writemask);
1154 unsigned chans_remaining = chans_needed;
1155
1156 /* Now walk up the instruction stream trying to see if we can rewrite
1157 * everything writing to the temporary to write into the destination
1158 * instead.
1159 */
1160 vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
1161 foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
1162 inst) {
1163 _scan_inst = scan_inst;
1164
1165 if (regions_overlap(inst->src[0], inst->size_read(0),
1166 scan_inst->dst, scan_inst->size_written)) {
1167 /* Found something writing to the reg we want to coalesce away. */
1168 if (to_mrf) {
1169 /* SEND instructions can't have MRF as a destination. */
1170 if (scan_inst->mlen)
1171 break;
1172
1173 if (devinfo->gen == 6) {
1174 /* gen6 math instructions must have the destination be
1175 * VGRF, so no compute-to-MRF for them.
1176 */
1177 if (scan_inst->is_math()) {
1178 break;
1179 }
1180 }
1181 }
1182
1183 /* This doesn't handle saturation on the instruction we
1184 * want to coalesce away if the register types do not match.
1185 * But if scan_inst is a non type-converting 'mov', we can fix
1186 * the types later.
1187 */
1188 if (inst->saturate &&
1189 inst->dst.type != scan_inst->dst.type &&
1190 !(scan_inst->opcode == BRW_OPCODE_MOV &&
1191 scan_inst->dst.type == scan_inst->src[0].type))
1192 break;
1193
1194 /* Only allow coalescing between registers of the same type size.
1195 * Otherwise we would need to make the pass aware of the fact that
1196 * channel sizes are different for single and double precision.
1197 */
1198 if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
1199 break;
1200
1201 /* Check that scan_inst writes the same amount of data as the
1202 * instruction, otherwise coalescing would lead to writing a
1203 * different (larger or smaller) region of the destination
1204 */
1205 if (scan_inst->size_written != inst->size_written)
1206 break;
1207
1208 /* If we can't handle the swizzle, bail. */
1209 if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
1210 inst->src[0].swizzle,
1211 chans_needed)) {
1212 break;
1213 }
1214
1215 /* This only handles coalescing writes of 8 channels (1 register
1216 * for single-precision and 2 registers for double-precision)
1217 * starting at the source offset of the copy instruction.
1218 */
1219 if (DIV_ROUND_UP(scan_inst->size_written,
1220 type_sz(scan_inst->dst.type)) > 8 ||
1221 scan_inst->dst.offset != inst->src[0].offset)
1222 break;
1223
1224 /* Mark which channels we found unconditional writes for. */
1225 if (!scan_inst->predicate)
1226 chans_remaining &= ~scan_inst->dst.writemask;
1227
1228 if (chans_remaining == 0)
1229 break;
1230 }
1231
1232 /* You can't read from an MRF, so if someone else reads our MRF's
1233 * source GRF that we wanted to rewrite, that stops us. If it's a
1234 * GRF we're trying to coalesce to, we don't actually handle
1235 * rewriting sources so bail in that case as well.
1236 */
1237 bool interfered = false;
1238 for (int i = 0; i < 3; i++) {
1239 if (regions_overlap(inst->src[0], inst->size_read(0),
1240 scan_inst->src[i], scan_inst->size_read(i)))
1241 interfered = true;
1242 }
1243 if (interfered)
1244 break;
1245
1246 /* If somebody else writes the same channels of our destination here,
1247 * we can't coalesce before that.
1248 */
1249 if (regions_overlap(inst->dst, inst->size_written,
1250 scan_inst->dst, scan_inst->size_written) &&
1251 (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
1252 break;
1253 }
1254
1255 /* Check for reads of the register we're trying to coalesce into. We
1256 * can't go rewriting instructions above that to put some other value
1257 * in the register instead.
1258 */
1259 if (to_mrf && scan_inst->mlen > 0) {
1260 if (inst->dst.nr >= scan_inst->base_mrf &&
1261 inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
1262 break;
1263 }
1264 } else {
1265 for (int i = 0; i < 3; i++) {
1266 if (regions_overlap(inst->dst, inst->size_written,
1267 scan_inst->src[i], scan_inst->size_read(i)))
1268 interfered = true;
1269 }
1270 if (interfered)
1271 break;
1272 }
1273 }
1274
1275 if (chans_remaining == 0) {
1276 /* If we've made it here, we have an MOV we want to coalesce out, and
1277 * a scan_inst pointing to the earliest instruction involved in
1278 * computing the value. Now go rewrite the instruction stream
1279 * between the two.
1280 */
1281 vec4_instruction *scan_inst = _scan_inst;
1282 while (scan_inst != inst) {
1283 if (scan_inst->dst.file == VGRF &&
1284 scan_inst->dst.nr == inst->src[0].nr &&
1285 scan_inst->dst.offset == inst->src[0].offset) {
1286 scan_inst->reswizzle(inst->dst.writemask,
1287 inst->src[0].swizzle);
1288 scan_inst->dst.file = inst->dst.file;
1289 scan_inst->dst.nr = inst->dst.nr;
1290 scan_inst->dst.offset = inst->dst.offset;
1291 if (inst->saturate &&
1292 inst->dst.type != scan_inst->dst.type) {
1293 /* If we have reached this point, scan_inst is a non
1294 * type-converting 'mov' and we can modify its register types
1295 * to match the ones in inst. Otherwise, we could have an
1296 * incorrect saturation result.
1297 */
1298 scan_inst->dst.type = inst->dst.type;
1299 scan_inst->src[0].type = inst->src[0].type;
1300 }
1301 scan_inst->saturate |= inst->saturate;
1302 }
1303 scan_inst = (vec4_instruction *)scan_inst->next;
1304 }
1305 inst->remove(block);
1306 progress = true;
1307 }
1308 }
1309
1310 if (progress)
1311 invalidate_live_intervals();
1312
1313 return progress;
1314 }
1315
1316 /**
1317 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
1318 * flow. We could probably do better here with some form of divergence
1319 * analysis.
1320 */
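/* Outside of any control flow (and with packed dispatch, checked below)
 * channel zero is always enabled, so FIND_LIVE_CHANNEL can simply be rewritten
 * to an immediate MOV of 0 -- which is what the loop below does at depth == 0.
 */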
1321 bool
1322 vec4_visitor::eliminate_find_live_channel()
1323 {
1324 bool progress = false;
1325 unsigned depth = 0;
1326
1327 if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
1328 /* The optimization below assumes that channel zero is live on thread
1329 * dispatch, which may not be the case if the fixed function dispatches
1330 * threads sparsely.
1331 */
1332 return false;
1333 }
1334
1335 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
1336 switch (inst->opcode) {
1337 case BRW_OPCODE_IF:
1338 case BRW_OPCODE_DO:
1339 depth++;
1340 break;
1341
1342 case BRW_OPCODE_ENDIF:
1343 case BRW_OPCODE_WHILE:
1344 depth--;
1345 break;
1346
1347 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
1348 if (depth == 0) {
1349 inst->opcode = BRW_OPCODE_MOV;
1350 inst->src[0] = brw_imm_d(0);
1351 inst->force_writemask_all = true;
1352 progress = true;
1353 }
1354 break;
1355
1356 default:
1357 break;
1358 }
1359 }
1360
1361 return progress;
1362 }
1363
1364 /**
1365 * Splits virtual GRFs requesting more than one contiguous physical register.
1366 *
1367 * We initially create large virtual GRFs for temporary structures, arrays,
1368 * and matrices, so that the visitor functions can add offsets to work their
1369 * way down to the actual member being accessed. But when it comes to
1370 * optimization, we'd like to treat each register as individual storage if
1371 * possible.
1372 *
1373 * So far, the only thing that might prevent splitting is a send message from
1374 * a GRF on IVB.
1375 */
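/* Example of the remapping done below (made-up numbers): a 3-register vgrf5
 * is shrunk to a single register and two fresh contiguous vgrfs are allocated
 * for its tail, so an access at offset 2 * REG_SIZE into vgrf5 is rewritten to
 * offset 0 of the second new vgrf.
 */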
1376 void
1377 vec4_visitor::split_virtual_grfs()
1378 {
1379 int num_vars = this->alloc.count;
1380 int new_virtual_grf[num_vars];
1381 bool split_grf[num_vars];
1382
1383 memset(new_virtual_grf, 0, sizeof(new_virtual_grf));
1384
1385 /* Try to split anything larger than one register. */
1386 for (int i = 0; i < num_vars; i++) {
1387 split_grf[i] = this->alloc.sizes[i] != 1;
1388 }
1389
1390 /* Check that the instructions are compatible with the registers we're trying
1391 * to split.
1392 */
1393 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1394 if (inst->dst.file == VGRF && regs_written(inst) > 1)
1395 split_grf[inst->dst.nr] = false;
1396
1397 for (int i = 0; i < 3; i++) {
1398 if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
1399 split_grf[inst->src[i].nr] = false;
1400 }
1401 }
1402
1403 /* Allocate new space for split regs. Note that the virtual
1404 * numbers will be contiguous.
1405 */
1406 for (int i = 0; i < num_vars; i++) {
1407 if (!split_grf[i])
1408 continue;
1409
1410 new_virtual_grf[i] = alloc.allocate(1);
1411 for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
1412 unsigned reg = alloc.allocate(1);
1413 assert(reg == new_virtual_grf[i] + j - 1);
1414 (void) reg;
1415 }
1416 this->alloc.sizes[i] = 1;
1417 }
1418
1419 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1420 if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
1421 inst->dst.offset / REG_SIZE != 0) {
1422 inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
1423 inst->dst.offset / REG_SIZE - 1);
1424 inst->dst.offset %= REG_SIZE;
1425 }
1426 for (int i = 0; i < 3; i++) {
1427 if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
1428 inst->src[i].offset / REG_SIZE != 0) {
1429 inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
1430 inst->src[i].offset / REG_SIZE - 1);
1431 inst->src[i].offset %= REG_SIZE;
1432 }
1433 }
1434 }
1435 invalidate_live_intervals();
1436 }
1437
1438 void
1439 vec4_visitor::dump_instruction(backend_instruction *be_inst)
1440 {
1441 dump_instruction(be_inst, stderr);
1442 }
1443
1444 void
1445 vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
1446 {
1447 vec4_instruction *inst = (vec4_instruction *)be_inst;
1448
1449 if (inst->predicate) {
1450 fprintf(file, "(%cf0.%d%s) ",
1451 inst->predicate_inverse ? '-' : '+',
1452 inst->flag_subreg,
1453 pred_ctrl_align16[inst->predicate]);
1454 }
1455
1456 fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
1457 inst->exec_size);
1458 if (inst->saturate)
1459 fprintf(file, ".sat");
1460 if (inst->conditional_mod) {
1461 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
1462 if (!inst->predicate &&
1463 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
1464 inst->opcode != BRW_OPCODE_IF &&
1465 inst->opcode != BRW_OPCODE_WHILE))) {
1466 fprintf(file, ".f0.%d", inst->flag_subreg);
1467 }
1468 }
1469 fprintf(file, " ");
1470
1471 switch (inst->dst.file) {
1472 case VGRF:
1473 fprintf(file, "vgrf%d", inst->dst.nr);
1474 break;
1475 case FIXED_GRF:
1476 fprintf(file, "g%d", inst->dst.nr);
1477 break;
1478 case MRF:
1479 fprintf(file, "m%d", inst->dst.nr);
1480 break;
1481 case ARF:
1482 switch (inst->dst.nr) {
1483 case BRW_ARF_NULL:
1484 fprintf(file, "null");
1485 break;
1486 case BRW_ARF_ADDRESS:
1487 fprintf(file, "a0.%d", inst->dst.subnr);
1488 break;
1489 case BRW_ARF_ACCUMULATOR:
1490 fprintf(file, "acc%d", inst->dst.subnr);
1491 break;
1492 case BRW_ARF_FLAG:
1493 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
1494 break;
1495 default:
1496 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
1497 break;
1498 }
1499 break;
1500 case BAD_FILE:
1501 fprintf(file, "(null)");
1502 break;
1503 case IMM:
1504 case ATTR:
1505 case UNIFORM:
1506 unreachable("not reached");
1507 }
1508 if (inst->dst.offset ||
1509 (inst->dst.file == VGRF &&
1510 alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
1511 const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
1512 fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
1513 inst->dst.offset % reg_size);
1514 }
1515 if (inst->dst.writemask != WRITEMASK_XYZW) {
1516 fprintf(file, ".");
1517 if (inst->dst.writemask & 1)
1518 fprintf(file, "x");
1519 if (inst->dst.writemask & 2)
1520 fprintf(file, "y");
1521 if (inst->dst.writemask & 4)
1522 fprintf(file, "z");
1523 if (inst->dst.writemask & 8)
1524 fprintf(file, "w");
1525 }
1526 fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));
1527
1528 if (inst->src[0].file != BAD_FILE)
1529 fprintf(file, ", ");
1530
1531 for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
1532 if (inst->src[i].negate)
1533 fprintf(file, "-");
1534 if (inst->src[i].abs)
1535 fprintf(file, "|");
1536 switch (inst->src[i].file) {
1537 case VGRF:
1538 fprintf(file, "vgrf%d", inst->src[i].nr);
1539 break;
1540 case FIXED_GRF:
1541 fprintf(file, "g%d", inst->src[i].nr);
1542 break;
1543 case ATTR:
1544 fprintf(file, "attr%d", inst->src[i].nr);
1545 break;
1546 case UNIFORM:
1547 fprintf(file, "u%d", inst->src[i].nr);
1548 break;
1549 case IMM:
1550 switch (inst->src[i].type) {
1551 case BRW_REGISTER_TYPE_F:
1552 fprintf(file, "%fF", inst->src[i].f);
1553 break;
1554 case BRW_REGISTER_TYPE_DF:
1555 fprintf(file, "%fDF", inst->src[i].df);
1556 break;
1557 case BRW_REGISTER_TYPE_D:
1558 fprintf(file, "%dD", inst->src[i].d);
1559 break;
1560 case BRW_REGISTER_TYPE_UD:
1561 fprintf(file, "%uU", inst->src[i].ud);
1562 break;
1563 case BRW_REGISTER_TYPE_VF:
1564 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
1565 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
1566 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
1567 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
1568 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
1569 break;
1570 default:
1571 fprintf(file, "???");
1572 break;
1573 }
1574 break;
1575 case ARF:
1576 switch (inst->src[i].nr) {
1577 case BRW_ARF_NULL:
1578 fprintf(file, "null");
1579 break;
1580 case BRW_ARF_ADDRESS:
1581 fprintf(file, "a0.%d", inst->src[i].subnr);
1582 break;
1583 case BRW_ARF_ACCUMULATOR:
1584 fprintf(file, "acc%d", inst->src[i].subnr);
1585 break;
1586 case BRW_ARF_FLAG:
1587 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
1588 break;
1589 default:
1590 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
1591 break;
1592 }
1593 break;
1594 case BAD_FILE:
1595 fprintf(file, "(null)");
1596 break;
1597 case MRF:
1598 unreachable("not reached");
1599 }
1600
1601 if (inst->src[i].offset ||
1602 (inst->src[i].file == VGRF &&
1603 alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
1604 const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
1605 fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
1606 inst->src[i].offset % reg_size);
1607 }
1608
1609 if (inst->src[i].file != IMM) {
1610 static const char *chans[4] = {"x", "y", "z", "w"};
1611 fprintf(file, ".");
1612 for (int c = 0; c < 4; c++) {
1613 fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
1614 }
1615 }
1616
1617 if (inst->src[i].abs)
1618 fprintf(file, "|");
1619
1620 if (inst->src[i].file != IMM) {
1621 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
1622 }
1623
1624 if (i < 2 && inst->src[i + 1].file != BAD_FILE)
1625 fprintf(file, ", ");
1626 }
1627
1628 if (inst->force_writemask_all)
1629 fprintf(file, " NoMask");
1630
1631 if (inst->exec_size != 8)
1632 fprintf(file, " group%d", inst->group);
1633
1634 fprintf(file, "\n");
1635 }
1636
1637
1638 static inline struct brw_reg
1639 attribute_to_hw_reg(int attr, bool interleaved)
1640 {
1641 if (interleaved)
1642 return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
1643 else
1644 return brw_vec8_grf(attr, 0);
1645 }
1646
1647
1648 /**
1649 * Replace each register of type ATTR in this->instructions with a reference
1650 * to a fixed HW register.
1651 *
1652 * If interleaved is true, then each attribute takes up half a register, with
1653 * register N containing attribute 2*N in its first half and attribute 2*N+1
1654 * in its second half (this corresponds to the payload setup used by geometry
1655 * shaders in "single" or "dual instanced" dispatch mode). If interleaved is
1656 * false, then each attribute takes up a whole register, with register N
1657 * containing attribute N (this corresponds to the payload setup used by
1658 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
1659 */
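/* For instance (attribute number chosen arbitrarily), attribute 5 maps to
 * g2.4<0,4,1> in the interleaved layout (register 5/2 == 2, second half) and
 * to the whole of g5 in the non-interleaved layout, per attribute_to_hw_reg()
 * above.
 */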
1660 void
1661 vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
1662 bool interleaved)
1663 {
1664 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1665 for (int i = 0; i < 3; i++) {
1666 if (inst->src[i].file != ATTR)
1667 continue;
1668
1669 int grf = attribute_map[inst->src[i].nr +
1670 inst->src[i].offset / REG_SIZE];
1671 assert(inst->src[i].offset % REG_SIZE == 0);
1672
1673 /* All attributes used in the shader need to have been assigned a
1674 * hardware register by the caller
1675 */
1676 assert(grf != 0);
1677
1678 struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
1679 reg.swizzle = inst->src[i].swizzle;
1680 reg.type = inst->src[i].type;
1681 if (inst->src[i].abs)
1682 reg = brw_abs(reg);
1683 if (inst->src[i].negate)
1684 reg = negate(reg);
1685
1686 inst->src[i] = reg;
1687 }
1688 }
1689 }
1690
1691 int
1692 vec4_vs_visitor::setup_attributes(int payload_reg)
1693 {
1694 int nr_attributes;
1695 int attribute_map[VERT_ATTRIB_MAX + 2];
1696 memset(attribute_map, 0, sizeof(attribute_map));
1697
1698 nr_attributes = 0;
1699 for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
1700 if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
1701 attribute_map[i] = payload_reg + nr_attributes;
1702 nr_attributes++;
1703 }
1704 }
1705
1706 /* VertexID is stored by the VF as the last vertex element, but we
1707 * don't represent it with a flag in inputs_read, so we call it
1708 * VERT_ATTRIB_MAX.
1709 */
1710 if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
1711 vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
1712 attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
1713 nr_attributes++;
1714 }
1715
1716 if (vs_prog_data->uses_drawid) {
1717 attribute_map[VERT_ATTRIB_MAX + 1] = payload_reg + nr_attributes;
1718 nr_attributes++;
1719 }
1720
1721 lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);
1722
1723 return payload_reg + vs_prog_data->nr_attributes;
1724 }
1725
1726 int
1727 vec4_visitor::setup_uniforms(int reg)
1728 {
1729 prog_data->base.dispatch_grf_start_reg = reg;
1730
1731 /* The pre-gen6 VS requires that some push constants get loaded no
1732 * matter what, or the GPU would hang.
1733 */
1734 if (devinfo->gen < 6 && this->uniforms == 0) {
1735 stage_prog_data->param =
1736 reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
1737 for (unsigned int i = 0; i < 4; i++) {
1738 unsigned int slot = this->uniforms * 4 + i;
1739 static gl_constant_value zero = { 0.0 };
1740 stage_prog_data->param[slot] = &zero;
1741 }
1742
1743 this->uniforms++;
1744 reg++;
1745 } else {
1746 reg += ALIGN(uniforms, 2) / 2;
1747 }
1748
1749 stage_prog_data->nr_params = this->uniforms * 4;
1750
1751 prog_data->base.curb_read_length =
1752 reg - prog_data->base.dispatch_grf_start_reg;
1753
1754 return reg;
1755 }
1756
1757 void
1758 vec4_vs_visitor::setup_payload(void)
1759 {
1760 int reg = 0;
1761
1762 /* The payload always contains important data in g0, which contains
1763 * the URB handles that are passed on to the URB write at the end
1764 * of the thread. So, we always start push constants at g1.
1765 */
1766 reg++;
1767
1768 reg = setup_uniforms(reg);
1769
1770 reg = setup_attributes(reg);
1771
1772 this->first_non_payload_grf = reg;
1773 }
1774
1775 bool
1776 vec4_visitor::lower_minmax()
1777 {
1778 assert(devinfo->gen < 6);
1779
1780 bool progress = false;
1781
1782 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
1783 const vec4_builder ibld(this, block, inst);
1784
1785 if (inst->opcode == BRW_OPCODE_SEL &&
1786 inst->predicate == BRW_PREDICATE_NONE) {
1787 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
1788 * the original SEL.L/GE instruction
1789 */
1790 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
1791 inst->conditional_mod);
1792 inst->predicate = BRW_PREDICATE_NORMAL;
1793 inst->conditional_mod = BRW_CONDITIONAL_NONE;
1794
1795 progress = true;
1796 }
1797 }
1798
1799 if (progress)
1800 invalidate_live_intervals();
1801
1802 return progress;
1803 }
1804
1805 src_reg
1806 vec4_visitor::get_timestamp()
1807 {
1808 assert(devinfo->gen >= 7);
1809
1810 src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
1811 BRW_ARF_TIMESTAMP,
1812 0,
1813 0,
1814 0,
1815 BRW_REGISTER_TYPE_UD,
1816 BRW_VERTICAL_STRIDE_0,
1817 BRW_WIDTH_4,
1818 BRW_HORIZONTAL_STRIDE_4,
1819 BRW_SWIZZLE_XYZW,
1820 WRITEMASK_XYZW));
1821
1822 dst_reg dst = dst_reg(this, glsl_type::uvec4_type);
1823
1824 vec4_instruction *mov = emit(MOV(dst, ts));
1825 /* We want to read the 3 fields we care about (mostly field 0, but also 2)
1826 * even if it's not enabled in the dispatch.
1827 */
1828 mov->force_writemask_all = true;
1829
1830 return src_reg(dst);
1831 }
1832
1833 void
1834 vec4_visitor::emit_shader_time_begin()
1835 {
1836 current_annotation = "shader time start";
1837 shader_start_time = get_timestamp();
1838 }
1839
1840 void
1841 vec4_visitor::emit_shader_time_end()
1842 {
1843 current_annotation = "shader time end";
1844 src_reg shader_end_time = get_timestamp();
1845
1846
1847 /* Check that there weren't any timestamp reset events (assuming these
1848 * were the only two timestamp reads that happened).
1849 */
1850 src_reg reset_end = shader_end_time;
1851 reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
1852 vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
1853 test->conditional_mod = BRW_CONDITIONAL_Z;
1854
1855 emit(IF(BRW_PREDICATE_NORMAL));
1856
1857 /* Take the current timestamp and get the delta. */
1858 shader_start_time.negate = true;
1859 dst_reg diff = dst_reg(this, glsl_type::uint_type);
1860 emit(ADD(diff, shader_start_time, shader_end_time));
1861
1862 /* If there were no instructions between the two timestamp gets, the diff
1863 * is 2 cycles. Remove that overhead, so I can forget about that when
1864 * trying to determine the time taken for single instructions.
1865 */
1866 emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));
1867
1868 emit_shader_time_write(0, src_reg(diff));
1869 emit_shader_time_write(1, brw_imm_ud(1u));
1870 emit(BRW_OPCODE_ELSE);
1871 emit_shader_time_write(2, brw_imm_ud(1u));
1872 emit(BRW_OPCODE_ENDIF);
1873 }
1874
1875 void
1876 vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
1877 {
1878 dst_reg dst =
1879 dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));
1880
1881 dst_reg offset = dst;
1882 dst_reg time = dst;
1883 time.offset += REG_SIZE;
1884
1885 offset.type = BRW_REGISTER_TYPE_UD;
1886 int index = shader_time_index * 3 + shader_time_subindex;
1887 emit(MOV(offset, brw_imm_d(index * SHADER_TIME_STRIDE)));
1888
1889 time.type = BRW_REGISTER_TYPE_UD;
1890 emit(MOV(time, value));
1891
1892 vec4_instruction *inst =
1893 emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
1894 inst->mlen = 2;
1895 }
1896
1897 void
1898 vec4_visitor::convert_to_hw_regs()
1899 {
1900 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1901 for (int i = 0; i < 3; i++) {
1902 struct src_reg &src = inst->src[i];
1903 struct brw_reg reg;
1904 switch (src.file) {
1905 case VGRF: {
1906 const unsigned type_size = type_sz(src.type);
1907 const unsigned width = REG_SIZE / 2 / MAX2(4, type_size);
1908 reg = byte_offset(brw_vecn_grf(width, src.nr, 0), src.offset);
1909 reg.type = src.type;
1910 reg.abs = src.abs;
1911 reg.negate = src.negate;
1912 break;
1913 }
1914
1915 case UNIFORM: {
1916 const unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(src.type));
1917 reg = stride(byte_offset(brw_vec4_grf(
1918 prog_data->base.dispatch_grf_start_reg +
1919 src.nr / 2, src.nr % 2 * 4),
1920 src.offset),
1921 0, width, 1);
1922 reg.type = src.type;
1923 reg.abs = src.abs;
1924 reg.negate = src.negate;
1925
1926 /* This should have been moved to pull constants. */
1927 assert(!src.reladdr);
1928 break;
1929 }
1930
1931 case FIXED_GRF:
1932 if (type_sz(src.type) == 8) {
1933 reg = src.as_brw_reg();
1934 break;
1935 }
1936 /* fallthrough */
1937 case ARF:
1938 case IMM:
1939 continue;
1940
1941 case BAD_FILE:
1942 /* Probably unused. */
1943 reg = brw_null_reg();
1944 break;
1945
1946 case MRF:
1947 case ATTR:
1948 unreachable("not reached");
1949 }
1950
1951 apply_logical_swizzle(&reg, inst, i);
1952 src = reg;
1953 }
1954
1955 if (inst->is_3src(devinfo)) {
1956 /* 3-src instructions with scalar sources support arbitrary subnr,
1957 * but don't actually use swizzles. Convert swizzle into subnr.
1958 */
1959 for (int i = 0; i < 3; i++) {
1960 if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0) {
1961 assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
1962 inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
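/* For example, assuming 32-bit components, a .zzzz swizzle on a scalar
 * (vstride 0) source advances subnr by 8 bytes so the region starts directly
 * at the z channel.
 */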
1963 }
1964 }
1965 }
1966
1967 dst_reg &dst = inst->dst;
1968 struct brw_reg reg;
1969
1970 switch (inst->dst.file) {
1971 case VGRF:
1972 reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
1973 reg.type = dst.type;
1974 reg.writemask = dst.writemask;
1975 break;
1976
1977 case MRF:
1978 reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
1979 assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
1980 reg.type = dst.type;
1981 reg.writemask = dst.writemask;
1982 break;
1983
1984 case ARF:
1985 case FIXED_GRF:
1986 reg = dst.as_brw_reg();
1987 break;
1988
1989 case BAD_FILE:
1990 reg = brw_null_reg();
1991 break;
1992
1993 case IMM:
1994 case ATTR:
1995 case UNIFORM:
1996 unreachable("not reached");
1997 }
1998
1999 dst = reg;
2000 }
2001 }
2002
2003 /**
2004 * Get the closest native SIMD width supported by the hardware for instruction
2005 * \p inst. The instruction will be left untouched by
2006 * vec4_visitor::lower_simd_width() if the returned value matches the
2007 * instruction's original execution size.
2008 */
2009 static unsigned
2010 get_lowered_simd_width(const struct gen_device_info *devinfo,
2011 const vec4_instruction *inst)
2012 {
2013 unsigned lowered_width = MIN2(16, inst->exec_size);
2014
2015 /* We need to split some cases of double-precision instructions that write
2016 * 2 registers. We only need to care about this in gen7 because that is the
2017 * only hardware that implements fp64 in Align16.
2018 */
2019 if (devinfo->gen == 7 && inst->size_written > REG_SIZE) {
2020 /* Align16 8-wide double-precision SEL does not work well. Verified
2021 * empirically.
2022 */
2023 if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
2024 lowered_width = MIN2(lowered_width, 4);
2025
2026 /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
2027 * Register Addressing:
2028 *
2029 * "When destination spans two registers, the source MUST span two
2030 * registers."
2031 */
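/* As an illustration of the rule quoted above: a gen7 SIMD8 DF instruction
 * whose destination spans two registers but where some source reads no more
 * than one register (size_read(i) <= REG_SIZE) is lowered to an execution
 * width of 4 by the check below, so each split piece writes a single
 * register and the restriction no longer applies.
 */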
2032 for (unsigned i = 0; i < 3; i++) {
2033 if (inst->src[i].file == BAD_FILE)
2034 continue;
2035 if (inst->size_read(i) <= REG_SIZE)
2036 lowered_width = MIN2(lowered_width, 4);
2037 }
2038 }
2039
2040 return lowered_width;
2041 }
2042
2043 static bool
2044 dst_src_regions_overlap(vec4_instruction *inst)
2045 {
2046 if (inst->size_written == 0)
2047 return false;
2048
2049 unsigned dst_start = inst->dst.offset;
2050 unsigned dst_end = dst_start + inst->size_written - 1;
2051 for (int i = 0; i < 3; i++) {
2052 if (inst->src[i].file == BAD_FILE)
2053 continue;
2054
2055 if (inst->dst.file != inst->src[i].file ||
2056 inst->dst.nr != inst->src[i].nr)
2057 continue;
2058
2059 unsigned src_start = inst->src[i].offset;
2060 unsigned src_end = src_start + inst->size_read(i) - 1;
2061
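/* This is a closed-interval overlap test: the three conditions below are
 * equivalent to (dst_start <= src_end && src_start <= dst_end).
 */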
2062 if ((dst_start >= src_start && dst_start <= src_end) ||
2063 (dst_end >= src_start && dst_end <= src_end) ||
2064 (dst_start <= src_start && dst_end >= src_end)) {
2065 return true;
2066 }
2067 }
2068
2069 return false;
2070 }
2071
2072 bool
2073 vec4_visitor::lower_simd_width()
2074 {
2075 bool progress = false;
2076
2077 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2078 const unsigned lowered_width = get_lowered_simd_width(devinfo, inst);
2079 assert(lowered_width <= inst->exec_size);
2080 if (lowered_width == inst->exec_size)
2081 continue;
2082
2083 /* We need to deal with source / destination overlaps when splitting.
2084 * The hardware supports reading from and writing to the same register
2085 * in the same instruction, but we need to be careful that each split
2086 * instruction we produce does not corrupt the source of the next.
2087 *
2088 * The easiest way to handle this is to make the split instructions write
2089 * to temporaries if there is a src/dst overlap and then move from the
2090 * temporaries to the original destination. We also need to consider
2091 * instructions that do partial writes via align1 opcodes, in which case
2092 * we need to make sure that we initialize the temporary with the
2093 * value of the instruction's dst.
2094 */
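/* For instance, a SIMD8 instruction whose destination VGRF also feeds one of
 * its sources takes the needs_temp path below: each split piece writes a
 * freshly allocated temporary, and a trailing MOV per piece copies the
 * result back to the original destination.
 */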
2095 bool needs_temp = dst_src_regions_overlap(inst);
2096 for (unsigned n = 0; n < inst->exec_size / lowered_width; n++) {
2097 unsigned channel_offset = lowered_width * n;
2098
2099 unsigned size_written = lowered_width * type_sz(inst->dst.type);
2100
2101 /* Create the split instruction from the original so that we copy all
2102 * relevant instruction fields, then set the width and calculate the
2103 * new dst/src regions.
2104 */
2105 vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
2106 linst->exec_size = lowered_width;
2107 linst->group = channel_offset;
2108 linst->size_written = size_written;
2109
2110 /* Compute split dst region */
2111 dst_reg dst;
2112 if (needs_temp) {
2113 unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
2114 dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
2115 inst->dst.type);
2116 if (inst->is_align1_partial_write()) {
2117 vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
2118 copy->exec_size = lowered_width;
2119 copy->group = channel_offset;
2120 copy->size_written = size_written;
2121 inst->insert_before(block, copy);
2122 }
2123 } else {
2124 dst = horiz_offset(inst->dst, channel_offset);
2125 }
2126 linst->dst = dst;
2127
2128 /* Compute split source regions */
2129 for (int i = 0; i < 3; i++) {
2130 if (linst->src[i].file == BAD_FILE)
2131 continue;
2132
2133 if (!is_uniform(linst->src[i]))
2134 linst->src[i] = horiz_offset(linst->src[i], channel_offset);
2135 }
2136
2137 inst->insert_before(block, linst);
2138
2139 /* If we used a temporary to store the result of the split
2140 * instruction, copy the result to the original destination
2141 */
2142 if (needs_temp) {
2143 vec4_instruction *mov =
2144 MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
2145 mov->exec_size = lowered_width;
2146 mov->group = channel_offset;
2147 mov->size_written = size_written;
2148 mov->predicate = inst->predicate;
2149 inst->insert_before(block, mov);
2150 }
2151 }
2152
2153 inst->remove(block);
2154 progress = true;
2155 }
2156
2157 if (progress)
2158 invalidate_live_intervals();
2159
2160 return progress;
2161 }
2162
2163 static bool
2164 is_align1_df(vec4_instruction *inst)
2165 {
2166 switch (inst->opcode) {
2167 case VEC4_OPCODE_FROM_DOUBLE:
2168 case VEC4_OPCODE_TO_DOUBLE:
2169 case VEC4_OPCODE_PICK_LOW_32BIT:
2170 case VEC4_OPCODE_PICK_HIGH_32BIT:
2171 case VEC4_OPCODE_SET_LOW_32BIT:
2172 case VEC4_OPCODE_SET_HIGH_32BIT:
2173 return true;
2174 default:
2175 return false;
2176 }
2177 }
2178
2179 static brw_predicate
2180 scalarize_predicate(brw_predicate predicate, unsigned writemask)
2181 {
2182 if (predicate != BRW_PREDICATE_NORMAL)
2183 return predicate;
2184
2185 switch (writemask) {
2186 case WRITEMASK_X:
2187 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
2188 case WRITEMASK_Y:
2189 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
2190 case WRITEMASK_Z:
2191 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
2192 case WRITEMASK_W:
2193 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
2194 default:
2195 unreachable("invalid writemask");
2196 }
2197 }
2198
2199 bool
2200 vec4_visitor::scalarize_df()
2201 {
2202 bool progress = false;
2203
2204 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2205 /* Skip DF instructions that operate in Align1 mode */
2206 if (is_align1_df(inst))
2207 continue;
2208
2209 /* Check if this is a double-precision instruction */
2210 bool is_double = type_sz(inst->dst.type) == 8;
2211 for (int arg = 0; !is_double && arg < 3; arg++) {
2212 is_double = inst->src[arg].file != BAD_FILE &&
2213 type_sz(inst->src[arg].type) == 8;
2214 }
2215
2216 if (!is_double)
2217 continue;
2218
2219 /* Generate scalar instructions for each enabled channel */
2220 for (unsigned chan = 0; chan < 4; chan++) {
2221 unsigned chan_mask = 1 << chan;
2222 if (!(inst->dst.writemask & chan_mask))
2223 continue;
2224
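/* Each scalar copy keeps only this channel in the writemask and replicates
 * the corresponding source channel; e.g. a source swizzled .yzwx emitted for
 * channel z ends up reading .wwww.
 */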
2225 vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);
2226
2227 for (unsigned i = 0; i < 3; i++) {
2228 unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
2229 scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
2230 }
2231
2232 scalar_inst->dst.writemask = chan_mask;
2233
2234 if (inst->predicate != BRW_PREDICATE_NONE) {
2235 scalar_inst->predicate =
2236 scalarize_predicate(inst->predicate, chan_mask);
2237 }
2238
2239 inst->insert_before(block, scalar_inst);
2240 }
2241
2242 inst->remove(block);
2243 progress = true;
2244 }
2245
2246 if (progress)
2247 invalidate_live_intervals();
2248
2249 return progress;
2250 }
2251
2252 /* The align16 hardware can only do 32-bit swizzle channels, so we need to
2253 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
2254 * to 32-bit swizzle channels in hardware registers.
2255 *
2256 * @inst and @arg identify the original vec4 IR source operand we need to
2257 * translate the swizzle for and @hw_reg is the hardware register where we
2258 * will write the hardware swizzle to use.
2259 *
2260 * This pass assumes that Align16/DF instructions have been fully scalarized
2261 * previously so there is just one 64-bit swizzle channel to deal with for any
2262 * given Vec4 IR source.
2263 */
2264 void
2265 vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
2266 vec4_instruction *inst, int arg)
2267 {
2268 src_reg reg = inst->src[arg];
2269
2270 if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
2271 return;
2272
2273 /* If this is not a 64-bit operand, or the instruction operates in Align1
2274 * mode (see is_align1_df), we don't need to do anything about the swizzles.
2275 */
2276 if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
2277 hw_reg->swizzle = reg.swizzle;
2278 return;
2279 }
2280
2281 /* Otherwise we should have scalarized the instruction, so take the single
2282 * 64-bit logical swizzle channel and translate it to 32-bit
2283 */
2284 assert(brw_is_single_value_swizzle(reg.swizzle));
2285
2286 /* To gain access to Z/W components we need to select the second half
2287 * of the register and then use an X/Y swizzle to select Z/W respectively.
2288 */
2289 unsigned swizzle = BRW_GET_SWZ(reg.swizzle, 0);
2290
2291 if (swizzle >= 2) {
2292 *hw_reg = suboffset(*hw_reg, 2);
2293 swizzle -= 2;
2294 }
2295
2296 /* Any 64-bit source with an offset at 16B is intended to address the
2297 * second half of a register and needs a vertical stride of 0 so we:
2298 *
2299 * 1. Don't violate register region restrictions.
2300 * 2. Activate the gen7 instruction decompression bug exploit when
2301 * exec size > 4
2302 */
2303 if (hw_reg->subnr % REG_SIZE == 16) {
2304 assert(devinfo->gen == 7);
2305 hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
2306 }
2307
2308 hw_reg->swizzle = BRW_SWIZZLE4(swizzle * 2, swizzle * 2 + 1,
2309 swizzle * 2, swizzle * 2 + 1);
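/* As a worked example: a logical .zzzz swizzle on a DF source takes the
 * suboffset path above (selecting the second half of the region) and then
 * becomes the hardware swizzle .xyxy, i.e. the two 32-bit halves of what is
 * now the first 64-bit channel.
 */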
2310 }
2311
2312 bool
2313 vec4_visitor::run()
2314 {
2315 if (shader_time_index >= 0)
2316 emit_shader_time_begin();
2317
2318 emit_prolog();
2319
2320 emit_nir_code();
2321 if (failed)
2322 return false;
2323 base_ir = NULL;
2324
2325 emit_thread_end();
2326
2327 calculate_cfg();
2328
2329 /* Before any optimization, push array accesses out to scratch
2330 * space where we need them to be. This pass may allocate new
2331 * virtual GRFs, so we want to do it early. It also makes sure
2332 * that we have reladdr computations available for CSE, since we'll
2333 * often do repeated subexpressions for those.
2334 */
2335 move_grf_array_access_to_scratch();
2336 move_uniform_array_access_to_pull_constants();
2337
2338 pack_uniform_registers();
2339 move_push_constants_to_pull_constants();
2340 split_virtual_grfs();
2341
2342 #define OPT(pass, args...) ({ \
2343 pass_num++; \
2344 bool this_progress = pass(args); \
2345 \
2346 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
2347 char filename[64]; \
2348 snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
2349 stage_abbrev, nir->info->name, iteration, pass_num); \
2350 \
2351 backend_shader::dump_instructions(filename); \
2352 } \
2353 \
2354 progress = progress || this_progress; \
2355 this_progress; \
2356 })
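/* The OPT() macro above runs one optimization pass, accumulates whether it
 * made progress, optionally dumps the IR to a per-pass file when
 * INTEL_DEBUG=optimizer is set, and evaluates to that pass's own progress so
 * callers can condition follow-up passes on it.
 */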
2357
2358
2359 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
2360 char filename[64];
2361 snprintf(filename, 64, "%s-%s-00-00-start",
2362 stage_abbrev, nir->info->name);
2363
2364 backend_shader::dump_instructions(filename);
2365 }
2366
2367 bool progress;
2368 int iteration = 0;
2369 int pass_num = 0;
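/* Iterate the core optimization pass list to a fixed point: keep re-running
 * it until a full sweep makes no further progress.
 */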
2370 do {
2371 progress = false;
2372 pass_num = 0;
2373 iteration++;
2374
2375 OPT(opt_predicated_break, this);
2376 OPT(opt_reduce_swizzle);
2377 OPT(dead_code_eliminate);
2378 OPT(dead_control_flow_eliminate, this);
2379 OPT(opt_copy_propagation);
2380 OPT(opt_cmod_propagation);
2381 OPT(opt_cse);
2382 OPT(opt_algebraic);
2383 OPT(opt_register_coalesce);
2384 OPT(eliminate_find_live_channel);
2385 } while (progress);
2386
2387 pass_num = 0;
2388
2389 if (OPT(opt_vector_float)) {
2390 OPT(opt_cse);
2391 OPT(opt_copy_propagation, false);
2392 OPT(opt_copy_propagation, true);
2393 OPT(dead_code_eliminate);
2394 }
2395
2396 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
2397 OPT(opt_cmod_propagation);
2398 OPT(opt_cse);
2399 OPT(opt_copy_propagation);
2400 OPT(dead_code_eliminate);
2401 }
2402
2403 if (OPT(lower_simd_width)) {
2404 OPT(opt_copy_propagation);
2405 OPT(dead_code_eliminate);
2406 }
2407
2408 if (failed)
2409 return false;
2410
2411 OPT(scalarize_df);
2412
2413 setup_payload();
2414
2415 if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
2416 /* Debug of register spilling: Go spill everything. */
2417 const int grf_count = alloc.count;
2418 float spill_costs[alloc.count];
2419 bool no_spill[alloc.count];
2420 evaluate_spill_costs(spill_costs, no_spill);
2421 for (int i = 0; i < grf_count; i++) {
2422 if (no_spill[i])
2423 continue;
2424 spill_reg(i);
2425 }
2426 }
2427
2428 bool allocated_without_spills = reg_allocate();
2429
2430 if (!allocated_without_spills) {
2431 compiler->shader_perf_log(log_data,
2432 "%s shader triggered register spilling. "
2433 "Try reducing the number of live vec4 values "
2434 "to improve performance.\n",
2435 stage_name);
2436
2437 while (!reg_allocate()) {
2438 if (failed)
2439 return false;
2440 }
2441 }
2442
2443 opt_schedule_instructions();
2444
2445 opt_set_dependency_control();
2446
2447 convert_to_hw_regs();
2448
2449 if (last_scratch > 0) {
2450 prog_data->base.total_scratch =
2451 brw_get_scratch_size(last_scratch * REG_SIZE);
2452 }
2453
2454 return !failed;
2455 }
2456
2457 } /* namespace brw */
2458
2459 extern "C" {
2460
2461 /**
2462 * Compile a vertex shader.
2463 *
2464 * Returns the final assembly and the program's size.
2465 */
2466 const unsigned *
2467 brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
2468 void *mem_ctx,
2469 const struct brw_vs_prog_key *key,
2470 struct brw_vs_prog_data *prog_data,
2471 const nir_shader *src_shader,
2472 gl_clip_plane *clip_planes,
2473 bool use_legacy_snorm_formula,
2474 int shader_time_index,
2475 unsigned *final_assembly_size,
2476 char **error_str)
2477 {
2478 const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
2479 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
2480 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
2481 brw_nir_lower_vs_inputs(shader, is_scalar,
2482 use_legacy_snorm_formula, key->gl_attrib_wa_flags);
2483 brw_nir_lower_vue_outputs(shader, is_scalar);
2484 shader = brw_postprocess_nir(shader, compiler, is_scalar);
2485
2486 const unsigned *assembly = NULL;
2487
2488 prog_data->base.clip_distance_mask =
2489 ((1 << shader->info->clip_distance_array_size) - 1);
2490 prog_data->base.cull_distance_mask =
2491 ((1 << shader->info->cull_distance_array_size) - 1) <<
2492 shader->info->clip_distance_array_size;
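/* For example, 4 clip distances and 2 cull distances yield masks of 0xf and
 * 0x30 respectively: the cull bits sit immediately above the clip bits.
 */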
2493
2494 unsigned nr_attributes = _mesa_bitcount_64(prog_data->inputs_read);
2495
2496 /* gl_VertexID and gl_InstanceID are system values, but arrive via an
2497 * incoming vertex attribute. So, add an extra slot.
2498 */
2499 if (shader->info->system_values_read &
2500 (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
2501 BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
2502 BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
2503 BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
2504 nr_attributes++;
2505 }
2506
2507 /* gl_DrawID has its very own vec4 */
2508 if (shader->info->system_values_read &
2509 BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
2510 nr_attributes++;
2511 }
2512
2513 unsigned nr_attribute_slots =
2514 nr_attributes +
2515 _mesa_bitcount_64(shader->info->double_inputs_read);
2516
2517 /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
2518 * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
2519 * vec4 mode, the hardware appears to wedge unless we read something.
2520 */
2521 if (is_scalar)
2522 prog_data->base.urb_read_length =
2523 DIV_ROUND_UP(nr_attribute_slots, 2);
2524 else
2525 prog_data->base.urb_read_length =
2526 DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);
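/* For example, 5 attribute slots give a read length of 3 in either mode,
 * while 0 slots give 0 in SIMD8 mode but 1 in vec4 mode because of the MAX2
 * workaround described above.
 */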
2527
2528 prog_data->nr_attributes = nr_attributes;
2529 prog_data->nr_attribute_slots = nr_attribute_slots;
2530
2531 /* Since vertex shaders reuse the same VUE entry for inputs and outputs
2532 * (overwriting the original contents), we need to make sure the size is
2533 * the larger of the two.
2534 */
2535 const unsigned vue_entries =
2536 MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);
2537
2538 if (compiler->devinfo->gen == 6)
2539 prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
2540 else
2541 prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
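/* The divisors reflect the units the hardware expects for the URB entry size
 * on each generation (presumably 8 vec4 slots per unit on gen6 and 4 on
 * later gens); e.g. 10 VUE slots round up to 2 units on gen6 and 3 units
 * elsewhere.
 */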
2542
2543 if (is_scalar) {
2544 prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
2545
2546 fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
2547 NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
2548 shader, 8, shader_time_index);
2549 if (!v.run_vs(clip_planes)) {
2550 if (error_str)
2551 *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
2552
2553 return NULL;
2554 }
2555
2556 prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
2557
2558 fs_generator g(compiler, log_data, mem_ctx, (void *) key,
2559 &prog_data->base.base, v.promoted_constants,
2560 v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
2561 if (INTEL_DEBUG & DEBUG_VS) {
2562 const char *debug_name =
2563 ralloc_asprintf(mem_ctx, "%s vertex shader %s",
2564 shader->info->label ? shader->info->label :
2565 "unnamed",
2566 shader->info->name);
2567
2568 g.enable_debug(debug_name);
2569 }
2570 g.generate_code(v.cfg, 8);
2571 assembly = g.get_assembly(final_assembly_size);
2572 }
2573
2574 if (!assembly) {
2575 prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
2576
2577 vec4_vs_visitor v(compiler, log_data, key, prog_data,
2578 shader, clip_planes, mem_ctx,
2579 shader_time_index, use_legacy_snorm_formula);
2580 if (!v.run()) {
2581 if (error_str)
2582 *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
2583
2584 return NULL;
2585 }
2586
2587 assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
2588 shader, &prog_data->base, v.cfg,
2589 final_assembly_size);
2590 }
2591
2592 return assembly;
2593 }
2594
2595 } /* extern "C" */