/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"
#include "glsl/ir_print_visitor.h"

extern "C" {
#include "main/macros.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
}

#define MAX_INSTRUCTION (1 << 30)

namespace brw {
/**
 * Common helper for constructing swizzles.  When only a subset of
 * channels of a vec4 are used, we don't want to reference the other
 * channels, as that will tell optimization passes that those other
 * channels are used.
 */
static unsigned
swizzle_for_size(int size)
{
   static const unsigned size_swizzles[4] = {
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
   };

   assert((size >= 1) && (size <= 4));
   return size_swizzles[size - 1];
}
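
/* For example, swizzle_for_size(2) yields BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
 * SWIZZLE_Y, SWIZZLE_Y): a vec2 source reads .xyyy, so the dead .z/.w
 * channels are never referenced.
 */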
void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}
src_reg::src_reg(register_file file, int reg, const glsl_type *type)
{
   init();

   this->file = file;
   this->reg = reg;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = swizzle_for_size(type->vector_elements);
   else
      this->swizzle = SWIZZLE_XYZW;
}
/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(float f)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   this->imm.f = f;
}

src_reg::src_reg(uint32_t u)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   this->imm.u = u;
}

src_reg::src_reg(int32_t i)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   this->imm.i = i;
}
src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

   int swizzles[4];
   int next_chan = 0;
   int last = 0;

   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}
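
/* For example, a dst_reg with writemask .xz becomes a src_reg with swizzle
 * .xzzz: the written channels in order, with the last written channel
 * replicated into the remaining slots.
 */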
bool
vec4_instruction::is_tex()
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS);
}
void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}
dst_reg::dst_reg(register_file file, int reg)
{
   init();

   this->file = file;
   this->reg = reg;
}

dst_reg::dst_reg(register_file file, int reg, const glsl_type *type,
                 int writemask)
{
   init();

   this->file = file;
   this->reg = reg;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}
dst_reg::dst_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
}
dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}
bool
vec4_instruction::is_math()
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * if necessary.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case VS_OPCODE_SCRATCH_READ:
      return 2;
   case VS_OPCODE_SCRATCH_WRITE:
      return 3;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}
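
/* The scratch write is the largest case above: its message needs one MRF
 * for the header, one for the scratch offset, and one for the vec4 data
 * being written.
 */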
bool
src_reg::equals(src_reg *r)
{
   return (file == r->file &&
           reg == r->reg &&
           reg_offset == r->reg_offset &&
           type == r->type &&
           negate == r->negate &&
           abs == r->abs &&
           swizzle == r->swizzle &&
           !reladdr && !r->reladdr &&
           memcmp(&fixed_hw_reg, &r->fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0 &&
           imm.u == r->imm.u);
}
/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something deffed but not used won't be considered to
 * interfere with other regs.
 */
bool
vec4_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .reg index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].reg += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}
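
/* For example, a mat4 uniform that previously lived at .reg N with
 * reg_offsets 0..3 is now addressed as four vector uniforms N..N+3, each
 * with reg_offset 0.
 */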
void
vec4_visitor::pack_uniform_registers()
{
   bool uniform_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(uniform_used, 0, sizeof(uniform_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         uniform_used[inst->src[i].reg] = true;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      int size = this->uniform_vector_size[src];

      if (!uniform_used[src]) {
         this->uniform_vector_size[src] = 0;
         continue;
      }

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (this->uniform_vector_size[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = this->uniform_vector_size[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            c->prog_data.param[dst * 4 + new_chan[src] + j] =
               c->prog_data.param[src * 4 + j];
         }

         this->uniform_vector_size[dst] += size;
         this->uniform_vector_size[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].reg;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].reg = new_loc[src];

         int sx = BRW_GET_SWZ(inst->src[i].swizzle, 0) + new_chan[src];
         int sy = BRW_GET_SWZ(inst->src[i].swizzle, 1) + new_chan[src];
         int sz = BRW_GET_SWZ(inst->src[i].swizzle, 2) + new_chan[src];
         int sw = BRW_GET_SWZ(inst->src[i].swizzle, 3) + new_chan[src];
         inst->src[i].swizzle = BRW_SWIZZLE4(sx, sy, sz, sw);
      }
   }
}
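
/* For example, if a vec3 occupies slot 0 and a single live float sits alone
 * in slot 2, the float is packed into slot 0 at channel 3, and the swizzle
 * fixup above turns its .xxxx reads into .wwww.
 */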
bool
src_reg::is_zero() const
{
   if (file != IMM)
      return false;

   if (type == BRW_REGISTER_TYPE_F) {
      return imm.f == 0.0;
   } else {
      return imm.i == 0;
   }
}

bool
src_reg::is_one() const
{
   if (file != IMM)
      return false;

   if (type == BRW_REGISTER_TYPE_F) {
      return imm.f == 1.0;
   } else {
      return imm.i == 1;
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = src_reg(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = src_reg(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = src_reg(0u);
               break;
            default:
               assert(!"not reached");
               inst->src[0] = src_reg(0.0f);
               break;
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      this->live_intervals_valid = false;

   return progress;
}
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const float **values = &prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].reg] == -1)
            continue;

         int uniform = inst->src[i].reg;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
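
/* With the gen6 limit of 64 push-constant vec4s (256 components), a shader
 * using 70 vec4 uniforms keeps vec4s 0..63 as push constants and demotes
 * vec4s 64..69 to the pull constant buffer.
 */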
/**
 * Tries to reduce extra MOV instructions by taking GRFs that get just
 * written and then MOVed into an MRF and making the original write of
 * the GRF write directly to the MRF instead.
 */
bool
vec4_visitor::opt_compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicate ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      int mrf = inst->dst.reg;

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* We need to check interference with the MRF between this
       * instruction and the earliest instruction involved in writing
       * the GRF we're eliminating.  To do that, keep track of which
       * of our source channels we've seen initialized.
       */
      bool chans_needed[4] = {false, false, false, false};
      int chans_remaining = 0;
      for (int i = 0; i < 4; i++) {
         int chan = BRW_GET_SWZ(inst->src[0].swizzle, i);

         if (!(inst->dst.writemask & (1 << i)))
            continue;

         /* We don't handle compute-to-MRF across a swizzle.  We would
          * need to be able to rewrite instructions above to output
          * results to different channels.
          */
         if (chan != i)
            chans_remaining = 4;

         if (!chans_needed[chan]) {
            chans_needed[chan] = true;
            chans_remaining++;
         }
      }
      if (chans_remaining > 4)
         continue;

      /* Now walk up the instruction stream trying to see if we can
       * rewrite everything writing to the GRF into the MRF instead.
       */
      vec4_instruction *scan_inst;
      for (scan_inst = (vec4_instruction *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (vec4_instruction *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg &&
             scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
            /* Found something writing to the reg we want to turn into
             * a compute-to-MRF destination.
             */

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate) {
               for (int i = 0; i < 4; i++) {
                  if (scan_inst->dst.writemask & (1 << i) &&
                      chans_needed[i]) {
                     chans_needed[i] = false;
                     chans_remaining--;
                  }
               }
            }

            if (chans_remaining == 0)
               break;
         }

         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         /* If somebody else writes our MRF here, we can't
          * compute-to-MRF before that.
          */
         if (scan_inst->dst.file == MRF && mrf == scan_inst->dst.reg)
            break;

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (mrf >= scan_inst->base_mrf &&
                mrf < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an inst we want to
          * compute-to-MRF, and a scan_inst pointing to the earliest
          * instruction involved in computing the value.  Now go
          * rewrite the instruction stream between the two.
          */

         while (scan_inst != inst) {
            if (scan_inst->dst.file == GRF &&
                scan_inst->dst.reg == inst->src[0].reg &&
                scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               scan_inst->dst.file = MRF;
               scan_inst->dst.reg = mrf;
               scan_inst->dst.reg_offset = 0;
               scan_inst->dst.writemask &= inst->dst.writemask;
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove();
         progress = true;
      }
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
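
/* The transformation above turns, for example:
 *
 *    mul vgrf4, vgrf2, vgrf3
 *    mov m3, vgrf4
 *
 * into a single "mul m3, vgrf2, vgrf3", provided no later instruction reads
 * vgrf4 and nothing touches m3 in between.
 */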
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the dereference visitor functions can add reg_offsets
 * to work their way down to the actual member being accessed.
 *
 * Unlike in the FS visitor, though, we have no SEND messages that return more
 * than 1 register.  We also don't do any array access in register space,
 * which would have required contiguous physical registers.  Thus, all those
 * large virtual GRFs can be split up into independent single-register virtual
 * GRFs, making allocation and optimization easier.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_count;
   int new_virtual_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] == 1)
         continue;

      new_virtual_grf[i] = virtual_grf_alloc(1);
      for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
         int reg = virtual_grf_alloc(1);
         assert(reg == new_virtual_grf[i] + j - 1);
      }
      this->virtual_grf_sizes[i] = 1;
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->dst.file == GRF &&
          new_virtual_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             new_virtual_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   this->live_intervals_valid = false;
}
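
/* For example, a mat3 temporary allocated as one size-3 virtual GRF keeps
 * its reg_offset 0 accesses on the original register, while accesses at
 * reg_offsets 1 and 2 are remapped to the two freshly allocated
 * single-register virtual GRFs.
 */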
void
vec4_visitor::dump_instruction(vec4_instruction *inst)
{
   if (inst->opcode < ARRAY_SIZE(opcode_descs) &&
       opcode_descs[inst->opcode].name) {
      printf("%s ", opcode_descs[inst->opcode].name);
   } else {
      printf("op%d ", inst->opcode);
   }

   switch (inst->dst.file) {
   case GRF:
      printf("vgrf%d.%d", inst->dst.reg, inst->dst.reg_offset);
      break;
   case MRF:
      printf("m%d", inst->dst.reg);
      break;
   default:
      printf("???");
      break;
   }

   if (inst->dst.writemask != WRITEMASK_XYZW) {
      printf(".");
      if (inst->dst.writemask & 1)
         printf("x");
      if (inst->dst.writemask & 2)
         printf("y");
      if (inst->dst.writemask & 4)
         printf("z");
      if (inst->dst.writemask & 8)
         printf("w");
   }
   printf(", ");

   for (int i = 0; i < 3; i++) {
      switch (inst->src[i].file) {
      case GRF:
         printf("vgrf%d", inst->src[i].reg);
         break;
      case ATTR:
         printf("attr%d", inst->src[i].reg);
         break;
      case UNIFORM:
         printf("u%d", inst->src[i].reg);
         break;
      default:
         printf("???");
         break;
      }

      if (inst->src[i].reg_offset)
         printf(".%d", inst->src[i].reg_offset);

      static const char *chans[4] = {"x", "y", "z", "w"};
      printf(".");
      for (int c = 0; c < 4; c++) {
         printf("%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
      }

      if (i < 2)
         printf(", ");
   }

   printf("\n");
}

void
vec4_visitor::dump_instructions()
{
   int ip = 0;
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      printf("%d: ", ip++);
      dump_instruction(inst);
   }
}
int
vec4_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 1];

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (prog_data->uses_vertexid) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.type = inst->dst.type;
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   unsigned vue_entries = MAX2(nr_attributes, c->prog_data.vue_map.num_slots);

   if (intel->gen == 6)
      c->prog_data.urb_entry_size = ALIGN(vue_entries, 8) / 8;
   else
      c->prog_data.urb_entry_size = ALIGN(vue_entries, 4) / 4;

   return payload_reg + nr_attributes;
}
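
/* urb_read_length is measured in pairs of vec4 attribute slots, so a shader
 * reading a position and one generic attribute (nr_attributes == 2) gets
 * urb_read_length == (2 + 1) / 2 == 1.
 */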
int
vec4_visitor::setup_uniforms(int reg)
{
   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (intel->gen < 6 && this->uniforms == 0) {
      this->uniform_vector_size[this->uniforms] = 1;

      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static float zero = 0.0;
         c->prog_data.param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   c->prog_data.nr_params = this->uniforms * 4;

   c->prog_data.curb_read_length = reg - 1;

   return reg;
}
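
/* Each 256-bit register holds two vec4 uniforms, hence the
 * ALIGN(uniforms, 2) / 2 above; curb_read_length is the number of those
 * registers occupied by push constants.
 */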
void
vec4_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
bool
vec4_visitor::run()
{
   emit_attribute_fixups();

   /* Generate VS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   if (shader) {
      visit_instructions(shader->ir);
   } else {
      emit_vertex_program_code();
   }

   if (c->key.userclip_active && !c->key.uses_clip_distance)
      setup_uniform_clipplane_values();

   emit_urb_writes();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   if (shader) {
      move_grf_array_access_to_scratch();
      move_uniform_array_access_to_pull_constants();
   } else {
      /* The ARB_vertex_program frontend emits pull constant loads directly
       * rather than using reladdr, so we don't need to walk through all the
       * instructions looking for things to move.  There isn't anything.
       *
       * We do still need to split things to vec4 size.
       */
      split_uniform_registers();
   }

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

   bool progress;
   do {
      progress = false;
      progress = dead_code_eliminate() || progress;
      progress = opt_copy_propagation() || progress;
      progress = opt_algebraic() || progress;
      progress = opt_compute_to_mrf() || progress;
   } while (progress);

   if (failed)
      return false;

   setup_payload();

   if (0) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = virtual_grf_count;
      float spill_costs[virtual_grf_count];
      bool no_spill[virtual_grf_count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (no_spill[i])
            continue;
         spill_reg(i);
      }
   }

   while (!reg_allocate()) {
      if (failed)
         break;
   }

   brw_set_access_mode(p, BRW_ALIGN_16);

   return !failed;
}

} /* namespace brw */
bool
brw_vs_emit(struct brw_context *brw,
            struct gl_shader_program *prog,
            struct brw_vs_compile *c,
            void *mem_ctx)
{
   struct intel_context *intel = &brw->intel;
   bool start_busy = false;
   float start_time = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      start_busy = (intel->batch.last_bo &&
                    drm_intel_bo_busy(intel->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      if (shader) {
         printf("GLSL IR for native vertex shader %d:\n", prog->Name);
         _mesa_print_ir(shader->ir, NULL);
         printf("\n\n");
      } else {
         printf("ARB_vertex_program %d for native vertex shader\n",
                c->vp->program.Base.Id);
         _mesa_print_program(&c->vp->program.Base);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_PERF) && shader) {
      if (shader->compiled_once) {
         brw_vs_debug_recompile(brw, prog, &c->key);
      }
      if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      shader->compiled_once = true;
   }

   vec4_visitor v(brw, c, prog, shader, mem_ctx);
   if (!v.run()) {
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);
);