/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_vs.h"
#include "brw_nir.h"
#include "brw_vec4_live_variables.h"
#include "brw_dead_control_flow.h"

extern "C" {
#include "main/macros.h"
#include "main/shaderobj.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
}
#include "main/context.h"

#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {
void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}
src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}
/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(struct brw_reg reg) :
   backend_reg(reg)
{
   this->reg_offset = 0;
   this->reladdr = NULL;
}
src_reg::src_reg(const dst_reg &reg) :
   backend_reg(static_cast<struct brw_reg>(reg))
{
   this->reg_offset = reg.reg_offset;
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}
void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}
dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}
dst_reg::dst_reg(struct brw_reg reg) :
   backend_reg(reg)
{
   this->reg_offset = 0;
   this->reladdr = NULL;
}

dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(static_cast<struct brw_reg>(reg))
{
   this->reg_offset = reg.reg_offset;
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}
bool
dst_reg::equals(const dst_reg &r) const
{
   return (memcmp((brw_reg *)this, (brw_reg *)&r, sizeof(brw_reg)) == 0 &&
           reg_offset == r.reg_offset &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}
bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return true;
   default:
      return false;
   }
}
unsigned
vec4_instruction::regs_read(unsigned arg) const
{
   if (src[arg].file == BAD_FILE)
      return 0;

   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return arg == 0 ? mlen : 1;

   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return arg == 1 ? mlen : 1;

   default:
      return 1;
   }
}
bool
vec4_instruction::can_do_source_mods(const struct brw_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0 || inst->is_send_from_grf())
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case VS_OPCODE_GET_BUFFER_SIZE:
      return inst->header_size;
   default:
      unreachable("not reached");
   }
}
bool
src_reg::equals(const src_reg &r) const
{
   return (memcmp((brw_reg *)this, (brw_reg *)&r, sizeof(brw_reg)) == 0 &&
           reg_offset == r.reg_offset &&
           !reladdr && !r.reladdr);
}
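
/**
 * Collapses sequences of MOVs of float immediates to successive channels of
 * the same register into a single MOV of a packed vector-float (VF)
 * immediate.  An illustrative sketch (not actual compiler output):
 *
 *    mov vgrf1.x:F, 1.0F
 *    mov vgrf1.y:F, 2.0F
 *    mov vgrf1.zw:F, 0.0F
 *
 * becomes a single mov of the VF immediate [1F, 2F, 0F, 0F], provided each
 * value is representable as a VF (i.e. brw_float_to_vf() succeeds).
 */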
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   int last_reg = -1, last_reg_offset = -1;
   enum brw_reg_file last_reg_file = BAD_FILE;

   int remaining_channels = 0;
   uint8_t imm[4];
   int inst_count = 0;
   vec4_instruction *imm_inst[4];

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (last_reg != inst->dst.nr ||
          last_reg_offset != inst->dst.reg_offset ||
          last_reg_file != inst->dst.file) {
         last_reg = inst->dst.nr;
         last_reg_offset = inst->dst.reg_offset;
         last_reg_file = inst->dst.file;
         remaining_channels = WRITEMASK_XYZW;

         inst_count = 0;
      }

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->dst.writemask == WRITEMASK_XYZW ||
          inst->src[0].file != IMM)
         continue;

      int vf = brw_float_to_vf(inst->src[0].f);
      if (vf == -1)
         continue;

      if ((inst->dst.writemask & WRITEMASK_X) != 0)
         imm[0] = vf;
      if ((inst->dst.writemask & WRITEMASK_Y) != 0)
         imm[1] = vf;
      if ((inst->dst.writemask & WRITEMASK_Z) != 0)
         imm[2] = vf;
      if ((inst->dst.writemask & WRITEMASK_W) != 0)
         imm[3] = vf;

      imm_inst[inst_count++] = inst;

      remaining_channels &= ~inst->dst.writemask;
      if (remaining_channels == 0) {
         unsigned vf;
         memcpy(&vf, imm, sizeof(vf));
         vec4_instruction *mov = MOV(inst->dst, brw_imm_vf(vf));
         mov->dst.type = BRW_REGISTER_TYPE_F;
         mov->dst.writemask = WRITEMASK_XYZW;
         inst->insert_after(block, mov);
         last_reg = -1;

         for (int i = 0; i < inst_count; i++) {
            imm_inst[i]->remove(block);
         }
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;
      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}
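
/**
 * Packs the live channels of the uniform vectors together, so unused
 * channels no longer occupy push-constant space.  An illustrative sketch:
 * if u0 only uses .xy and u1 only uses .x, u1.x can be repacked into u0.z,
 * and the loops below rewrite every UNIFORM source to match.
 */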
void
vec4_visitor::pack_uniform_registers()
{
   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            chans_used[reg] = MAX2(chans_used[reg],
                                   BRW_GET_SWZ(inst->src[i].swizzle, c) + 1);
         }
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      assert(src < uniform_array_size);
      int size = chans_used[src];

      if (size == 0)
         continue;

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (chans_used[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = chans_used[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         chans_used[dst] += size;
         chans_used[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
                                              new_chan[src], new_chan[src]);
      }
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
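
/* A sketch of the kind of rewrites performed below (illustrative only):
 *
 *    ADD dst, src0, 0     -> MOV dst, src0
 *    MUL dst, src0, 1.0f  -> MOV dst, src0
 *    MUL dst, src0, -1.0f -> MOV dst, -src0
 */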
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->dst.type, &inst->src[0])) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case BRW_OPCODE_CMP:
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
            break;
         }
         break;
      case SHADER_OPCODE_RCP: {
         vec4_instruction *prev = (vec4_instruction *)inst->prev;
         if (prev->opcode == SHADER_OPCODE_SQRT) {
            if (inst->src[0].equals(src_reg(prev->dst))) {
               inst->opcode = SHADER_OPCODE_RSQ;
               inst->src[0] = prev->src[0];
               progress = true;
            }
         }
         break;
      }
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
693 int pull_constant_loc
[this->uniforms
];
695 /* Only allow 32 registers (256 uniform components) as push constants,
696 * which is the limit on gen6.
698 * If changing this value, note the limitation about total_regs in
701 int max_uniform_components
= 32 * 8;
702 if (this->uniforms
* 4 <= max_uniform_components
)
705 /* Make some sort of choice as to which uniforms get sent to pull
706 * constants. We could potentially do something clever here like
707 * look for the most infrequently used uniform vec4s, but leave
710 for (int i
= 0; i
< this->uniforms
* 4; i
+= 4) {
711 pull_constant_loc
[i
/ 4] = -1;
713 if (i
>= max_uniform_components
) {
714 const gl_constant_value
**values
= &stage_prog_data
->param
[i
];
716 /* Try to find an existing copy of this uniform in the pull
717 * constants if it was part of an array access already.
719 for (unsigned int j
= 0; j
< stage_prog_data
->nr_pull_params
; j
+= 4) {
722 for (matches
= 0; matches
< 4; matches
++) {
723 if (stage_prog_data
->pull_param
[j
+ matches
] != values
[matches
])
728 pull_constant_loc
[i
/ 4] = j
/ 4;
733 if (pull_constant_loc
[i
/ 4] == -1) {
734 assert(stage_prog_data
->nr_pull_params
% 4 == 0);
735 pull_constant_loc
[i
/ 4] = stage_prog_data
->nr_pull_params
/ 4;
737 for (int j
= 0; j
< 4; j
++) {
738 stage_prog_data
->pull_param
[stage_prog_data
->nr_pull_params
++] =
745 /* Now actually rewrite usage of the things we've moved to pull
748 foreach_block_and_inst_safe(block
, vec4_instruction
, inst
, cfg
) {
749 for (int i
= 0 ; i
< 3; i
++) {
750 if (inst
->src
[i
].file
!= UNIFORM
||
751 pull_constant_loc
[inst
->src
[i
].nr
] == -1)
754 int uniform
= inst
->src
[i
].nr
;
756 dst_reg temp
= dst_reg(this, glsl_type::vec4_type
);
758 emit_pull_constant_load(block
, inst
, temp
, inst
->src
[i
],
759 pull_constant_loc
[uniform
]);
761 inst
->src
[i
].file
= temp
.file
;
762 inst
->src
[i
].nr
= temp
.nr
;
763 inst
->src
[i
].reg_offset
= temp
.reg_offset
;
764 inst
->src
[i
].reladdr
= NULL
;
768 /* Repack push constants to remove the now-unused ones. */
769 pack_uniform_registers();
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

   /* "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    * May apply to future SoCs as well.
    */
   if (devinfo->is_cherryview) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control.  They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}
/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
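
/* With those fields set, the generator can emit the sequence above roughly
 * as follows (an illustrative sketch, not actual generator output):
 *
 *   DP4 temp.x vertex uniform[0]  { NoDDClr }
 *   DP4 temp.y vertex uniform[0]  { NoDDClr, NoDDChk }
 *   DP4 temp.z vertex uniform[0]  { NoDDClr, NoDDChk }
 *   DP4 temp.w vertex uniform[0]  { NoDDChk }
 *
 * no_dd_clear keeps an instruction from clearing the scoreboard for channels
 * a later instruction will write, and no_dd_check makes the later instruction
 * skip the dependency check against the earlier write.
 */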
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].reg_offset;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.reg_offset;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}
bool
vec4_instruction::can_reswizzle(const struct brw_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
    * or writemasking are not allowed.
    */
   if (devinfo->gen == 6 && is_math() &&
       (swizzle != BRW_SWIZZLE_XYZW || dst_writemask != WRITEMASK_XYZW))
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}
/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}
/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
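
/* An illustrative sketch of the transform:
 *
 *    MUL vgrf4, vgrf2, vgrf3
 *    MOV m1, vgrf4
 *
 * can become "MUL m1, vgrf2, vgrf3" when vgrf4 is written once and not read
 * again afterwards.
 */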
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.reg_offset == inst->src[0].reg_offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (var_range_end(var_from_reg(alloc, inst->src[0]), 4) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (inst->src[0].in_range(scan_inst->dst, scan_inst->regs_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This doesn't handle coalescing of multiple registers. */
            if (scan_inst->regs_written > 1)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (inst->src[0].in_range(scan_inst->src[i],
                                      scan_inst->regs_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (inst->dst.in_range(scan_inst->dst, scan_inst->regs_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.nr >= scan_inst->base_mrf &&
                inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (inst->dst.in_range(scan_inst->src[i],
                                      scan_inst->regs_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.reg_offset = inst->dst.reg_offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   return progress;
}
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the dereference visitor functions can add reg_offsets
 * to work their way down to the actual member being accessed.  But when it
 * comes to optimization, we'd like to treat each register as individual
 * storage if possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * the shader with a destination that needs to span multiple registers.
 */
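
/* An illustrative sketch: a mat4 temporary initially occupies one size-4
 * VGRF accessed as vgrf5 with reg_offsets 0..3; after splitting, offsets
 * 1..3 become three additional size-1 VGRFs and each reg_offset is folded
 * into the register number itself.
 */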
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && inst->regs_written > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && inst->regs_read(i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.reg_offset != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   invalidate_live_intervals();
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s", brw_instruction_name(inst->opcode));
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f0.%d", inst->flag_subreg);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d.%d", inst->dst.nr, inst->dst.reg_offset);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      if (inst->dst.subnr)
         fprintf(file, "+%d", inst->dst.subnr);
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d", inst->src[i].nr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         if (inst->src[i].subnr)
            fprintf(file, "+%d", inst->src[i].subnr);
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      /* Don't print .0; and only VGRFs have reg_offsets and sizes */
      if (inst->src[i].reg_offset != 0 &&
          inst->src[i].file == VGRF &&
          alloc.sizes[inst->src[i].nr] != 1)
         fprintf(file, ".%d", inst->src[i].reg_offset);

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   fprintf(file, "\n");
}
static inline struct brw_reg
attribute_to_hw_reg(int attr, bool interleaved)
{
   if (interleaved)
      return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
   else
      return brw_vec8_grf(attr, 0);
}
/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
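
/* A quick example of the interleaved mapping, using attribute_to_hw_reg()
 * above (illustrative): with interleaved == true, attribute 5 maps to the
 * second half of register 2 (attr / 2 == 2, subregister offset
 * (attr % 2) * 4 == 4); with interleaved == false it occupies all of
 * register 5.
 */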
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.nr + inst->dst.reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.type = inst->dst.type;
         reg.writemask = inst->dst.writemask;

         inst->dst = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].nr + inst->src[i].reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i] = reg;
      }
   }
}
int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 1];
   memset(attribute_map, 0, sizeof(attribute_map));

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);

   return payload_reg + vs_prog_data->nr_attributes;
}
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      assert(this->uniforms < this->uniform_array_size);

      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}
void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}
void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.reg_offset++;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
   inst->mlen = 2;
   inst->base_mrf = -1;
}
void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         struct src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF:
            reg = brw_vec8_grf(src.nr + src.reg_offset, 0);
            reg.type = src.type;
            reg.swizzle = src.swizzle;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;

         case UNIFORM:
            reg = stride(brw_vec4_grf(prog_data->base.dispatch_grf_start_reg +
                                      (src.nr + src.reg_offset) / 2,
                                      ((src.nr + src.reg_offset) % 2) * 4),
                         0, 4, 1);
            reg.type = src.type;
            reg.swizzle = src.swizzle;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;

         case ARF:
         case FIXED_GRF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }
         src = reg;
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = brw_vec8_grf(dst.nr + dst.reg_offset, 0);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         assert(((dst.nr + dst.reg_offset) & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
         reg = brw_message_reg(dst.nr + dst.reg_offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = static_cast<struct brw_reg>(dst);
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      dst = reg;
   }
}
bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();

   emit_prolog();

   emit_nir_code();
   if (failed)
      return false;
   base_ir = NULL;

   emit_thread_end();

   calculate_cfg();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info.name, iteration, pass_num);  \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })

   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-start",
               stage_abbrev, nir->info.name);

      backend_shader::dump_instructions(filename);
   }

   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(opt_predicated_break, this);
      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
      OPT(eliminate_find_live_channel);
   } while (progress);

   if (OPT(opt_vector_float)) {
      OPT(opt_cse);
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (failed)
      return false;

   setup_payload();

   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (alloc.sizes[i])
            spill_reg(i);
      }
   }

   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      compiler->shader_perf_log(log_data,
                                "%s shader triggered register spilling.  "
                                "Try reducing the number of live vec4 values "
                                "to improve performance.\n",
                                stage_name);

      while (!reg_allocate()) {
         if (failed)
            return false;
      }
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed;
}

} /* namespace brw */
extern "C" {

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               const nir_shader *shader,
               gl_clip_plane *clip_planes,
               bool use_legacy_snorm_formula,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str)
{
   const unsigned *assembly = NULL;

   unsigned nr_attributes = _mesa_bitcount_64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute.  So, add an extra slot.
    */
   if (shader->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
      nr_attributes++;
   }

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (compiler->scalar_stage[MESA_SHADER_VERTEX])
      prog_data->base.urb_read_length = DIV_ROUND_UP(nr_attributes, 2);
   else
      prog_data->base.urb_read_length = DIV_ROUND_UP(MAX2(nr_attributes, 1), 2);

   prog_data->nr_attributes = nr_attributes;

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attributes, (unsigned)prog_data->base.vue_map.num_slots);

   if (compiler->devinfo->gen == 6)
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   else
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
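
   /* A worked example of the sizing above (illustrative): with 18 VUE slots,
    * gen6 rounds up to DIV_ROUND_UP(18, 8) = 3 entry-size units, while later
    * gens round up to DIV_ROUND_UP(18, 4) = 5.
    */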
   if (compiler->scalar_stage[MESA_SHADER_VERTEX]) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
                   NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
                   shader, 8, shader_time_index);
      if (!v.run_vs(clip_planes)) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                     &prog_data->base.base, v.promoted_constants,
                     v.runtime_check_aads_emit, "VS");
      if (INTEL_DEBUG & DEBUG_VS) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            shader->info.label ? shader->info.label :
                               "unnamed",
                            shader->info.name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8);
      assembly = g.get_assembly(final_assembly_size);
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, log_data, key, prog_data,
                        shader, clip_planes, mem_ctx,
                        shader_time_index, use_legacy_snorm_formula);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                            shader, &prog_data->base, v.cfg,
                                            final_assembly_size);
   }

   return assembly;
}

} /* extern "C" */