/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_vs.h"
#include "brw_nir.h"
#include "brw_vec4_live_variables.h"
#include "brw_dead_control_flow.h"

extern "C" {
#include "main/macros.h"
#include "main/shaderobj.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
}
#include "main/context.h"

#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {

void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}

src_reg::src_reg(register_file file, int reg, const glsl_type *type)
{
   init();

   this->file = file;
   this->reg = reg;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
}

/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(float f)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   this->fixed_hw_reg.dw1.f = f;
}

src_reg::src_reg(uint32_t u)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   this->fixed_hw_reg.dw1.ud = u;
}

src_reg::src_reg(int32_t i)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   this->fixed_hw_reg.dw1.d = i;
}

src_reg::src_reg(uint8_t vf[4])
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_VF;
   memcpy(&this->fixed_hw_reg.dw1.ud, vf, sizeof(unsigned));
}

src_reg::src_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_VF;
   this->fixed_hw_reg.dw1.ud = (vf0 <<  0) |
                               (vf1 <<  8) |
                               (vf2 << 16) |
                               (vf3 << 24);
}
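
/* The four bytes are 8-bit "restricted float" (VF) values, one per channel,
 * so a whole vec4 of float immediates fits in a single dword -- e.g. packing
 * brw_float_to_vf(1.0f) into each byte yields the immediate <1, 1, 1, 1>.
 * opt_vector_float() below relies on this constructor to collapse scalar
 * float MOVs.
 */
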
src_reg::src_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

src_reg::src_reg(const dst_reg &reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}

void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}

dst_reg::dst_reg(register_file file, int reg)
{
   init();

   this->file = file;
   this->reg = reg;
}

dst_reg::dst_reg(register_file file, int reg, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->reg = reg;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(register_file file, int reg, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->reg = reg;
   this->type = type;
   this->writemask = writemask;
}

dst_reg::dst_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

dst_reg::dst_reg(const src_reg &reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}

bool
dst_reg::equals(const dst_reg &r) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset == r.reg_offset &&
           type == r.type &&
           negate == r.negate &&
           abs == r.abs &&
           writemask == r.writemask &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))) &&
           ((file != HW_REG && file != IMM) ||
            memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                   sizeof(fixed_hw_reg)) == 0));
}

bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return true;
   default:
      return false;
   }
}

unsigned
vec4_instruction::regs_read(unsigned arg) const
{
   if (src[arg].file == BAD_FILE)
      return 0;

   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return arg == 0 ? mlen : 1;

   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return arg == 1 ? mlen : 1;

   default:
      return 1;
   }
}

bool
vec4_instruction::can_do_source_mods(const struct brw_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0 || inst->is_send_from_grf())
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      return inst->header_size;
   default:
      unreachable("not reached");
   }
}

bool
src_reg::equals(const src_reg &r) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset == r.reg_offset &&
           type == r.type &&
           negate == r.negate &&
           abs == r.abs &&
           swizzle == r.swizzle &&
           !reladdr && !r.reladdr &&
           memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0);
}

bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   int last_reg = -1, last_reg_offset = -1;
   enum register_file last_reg_file = BAD_FILE;

   int remaining_channels = 0;
   uint8_t imm[4] = { 0 };
   int inst_count = 0;
   vec4_instruction *imm_inst[4];

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (last_reg != inst->dst.reg ||
          last_reg_offset != inst->dst.reg_offset ||
          last_reg_file != inst->dst.file) {
         last_reg = inst->dst.reg;
         last_reg_offset = inst->dst.reg_offset;
         last_reg_file = inst->dst.file;
         remaining_channels = WRITEMASK_XYZW;

         inst_count = 0;
      }

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->dst.writemask == WRITEMASK_XYZW ||
          inst->src[0].file != IMM)
         continue;

      int vf = brw_float_to_vf(inst->src[0].fixed_hw_reg.dw1.f);
      if (vf == -1)
         continue;

      if ((inst->dst.writemask & WRITEMASK_X) != 0)
         imm[0] = vf;
      if ((inst->dst.writemask & WRITEMASK_Y) != 0)
         imm[1] = vf;
      if ((inst->dst.writemask & WRITEMASK_Z) != 0)
         imm[2] = vf;
      if ((inst->dst.writemask & WRITEMASK_W) != 0)
         imm[3] = vf;

      imm_inst[inst_count++] = inst;

      remaining_channels &= ~inst->dst.writemask;
      if (remaining_channels == 0) {
         vec4_instruction *mov = MOV(inst->dst, imm);
         mov->dst.type = BRW_REGISTER_TYPE_F;
         mov->dst.writemask = WRITEMASK_XYZW;
         inst->insert_after(block, mov);
         last_reg = -1;

         for (int i = 0; i < inst_count; i++) {
            imm_inst[i]->remove(block);
         }
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
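
/* For example, assuming all three immediates are representable as VF, the
 * sequence
 *
 *    mov vgrf3.0.x:F, 1.000000F
 *    mov vgrf3.0.y:F, 0.000000F
 *    mov vgrf3.0.zw:F, 0.500000F
 *
 * covers all four channels of vgrf3 and collapses into the single
 *
 *    mov vgrf3.0:F, [1F, 0F, 0.5F, 0.5F]VF
 */
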
/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;
      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != GRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .reg index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].reg += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}

void
vec4_visitor::pack_uniform_registers()
{
   bool uniform_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(uniform_used, 0, sizeof(uniform_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         uniform_used[inst->src[i].reg] = true;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      assert(src < uniform_array_size);
      int size = this->uniform_vector_size[src];

      if (!uniform_used[src]) {
         this->uniform_vector_size[src] = 0;
         continue;
      }

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (this->uniform_vector_size[dst] + size <= 4)
            break;
      }

      if (dst == src) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = this->uniform_vector_size[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         this->uniform_vector_size[dst] += size;
         this->uniform_vector_size[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].reg;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].reg = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
                                              new_chan[src], new_chan[src]);
      }
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->dst.type,
                                       &inst->src[0].fixed_hw_reg)) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = src_reg(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = src_reg(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = src_reg(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
         }
         break;

      case SHADER_OPCODE_RCP: {
         vec4_instruction *prev = (vec4_instruction *)inst->prev;
         if (prev->opcode == SHADER_OPCODE_SQRT) {
            if (inst->src[0].equals(src_reg(prev->dst))) {
               inst->opcode = SHADER_OPCODE_RSQ;
               inst->src[0] = prev->src[0];
               progress = true;
            }
         }
         break;
      }

      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
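
/* e.g. "mul dst, a, 1.0F" becomes "mov dst, a", and "cmp.ge ... -|a|, 0"
 * becomes "cmp.z ... a, 0", since -|a| >= 0 holds exactly when a == 0.
 */
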
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const gl_constant_value **values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].reg] == -1)
            continue;

         int uniform = inst->src[i].reg;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
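
/* With the gen6 limit above (32 registers * 8 components = 256 floats,
 * i.e. 64 vec4 slots), a shader declaring 80 vec4 uniforms keeps the first
 * 64 in the push constant buffer and demotes the remaining 16 to pull
 * constant loads.
 */
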
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

   /* "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    * May apply to future SoCs as well.
    */
   if (devinfo->is_cherryview) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control.  They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}

/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].reg + inst->src[i].reg_offset;
            if (inst->src[i].file == GRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == HW_REG) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.reg + inst->dst.reg_offset;
         if (inst->dst.file == GRF) {
            if (last_grf_write[reg] &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == HW_REG) {
            if (inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)
               memset(last_grf_write, 0, sizeof(last_grf_write));
            if (inst->dst.fixed_hw_reg.file == BRW_MESSAGE_REGISTER_FILE)
               memset(last_mrf_write, 0, sizeof(last_mrf_write));
         }
      }
   }
}
bool
vec4_instruction::can_reswizzle(int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   return true;
}

/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle='yywx', MUL a.xy b c -> MUL a.yy_x b.yy_x c.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}

/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != GRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (var_range_end(var_from_reg(alloc, inst->src[0]), 4) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst, block) {
         _scan_inst = scan_inst;

         if (inst->src[0].in_range(scan_inst->dst, scan_inst->regs_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * GRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This doesn't handle coalescing of multiple registers. */
            if (scan_inst->regs_written > 1)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (inst->src[0].in_range(scan_inst->src[i],
                                      scan_inst->regs_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes our destination here, we can't coalesce
          * before that.
          */
         if (inst->dst.in_range(scan_inst->dst, scan_inst->regs_written))
            break;

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.reg >= scan_inst->base_mrf &&
                inst->dst.reg < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (inst->dst.in_range(scan_inst->src[i],
                                      scan_inst->regs_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == GRF &&
                scan_inst->dst.reg == inst->src[0].reg &&
                scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->dst.reg_offset = inst->dst.reg_offset;
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
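
/* e.g. when vgrf3 is not read again afterwards,
 *
 *    add vgrf3.0:F, vgrf1.xyzw:F, vgrf2.xyzw:F
 *    mov vgrf4.0:F, vgrf3.xyzw:F
 *
 * is rewritten so the ADD targets vgrf4 directly and the MOV is removed.
 */
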
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = src_reg(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   return progress;
}

/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the dereference visitor functions can add reg_offsets
 * to work their way down to the actual member being accessed.  But when it
 * comes to optimization, we'd like to treat each register as individual
 * storage if possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF, since the message payload has to stay contiguous.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF && inst->regs_written > 1)
         split_grf[inst->dst.reg] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && inst->regs_read(i) > 1)
            split_grf[inst->src[i].reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF && split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   invalidate_live_intervals();
}
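
/* e.g. a mat3 temporary occupying a size-3 vgrf5 keeps vgrf5 for reg_offset
 * 0 and gets two freshly allocated size-1 registers for offsets 1 and 2,
 * after which each column can be tracked and allocated independently.
 */
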
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0.%d) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg);
   }

   fprintf(file, "%s", brw_instruction_name(inst->opcode));
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f0.%d", inst->flag_subreg);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case GRF:
      fprintf(file, "vgrf%d.%d", inst->dst.reg, inst->dst.reg_offset);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.reg);
      break;
   case HW_REG:
      if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         switch (inst->dst.fixed_hw_reg.nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         }
      } else {
         fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
      }
      if (inst->dst.fixed_hw_reg.subnr)
         fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   default:
      fprintf(file, "???");
      break;
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case GRF:
         fprintf(file, "vgrf%d", inst->src[i].reg);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].reg);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].reg);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].fixed_hw_reg.dw1.f);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].fixed_hw_reg.dw1.d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].fixed_hw_reg.dw1.ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >>  0) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >>  8) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case HW_REG:
         if (inst->src[i].fixed_hw_reg.negate)
            fprintf(file, "-");
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            switch (inst->src[i].fixed_hw_reg.nr) {
            case BRW_ARF_NULL:
               fprintf(file, "null");
               break;
            case BRW_ARF_ADDRESS:
               fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_ACCUMULATOR:
               fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_FLAG:
               fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            default:
               fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            }
         } else {
            fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
         }
         if (inst->src[i].fixed_hw_reg.subnr)
            fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      default:
         fprintf(file, "???");
         break;
      }

      /* Don't print .0; and only VGRFs have reg_offsets and sizes */
      if (inst->src[i].reg_offset != 0 &&
          inst->src[i].file == GRF &&
          alloc.sizes[inst->src[i].reg] != 1)
         fprintf(file, ".%d", inst->src[i].reg_offset);

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   fprintf(file, "\n");
}
static inline struct brw_reg
attribute_to_hw_reg(int attr, bool interleaved)
{
   if (interleaved)
      return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
   else
      return brw_vec8_grf(attr, 0);
}
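
/* e.g. attr 5 maps, when interleaved, to the second half of g2
 * (g2.4<0,4,1>); when not interleaved, it maps to the whole of g5.
 */
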
/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.type = inst->dst.type;
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }
}

int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 1];
   memset(attribute_map, 0, sizeof(attribute_map));

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   unsigned vue_entries =
      MAX2(nr_attributes, prog_data->vue_map.num_slots);

   if (devinfo->gen == 6)
      prog_data->urb_entry_size = ALIGN(vue_entries, 8) / 8;
   else
      prog_data->urb_entry_size = ALIGN(vue_entries, 4) / 4;

   return payload_reg + nr_attributes;
}
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      assert(this->uniforms < this->uniform_array_size);
      this->uniform_vector_size[this->uniforms] = 1;

      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}

void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}

void
vec4_visitor::assign_binding_table_offsets()
{
   assign_common_binding_table_offsets(0);
}

src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}

void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_d(), reset_end, src_reg(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), src_reg(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, src_reg(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, src_reg(1u));
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.reg_offset++;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, src_reg(index * SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, src_reg(value)));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
   inst->mlen = 2;
   inst->base_mrf = -1;
}

bool
vec4_visitor::run(gl_clip_plane *clip_planes)
{
   bool use_vec4_nir =
      compiler->glsl_compiler_options[stage].NirOptions != NULL;

   sanity_param_count = prog->Parameters->NumParameters;

   if (shader_time_index >= 0)
      emit_shader_time_begin();

   assign_binding_table_offsets();

   emit_prolog();

   if (use_vec4_nir) {
      assert(prog->nir != NULL);
      emit_nir_code();
      if (failed())
         return false;
   } else if (shader) {
      /* Generate VS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      visit_instructions(shader->base.ir);
   } else {
      emit_program_code();
   }

   if (key->userclip_active && !prog->UsesClipDistanceOut)
      setup_uniform_clipplane_values(clip_planes);

   emit_thread_end();

   if (failed())
      return false;

   calculate_cfg();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   if (shader || use_vec4_nir) {
      move_grf_array_access_to_scratch();
      move_uniform_array_access_to_pull_constants();
   } else {
      /* The ARB_vertex_program frontend emits pull constant loads directly
       * rather than using reladdr, so we don't need to walk through all the
       * instructions looking for things to move.  There isn't anything.
       *
       * We do still need to split things to vec4 size.
       */
      split_uniform_registers();
   }
   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%04d-%02d-%02d-" #pass,            \
                  stage_abbrev, shader_prog ? shader_prog->Name : 0,   \
                  iteration, pass_num);                                \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })
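
   /* OPT() is a GNU statement expression whose value is this_progress, so a
    * pass can be run and tested in one step (see the
    * "if (OPT(opt_vector_float))" use below) while still dumping per-pass IR
    * when INTEL_DEBUG=optimizer is set.
    */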

   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%04d-00-start",
               stage_abbrev, shader_prog ? shader_prog->Name : 0);

      backend_shader::dump_instructions(filename);
   }

   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
      OPT(eliminate_find_live_channel);
   } while (progress);

   pass_num = 0;

   if (OPT(opt_vector_float)) {
      OPT(opt_cse);
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (failed())
      return false;

   setup_payload();

   if (false) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (!no_spill[i])
            spill_reg(i);
      }
   }

   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      compiler->shader_perf_log(log_data,
                                "%s shader triggered register spilling.  "
                                "Try reducing the number of live vec4 values "
                                "to improve performance.\n",
                                stage_name);

      while (!reg_allocate()) {
         if (failed())
            return false;
      }
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   /* If any state parameters were appended, then ParameterValues could have
    * been realloced, in which case the driver uniform storage set up by
    * _mesa_associate_uniform_storage() would point to freed memory.  Make
    * sure that didn't happen.
    */
   assert(sanity_param_count == prog->Parameters->NumParameters);

   return !failed();
}

} /* namespace brw */

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_vs_emit(struct brw_context *brw,
            void *mem_ctx,
            const struct brw_vs_prog_key *key,
            struct brw_vs_prog_data *prog_data,
            struct gl_vertex_program *vp,
            struct gl_shader_program *prog,
            unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;
   const unsigned *assembly = NULL;

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, prog, &vp->Base, ST_VS);

   if (unlikely(INTEL_DEBUG & DEBUG_VS) && shader && shader->base.ir)
      brw_dump_ir("vertex", prog, &shader->base, &vp->Base);

   if (!vp->Base.nir &&
       (brw->intelScreen->compiler->scalar_vs ||
        brw->intelScreen->compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions != NULL)) {
      /* Normally we generate NIR in LinkShader() or
       * ProgramStringNotify(), but Mesa's fixed-function vertex program
       * handling doesn't notify the driver at all.  Just do it here, at
       * the last minute, even though it's lame.
       */
      assert(vp->Base.Id == 0 && prog == NULL);
      vp->Base.nir =
         brw_create_nir(brw, NULL, &vp->Base, MESA_SHADER_VERTEX,
                        brw->intelScreen->compiler->scalar_vs);
   }

   if (brw->intelScreen->compiler->scalar_vs) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(brw->intelScreen->compiler, brw,
                   mem_ctx, MESA_SHADER_VERTEX, key,
                   &prog_data->base.base, prog, &vp->Base,
                   8, st_index);
      if (!v.run_vs(brw_select_clip_planes(&brw->ctx))) {
         if (prog) {
            prog->LinkStatus = false;
            ralloc_strcat(&prog->InfoLog, v.fail_msg);
         }

         _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
                       v.fail_msg);

         return NULL;
      }

      fs_generator g(brw->intelScreen->compiler, brw,
                     mem_ctx, (void *) key, &prog_data->base.base,
                     &vp->Base, v.promoted_constants,
                     v.runtime_check_aads_emit, "VS");
      if (INTEL_DEBUG & DEBUG_VS) {
         char *name;
         if (prog) {
            name = ralloc_asprintf(mem_ctx, "%s vertex shader %d",
                                   prog->Label ? prog->Label : "unnamed",
                                   prog->Name);
         } else {
            name = ralloc_asprintf(mem_ctx, "vertex program %d",
                                   vp->Base.Id);
         }
         g.enable_debug(name);
      }
      g.generate_code(v.cfg, 8);
      assembly = g.get_assembly(final_assembly_size);
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(brw->intelScreen->compiler, brw, key, prog_data,
                        vp, prog, mem_ctx, st_index,
                        !_mesa_is_gles3(&brw->ctx));
      if (!v.run(brw_select_clip_planes(&brw->ctx))) {
         if (prog) {
            prog->LinkStatus = false;
            ralloc_strcat(&prog->InfoLog, v.fail_msg);
         }

         _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
                       v.fail_msg);

         return NULL;
      }

      vec4_generator g(brw->intelScreen->compiler, brw,
                       prog, &vp->Base, &prog_data->base,
                       mem_ctx, INTEL_DEBUG & DEBUG_VS, "vertex", "VS");
      assembly = g.generate_assembly(v.cfg, final_assembly_size);
   }

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once) {
         brw_vs_debug_recompile(brw, prog, key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      shader->compiled_once = true;
   }

   return assembly;
}

void
brw_vue_setup_prog_key_for_precompile(struct gl_context *ctx,
                                      struct brw_vue_prog_key *key,
                                      GLuint id, struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   key->program_string_id = id;

   brw_setup_tex_for_precompile(brw, &key->tex, prog);
}