/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_dead_control_flow.h"

extern "C" {
#include "main/macros.h"
#include "main/shaderobj.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"
}

#define MAX_INSTRUCTION (1 << 30)

namespace brw {

/**
 * Common helper for constructing swizzles.  When only a subset of
 * channels of a vec4 are used, we don't want to reference the other
 * channels, as that will tell optimization passes that those other
 * channels are used.
 */
unsigned
swizzle_for_size(int size)
{
   static const unsigned size_swizzles[4] = {
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
      BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
   };

   assert((size >= 1) && (size <= 4));
   return size_swizzles[size - 1];
}
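
/* Example (illustrative; not from the original file): swizzle_for_size(2)
 * yields BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y), so a vec2
 * referenced as src.xyyy never tells later passes that .z or .w are used.
 */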

void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}

src_reg::src_reg(register_file file, int reg, const glsl_type *type)
{
   init();

   this->file = file;
   this->reg = reg;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
}

/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(float f)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   this->fixed_hw_reg.dw1.f = f;
}

src_reg::src_reg(uint32_t u)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   this->fixed_hw_reg.dw1.ud = u;
}

src_reg::src_reg(int32_t i)
{
   init();

   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   this->fixed_hw_reg.dw1.d = i;
}

src_reg::src_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

   int swizzles[4];
   int next_chan = 0;
   int last = 0;

   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}

void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg(register_file file, int reg)
{
   init();

   this->file = file;
   this->reg = reg;
}

dst_reg::dst_reg(register_file file, int reg, const glsl_type *type,
                 int writemask)
{
   init();

   this->file = file;
   this->reg = reg;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(struct brw_reg reg)
{
   init();

   this->file = HW_REG;
   this->fixed_hw_reg = reg;
   this->type = reg.type;
}

dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   /* How should we do writemasking when converting from a src_reg?  It seems
    * pretty obvious that for src.xxxx the caller wants to write to src.x, but
    * what about for src.wx?  Just special-case src.xxxx for now.
    */
   if (reg.swizzle == BRW_SWIZZLE_XXXX)
      this->writemask = WRITEMASK_X;
   else
      this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}

bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return true;
   default:
      return false;
   }
}

bool
vec4_instruction::can_do_source_mods(struct brw_context *brw)
{
   if (brw->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      return inst->header_present ? 1 : 0;
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return 0;
   default:
      unreachable("not reached");
   }
}

bool
src_reg::equals(const src_reg &r) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset == r.reg_offset &&
           type == r.type &&
           negate == r.negate &&
           abs == r.abs &&
           swizzle == r.swizzle &&
           !reladdr && !r.reladdr &&
           memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0);
}

/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_in_list_safe(vec4_instruction, inst, &instructions) {
      if (inst->dst.file == BAD_FILE || inst->dst.file == HW_REG)
         continue;

      int swizzle[4];

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = 2;
         swizzle[3] = 3;
         break;
      case BRW_OPCODE_DP3:
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = 2;
         swizzle[3] = -1;
         break;
      case BRW_OPCODE_DP2:
         swizzle[0] = 0;
         swizzle[1] = 1;
         swizzle[2] = -1;
         swizzle[3] = -1;
         break;
      default:
         swizzle[0] = inst->dst.writemask & WRITEMASK_X ? 0 : -1;
         swizzle[1] = inst->dst.writemask & WRITEMASK_Y ? 1 : -1;
         swizzle[2] = inst->dst.writemask & WRITEMASK_Z ? 2 : -1;
         swizzle[3] = inst->dst.writemask & WRITEMASK_W ? 3 : -1;
         break;
      }

      /* Resolve unread channels (-1) by assigning them the swizzle of the
       * first channel that is used.
       */
      int first_used_channel = 0;
      for (int i = 0; i < 4; i++) {
         if (swizzle[i] != -1) {
            first_used_channel = swizzle[i];
            break;
         }
      }
      for (int i = 0; i < 4; i++) {
         if (swizzle[i] == -1) {
            swizzle[i] = first_used_channel;
         }
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != GRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         int swiz[4];
         for (int j = 0; j < 4; j++) {
            swiz[j] = BRW_GET_SWZ(inst->src[i].swizzle, swizzle[j]);
         }

         unsigned new_swizzle = BRW_SWIZZLE4(swiz[0], swiz[1], swiz[2], swiz[3]);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals(false);

   return progress;
}

static bool
try_eliminate_instruction(vec4_instruction *inst, int new_writemask,
                          const struct brw_context *brw)
{
   if (inst->has_side_effects())
      return false;

   if (new_writemask == 0) {
      /* Don't dead code eliminate instructions that write to the
       * accumulator as a side-effect. Instead just set the destination
       * to the null register to free it.
       */
      if (inst->writes_accumulator || inst->writes_flag()) {
         inst->dst = dst_reg(retype(brw_null_reg(), inst->dst.type));
      } else {
         inst->opcode = BRW_OPCODE_NOP;
      }

      return true;
   } else if (inst->dst.writemask != new_writemask) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case VS_OPCODE_PULL_CONSTANT_LOAD:
      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         break;
      default:
         /* Do not set a writemask on Gen6 for math instructions, those are
          * executed using align1 mode that does not support a destination mask.
          */
         if (!(brw->gen == 6 && inst->is_math()) && !inst->is_tex()) {
            inst->dst.writemask = new_writemask;
            return true;
         }
         break;
      }
   }

   return false;
}

/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something deffed but not used won't be considered to
 * interfere with other regs.
 */
bool
vec4_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = -1;

   calculate_live_intervals();

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      pc++;

      bool inst_writes_flag = false;
      if (inst->dst.file != GRF) {
         if (inst->dst.is_null() && inst->writes_flag()) {
            inst_writes_flag = true;
         } else {
            continue;
         }
      }

      if (inst->dst.file == GRF) {
         int write_mask = inst->dst.writemask;

         for (int c = 0; c < 4; c++) {
            if (write_mask & (1 << c)) {
               assert(this->virtual_grf_end[inst->dst.reg * 4 + c] >= pc);
               if (this->virtual_grf_end[inst->dst.reg * 4 + c] == pc) {
                  write_mask &= ~(1 << c);
               }
            }
         }

         progress = try_eliminate_instruction(inst, write_mask, brw) ||
                    progress;
      }

      if (inst->predicate || inst->prev == NULL)
         continue;

      int dead_channels;
      if (inst_writes_flag) {
/* Arbitrarily chosen, other than not being an xyzw writemask. */
#define FLAG_WRITEMASK (1 << 5)
         dead_channels = inst->reads_flag() ? 0 : FLAG_WRITEMASK;
      } else {
         dead_channels = inst->dst.writemask;

         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file != GRF ||
                inst->src[i].reg != inst->dst.reg)
               continue;

            for (int j = 0; j < 4; j++) {
               int swiz = BRW_GET_SWZ(inst->src[i].swizzle, j);
               dead_channels &= ~(1 << swiz);
            }
         }
      }

      for (exec_node *node = inst->prev, *prev = node->prev;
           prev != NULL && dead_channels != 0;
           node = prev, prev = prev->prev) {
         vec4_instruction *scan_inst = (vec4_instruction  *)node;

         if (scan_inst->is_control_flow())
            break;

         if (inst_writes_flag) {
            if (scan_inst->dst.is_null() && scan_inst->writes_flag()) {
               scan_inst->opcode = BRW_OPCODE_NOP;
               progress = true;
               continue;
            } else if (scan_inst->reads_flag()) {
               break;
            }
         }

         if (inst->dst.file == scan_inst->dst.file &&
             inst->dst.reg == scan_inst->dst.reg &&
             inst->dst.reg_offset == scan_inst->dst.reg_offset) {
            int new_writemask = scan_inst->dst.writemask & ~dead_channels;

            progress = try_eliminate_instruction(scan_inst, new_writemask, brw) ||
                       progress;
         }

         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file != inst->dst.file ||
                scan_inst->src[i].reg != inst->dst.reg)
               continue;

            for (int j = 0; j < 4; j++) {
               int swiz = BRW_GET_SWZ(scan_inst->src[i].swizzle, j);
               dead_channels &= ~(1 << swiz);
            }
         }
      }
   }

   foreach_block_and_inst_safe (block, backend_instruction, inst, cfg) {
      if (inst->opcode == BRW_OPCODE_NOP) {
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals(false);

   return progress;
}

void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .reg index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_in_list(vec4_instruction, inst, &instructions) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].reg += inst->src[i].reg_offset;
         inst->src[i].reg_offset = 0;
      }
   }

   /* Update that everything is now vector-sized. */
   for (int i = 0; i < this->uniforms; i++) {
      this->uniform_size[i] = 1;
   }
}

void
vec4_visitor::pack_uniform_registers()
{
   bool uniform_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(uniform_used, 0, sizeof(uniform_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_in_list(vec4_instruction, inst, &instructions) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         uniform_used[inst->src[i].reg] = true;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      assert(src < uniform_array_size);
      int size = this->uniform_vector_size[src];

      if (!uniform_used[src]) {
         this->uniform_vector_size[src] = 0;
         continue;
      }

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (this->uniform_vector_size[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = this->uniform_vector_size[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         this->uniform_vector_size[dst] += size;
         this->uniform_vector_size[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;
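
   /* Illustrative example (not from the original file): if slots 0 and 1
    * both hold used vec2s, slot 1 packs into channels 2..3 of slot 0
    * (new_loc[1] = 0, new_chan[1] = 2), and new_uniform_count ends up 1.
    */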

   /* Now, update the instructions for our repacked uniforms. */
   foreach_in_list(vec4_instruction, inst, &instructions) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].reg;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].reg = new_loc[src];

         int sx = BRW_GET_SWZ(inst->src[i].swizzle, 0) + new_chan[src];
         int sy = BRW_GET_SWZ(inst->src[i].swizzle, 1) + new_chan[src];
         int sz = BRW_GET_SWZ(inst->src[i].swizzle, 2) + new_chan[src];
         int sw = BRW_GET_SWZ(inst->src[i].swizzle, 3) + new_chan[src];
         inst->src[i].swizzle = BRW_SWIZZLE4(sx, sy, sz, sw);
      }
   }
}

/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = src_reg(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = src_reg(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = src_reg(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals(false);

   return progress;
}

/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;
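
   /* Put differently (illustrative arithmetic, not from the original file):
    * a GRF holds 8 floats, so 32 registers * 8 = 256 scalar components,
    * i.e. at most 64 vec4 uniforms can stay in push constants.
    */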

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const gl_constant_value **values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_in_list_safe(vec4_instruction, inst, &instructions) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].reg] == -1)
            continue;

         int uniform = inst->src[i].reg;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}

/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
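
/* A sketch of the intended result (the {NoDDClr}/{NoDDChk} spellings follow
 * gen disassembly conventions and are assumed here, not taken from this
 * file):
 *
 *   DP4 temp.x vertex uniform[0]  {NoDDClr}
 *   DP4 temp.y vertex uniform[0]  {NoDDClr,NoDDChk}
 *   DP4 temp.z vertex uniform[0]  {NoDDClr,NoDDChk}
 *   DP4 temp.w vertex uniform[0]  {NoDDChk}
 *
 * which corresponds to the no_dd_clear/no_dd_check fields set below.
 */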

void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].reg + inst->src[i].reg_offset;
            if (inst->src[i].file == GRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == HW_REG) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         /* In the presence of send messages, totally interrupt dependency
          * control.  They're long enough that the chance of dependency
          * control around them just doesn't matter.
          */
         if (inst->mlen) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* It looks like setting dependency control on a predicated
          * instruction hangs the GPU.
          */
         if (inst->predicate) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Dependency control does not work well over math instructions.
          */
         if (inst->is_math()) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.reg + inst->dst.reg_offset;
         if (inst->dst.file == GRF) {
            if (last_grf_write[reg] &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == HW_REG) {
            if (inst->dst.fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE)
               memset(last_grf_write, 0, sizeof(last_grf_write));
            if (inst->dst.fixed_hw_reg.file == BRW_MESSAGE_REGISTER_FILE)
               memset(last_mrf_write, 0, sizeof(last_mrf_write));
         }
      }
   }
}

bool
vec4_instruction::can_reswizzle(int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   return true;
}

/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   int new_writemask = 0;
   int new_swizzle[4] = { 0 };

   /* Dot product instructions write a single result into all channels. */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         for (int c = 0; c < 4; c++) {
            new_swizzle[c] = BRW_GET_SWZ(src[i].swizzle, BRW_GET_SWZ(swizzle, c));
         }

         src[i].swizzle = BRW_SWIZZLE4(new_swizzle[0], new_swizzle[1],
                                       new_swizzle[2], new_swizzle[3]);
      }
   }

   for (int c = 0; c < 4; c++) {
      int bit = 1 << BRW_GET_SWZ(swizzle, c);
      /* Skip components of the swizzle not used by the dst. */
      if (!(dst_writemask & (1 << c)))
         continue;

      /* If we were populating this component, then populate the
       * corresponding channel of the new dst.
       */
      if (dst.writemask & bit)
         new_writemask |= (1 << c);
   }

   dst.writemask = new_writemask;
}
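
/* Worked example (illustrative; not from the original file): when
 * opt_register_coalesce() folds
 *
 *    ADD tmp.zw, x, y
 *    MOV a.xy, tmp.zwww
 *
 * it calls reswizzle(WRITEMASK_XY, the zwww swizzle) on the ADD, producing
 *
 *    ADD a.xy, x.zwww, y.zwww
 *
 * since channel X of the MOV read tmp.z and channel Y read tmp.w.
 */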

/**
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != GRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_end[inst->src[0].reg * 4 + 0] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 1] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 2] > ip ||
          this->virtual_grf_end[inst->src[0].reg * 4 + 3] > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      bool chans_needed[4] = {false, false, false, false};
      int chans_remaining = 0;
      int swizzle_mask = 0;
      for (int i = 0; i < 4; i++) {
         int chan = BRW_GET_SWZ(inst->src[0].swizzle, i);

         if (!(inst->dst.writemask & (1 << i)))
            continue;

         swizzle_mask |= (1 << chan);

         if (!chans_needed[chan]) {
            chans_needed[chan] = true;
            chans_remaining++;
         }
      }

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *scan_inst;
      for (scan_inst = (vec4_instruction *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (vec4_instruction *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg &&
             scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (brw->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * GRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          swizzle_mask)) {
               break;
            }

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate) {
               for (int i = 0; i < 4; i++) {
                  if (scan_inst->dst.writemask & (1 << i) &&
                      chans_needed[i]) {
                     chans_needed[i] = false;
                     chans_remaining--;
                  }
               }
            }

            if (chans_remaining == 0)
               break;
         }

         /* We don't handle flow control here.  Most computation of values
          * that could be coalesced happens just before their use.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         /* If somebody else writes our destination here, we can't coalesce
          * before that.
          */
         if (scan_inst->dst.file == inst->dst.file &&
             scan_inst->dst.reg == inst->dst.reg) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.reg >= scan_inst->base_mrf &&
                inst->dst.reg < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (scan_inst->src[i].file == inst->dst.file &&
                   scan_inst->src[i].reg == inst->dst.reg &&
                   scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
                  interfered = true;
               }
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */

         while (scan_inst != inst) {
            if (scan_inst->dst.file == GRF &&
                scan_inst->dst.reg == inst->src[0].reg &&
                scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->dst.reg_offset = inst->dst.reg_offset;
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals(false);

   return progress;
}

/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the dereference visitor functions can add reg_offsets
 * to work their way down to the actual member being accessed.  But when it
 * comes to optimization, we'd like to treat each register as individual
 * storage if possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF on IVB.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->virtual_grf_sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_in_list(vec4_instruction, inst, &instructions) {
      /* If there's a SEND message loading from a GRF on gen7+, it needs to be
       * contiguous.
       */
      if (inst->is_send_from_grf()) {
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF) {
               split_grf[inst->src[i].reg] = false;
            }
         }
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = virtual_grf_alloc(1);
      for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
         int reg = virtual_grf_alloc(1);
         assert(reg == new_virtual_grf[i] + j - 1);
      }
      this->virtual_grf_sizes[i] = 1;
   }

   foreach_in_list(vec4_instruction, inst, &instructions) {
      if (inst->dst.file == GRF && split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   invalidate_live_intervals(false);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0) ",
              inst->predicate_inverse ? '-' : '+');
   }

   fprintf(file, "%s", brw_instruction_name(inst->opcode));
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case GRF:
      fprintf(file, "vgrf%d.%d", inst->dst.reg, inst->dst.reg_offset);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.reg);
      break;
   case HW_REG:
      if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         switch (inst->dst.fixed_hw_reg.nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                    inst->dst.fixed_hw_reg.subnr);
            break;
         }
      } else {
         fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
      }
      if (inst->dst.fixed_hw_reg.subnr)
         fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   default:
      fprintf(file, "???");
      break;
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case GRF:
         fprintf(file, "vgrf%d", inst->src[i].reg);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].reg);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].reg);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].fixed_hw_reg.dw1.f);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].fixed_hw_reg.dw1.d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].fixed_hw_reg.dw1.ud);
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case HW_REG:
         if (inst->src[i].fixed_hw_reg.negate)
            fprintf(file, "-");
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            switch (inst->src[i].fixed_hw_reg.nr) {
            case BRW_ARF_NULL:
               fprintf(file, "null");
               break;
            case BRW_ARF_ADDRESS:
               fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_ACCUMULATOR:
               fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_FLAG:
               fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            default:
               fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                       inst->src[i].fixed_hw_reg.subnr);
               break;
            }
         } else {
            fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
         }
         if (inst->src[i].fixed_hw_reg.subnr)
            fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      default:
         fprintf(file, "???");
         break;
      }

      /* Don't print .0; and only VGRFs have reg_offsets and sizes */
      if (inst->src[i].reg_offset != 0 &&
          inst->src[i].file == GRF &&
          virtual_grf_sizes[inst->src[i].reg] != 1)
         fprintf(file, ".%d", inst->src[i].reg_offset);

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   fprintf(file, "\n");
}

static inline struct brw_reg
attribute_to_hw_reg(int attr, bool interleaved)
{
   if (interleaved)
      return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
   else
      return brw_vec8_grf(attr, 0);
}
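
/* For instance (illustrative; not from the original file): with
 * interleaved == true, attribute 5 lands in the second half of g2, i.e.
 * stride(brw_vec4_grf(2, 4), 0, 4, 1); with interleaved == false it is
 * simply brw_vec8_grf(5, 0).
 */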

/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_in_list(vec4_instruction, inst, &instructions) {
      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.type = inst->dst.type;
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }
}
)
1531 int attribute_map
[VERT_ATTRIB_MAX
+ 1];
1532 memset(attribute_map
, 0, sizeof(attribute_map
));
1535 for (int i
= 0; i
< VERT_ATTRIB_MAX
; i
++) {
1536 if (vs_prog_data
->inputs_read
& BITFIELD64_BIT(i
)) {
1537 attribute_map
[i
] = payload_reg
+ nr_attributes
;
1542 /* VertexID is stored by the VF as the last vertex element, but we
1543 * don't represent it with a flag in inputs_read, so we call it
1546 if (vs_prog_data
->uses_vertexid
|| vs_prog_data
->uses_instanceid
) {
1547 attribute_map
[VERT_ATTRIB_MAX
] = payload_reg
+ nr_attributes
;
1551 lower_attributes_to_hw_regs(attribute_map
, false /* interleaved */);
1553 /* The BSpec says we always have to read at least one thing from
1554 * the VF, and it appears that the hardware wedges otherwise.
1556 if (nr_attributes
== 0)
1559 prog_data
->urb_read_length
= (nr_attributes
+ 1) / 2;
1561 unsigned vue_entries
=
1562 MAX2(nr_attributes
, prog_data
->vue_map
.num_slots
);
1565 prog_data
->urb_entry_size
= ALIGN(vue_entries
, 8) / 8;
1567 prog_data
->urb_entry_size
= ALIGN(vue_entries
, 4) / 4;
1569 return payload_reg
+ nr_attributes
;

int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (brw->gen < 6 && this->uniforms == 0) {
      assert(this->uniforms < this->uniform_array_size);
      this->uniform_vector_size[this->uniforms] = 1;

      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }
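
   /* Two vec4 uniforms fit in each push-constant register (8 floats), hence
    * the ALIGN(uniforms, 2) / 2 above; e.g. 5 vec4 uniforms occupy 3
    * registers (illustrative note, not from the original file).
    */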

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}

void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}

void
vec4_visitor::assign_binding_table_offsets()
{
   assign_common_binding_table_offsets(0);
}

src_reg
vec4_visitor::get_timestamp()
{
   assert(brw->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}

void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_d(), reset_end, src_reg(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), src_reg(-2u)));

   emit_shader_time_write(st_base, src_reg(diff));
   emit_shader_time_write(st_written, src_reg(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(st_reset, src_reg(1u));
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_shader_time_write(enum shader_time_shader_type type,
                                     src_reg value)
{
   int shader_time_index =
      brw_get_shader_time_index(brw, shader_prog, prog, type);

   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.reg_offset++;

   offset.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(offset, src_reg(shader_time_index * SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, src_reg(value)));

   emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
}

bool
vec4_visitor::run()
{
   sanity_param_count = prog->Parameters->NumParameters;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_begin();

   assign_binding_table_offsets();

   emit_prolog();

   /* Generate VS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   if (shader) {
      visit_instructions(shader->base.ir);
   } else {
      emit_program_code();
   }
   base_ir = NULL;

   if (key->userclip_active && !prog->UsesClipDistanceOut)
      setup_uniform_clipplane_values();

   emit_thread_end();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   if (shader) {
      move_grf_array_access_to_scratch();
      move_uniform_array_access_to_pull_constants();
   } else {
      /* The ARB_vertex_program frontend emits pull constant loads directly
       * rather than using reladdr, so we don't need to walk through all the
       * instructions looking for things to move.  There isn't anything.
       *
       * We do still need to split things to vec4 size.
       */
      split_uniform_registers();
   }
   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

   const char *stage_name = stage == MESA_SHADER_GEOMETRY ? "gs" : "vs";

#define OPT(pass, args...) do {                                        \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%04d-%02d-%02d-" #pass,            \
                  stage_name, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
                                                                       \
         backend_visitor::dump_instructions(filename);                 \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
   } while (false)

   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%04d-00-start",
               stage_name, shader_prog ? shader_prog->Name : 0);

      backend_visitor::dump_instructions(filename);
   }

   bool progress;
   int iteration = 0;
   do {
      progress = false;
      iteration++;
      int pass_num = 0;

      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_algebraic);
      OPT(opt_cse);
      OPT(opt_register_coalesce);
   } while (progress);

   if (failed)
      return false;

   setup_payload();

   if (0) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = virtual_grf_count;
      float spill_costs[virtual_grf_count];
      bool no_spill[virtual_grf_count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (no_spill[i])
            continue;
         spill_reg(i);
      }
   }

   while (!reg_allocate()) {
      if (failed)
         return false;
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   /* If any state parameters were appended, then ParameterValues could have
    * been realloced, in which case the driver uniform storage set up by
    * _mesa_associate_uniform_storage() would point to freed memory.  Make
    * sure that didn't happen.
    */
   assert(sanity_param_count == prog->Parameters->NumParameters);

   return !failed;
}

} /* namespace brw */

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_vs_emit(struct brw_context *brw,
            struct gl_shader_program *prog,
            struct brw_vs_compile *c,
            struct brw_vs_prog_data *prog_data,
            void *mem_ctx,
            unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   if (unlikely(INTEL_DEBUG & DEBUG_VS))
      brw_dump_ir(brw, "vertex", prog, &shader->base, &c->vp->program.Base);

   vec4_vs_visitor v(brw, c, prog_data, prog, mem_ctx);
   if (!v.run()) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, v.fail_msg);
      }

      _mesa_problem(NULL, "Failed to compile vertex shader: %s\n",
                    v.fail_msg);

      return NULL;
   }

   const unsigned *assembly = NULL;
   vec4_generator g(brw, prog, &c->vp->program.Base, &prog_data->base,
                    mem_ctx, INTEL_DEBUG & DEBUG_VS);
   assembly = g.generate_assembly(v.cfg, final_assembly_size);

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once) {
         brw_vs_debug_recompile(brw, prog, &c->key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      shader->compiled_once = true;
   }

   return assembly;
}

void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog)
{
   key->program_string_id = id;
   key->clamp_vertex_color = ctx->API == API_OPENGL_COMPAT;

   unsigned sampler_count = _mesa_fls(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (prog->ShadowSamplers & (1 << i)) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         key->tex.swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         key->tex.swizzles[i] = SWIZZLE_XYZW;
      }
   }
}