/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4_builder.h"
#include "brw_vec4_live_variables.h"
#include "brw_dead_control_flow.h"
#include "program/prog_parameter.h"

#define MAX_INSTRUCTION (1 << 30)
using namespace brw;

namespace brw {

void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}
src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}
/** Generic unset register constructor. */
src_reg::src_reg() :
   backend_reg()
{
   init();
}

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}
src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}
void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg() :
   backend_reg()
{
   init();
}
dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}
dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}
dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}
dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}
dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}
bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}
bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
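/* As a rough, hypothetical illustration: if a virtual opcode were expanded
 * by the generator into
 *
 *    mov dst.xy, src.zw
 *    mov dst.zw, src.xy
 *
 * then allocating dst and src to the same GRF would let the first MOV
 * clobber src.xy before the second MOV reads it.
 */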
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      return false;
   }
}
unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual execution size and vertical stride. */
      return 8 * type_sz(src[arg].type);
   }
}
bool
vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}
bool
vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gen6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->gen == 6 && is_math())
         return false;

      if (is_tex())
         return false;

      return true;
   }
}
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0 || inst->is_send_from_grf())
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case VS_OPCODE_GET_BUFFER_SIZE:
      return inst->header_size;
   default:
      unreachable("not reached");
   }
}
bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}
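/* Combines a run of partial-writemask MOVs from immediates into a single MOV
 * of a packed VF (vector-float) immediate.  As an illustrative example
 * (register numbers hypothetical), a sequence like
 *
 *    mov vgrf4.x:F, 0.0F
 *    mov vgrf4.y:F, 1.0F
 *    mov vgrf4.zw:F, 0.5F
 *
 * could become
 *
 *    mov vgrf4.xyzw:F, [0F, 1F, 0.5F, 0.5F]VF
 */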
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      int last_reg = -1, last_offset = -1;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = -1;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            last_reg = -1;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;
      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
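/* As a hypothetical illustration of the pass below: a mat2 uniform that was
 * laid out as one UNIFORM .nr index with rows at byte offsets 0 and 16
 * becomes two consecutive .nr indices, each at offset 0, since every .nr is
 * cut down to a single vec4.
 */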
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}
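/* Packs the used channels of the remaining uniform vectors together.  As a
 * rough illustration (indices hypothetical): if u0 uses only .xy and u1 uses
 * only .x, the pass below can slot u1's value into u0.z and rewrite readers
 * of u1 to read channel z of u0 via a swizzle offset, shrinking
 * this->uniforms by one.
 */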
void
vec4_visitor::pack_uniform_registers()
{
   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            chans_used[reg] = MAX2(chans_used[reg],
                                   BRW_GET_SWZ(inst->src[i].swizzle, c) + 1);
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't broken up piecewise by
          * the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         for (unsigned i = 0; i < vec4s_read; i++)
            chans_used[reg + i] = 4;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0)
         continue;

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (chans_used[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = chans_used[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         chans_used[dst] += size;
         chans_used[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
                                              new_chan[src], new_chan[src]);
      }
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
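/* For instance, an illustrative multiply by one:
 *
 *    mul vgrf2.xyzw:F, vgrf3.xyzw:F, 1.0F
 *
 * becomes
 *
 *    mov vgrf2.xyzw:F, vgrf3.xyzw:F
 */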
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->dst.type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case BRW_OPCODE_CMP:
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
            break;
         }
         break;
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
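/* A hypothetical sizing example against the 32-register gen6 budget used
 * below (32 * 8 = 256 scalar components, i.e. 64 vec4s): a shader declaring
 * 80 uniform vec4s would keep the first 64 as push constants and rewrite
 * reads of the remaining 16 into pull-constant loads.
 */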
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const gl_constant_value **values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

   /* "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    * May apply to future SoCs as well.
    */
   if (devinfo->is_cherryview) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control. They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}
/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
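/* Illustratively, the pass below would mark the sequence above roughly as:
 *
 *   DP4 temp.x vertex uniform[0] { NoDDClr }
 *   DP4 temp.y vertex uniform[0] { NoDDClr, NoDDChk }
 *   DP4 temp.z vertex uniform[0] { NoDDClr, NoDDChk }
 *   DP4 temp.w vertex uniform[0] { NoDDChk }
 *
 * (no_dd_clear on every instruction but the last, no_dd_check on every one
 * but the first), telling the hardware not to stall on the scoreboard for
 * temp between them.
 */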
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}
bool
vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}
/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}
/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
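/* A small illustrative case (register numbers hypothetical):
 *
 *    mul vgrf3.x:F, vgrf1.x:F, vgrf2.x:F
 *    mov m4.x:F, vgrf3.x:F
 *
 * can be rewritten, when vgrf3 dies at the MOV, as
 *
 *    mul m4.x:F, vgrf1.x:F, vgrf2.x:F
 */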
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 4) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing of a single register starting at
             * the source offset of the copy instruction.
             */
            if (scan_inst->size_written > REG_SIZE ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.nr >= scan_inst->base_mrf &&
                inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   return progress;
}
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * the GRF with a half of a register offset.
 */
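/* For example (sizes hypothetical), a mat4 temporary allocated as one
 * 4-register VGRF becomes four independent 1-register VGRFs after this pass,
 * so later passes can track, coalesce, or eliminate each column separately.
 */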
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_live_intervals();
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f0.%d", inst->flag_subreg);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d", inst->src[i].nr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   fprintf(file, "\n");
}
static inline struct brw_reg
attribute_to_hw_reg(int attr, bool interleaved)
{
   if (interleaved)
      return stride(brw_vec4_grf(attr / 2, (attr % 2) * 4), 0, 4, 1);
   else
      return brw_vec8_grf(attr, 0);
}
/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
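/* Following the interleaved formula in attribute_to_hw_reg() above, a
 * hypothetical attribute index 3 maps to the second half of register 1:
 * brw_vec4_grf(3 / 2, (3 % 2) * 4) == g1.4, read with a 0/4/1 stride.
 * Non-interleaved, the same index simply maps to all of g3.
 */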
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].nr +
                                 inst->src[i].offset / REG_SIZE];
         assert(inst->src[i].offset % REG_SIZE == 0);

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg = attribute_to_hw_reg(grf, interleaved);
         reg.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].abs)
            reg.abs = true;
         if (inst->src[i].negate)
            reg.negate = true;

         inst->src[i] = reg;
      }
   }
}
int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 2];
   memset(attribute_map, 0, sizeof(attribute_map));

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
       vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   if (vs_prog_data->uses_drawid) {
      attribute_map[VERT_ATTRIB_MAX + 1] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);

   return payload_reg + vs_prog_data->nr_attributes;
}
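/* Push constants are packed two vec4 uniforms per register, so as a worked
 * example (count hypothetical), 5 uniform vec4s consume
 * ALIGN(5, 2) / 2 = 3 registers in the non-workaround path below.
 */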
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}
void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
bool
vec4_visitor::lower_minmax()
{
   assert(devinfo->gen < 6);

   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const vec4_builder ibld(this, block, inst);

      if (inst->opcode == BRW_OPCODE_SEL &&
          inst->predicate == BRW_PREDICATE_NONE) {
         /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
          * the original SEL.L/GE instruction
          */
         ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
                  inst->conditional_mod);
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->conditional_mod = BRW_CONDITIONAL_NONE;

         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}
void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}
void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}
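/* The three writes above form one shader-time record per index: subindex 0
 * accumulates the cycle delta, subindex 1 counts runs whose delta was valid,
 * and subindex 2 counts runs discarded because the timestamp reset
 * mid-shader.
 */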
void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.offset += REG_SIZE;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
   inst->mlen = 2;
   inst->force_writemask_all = true;
}
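/* A worked example of the UNIFORM mapping in the pass below, with
 * hypothetical numbers: with dispatch_grf_start_reg == 2, uniform u5 lands at
 * brw_vec4_grf(2 + 5 / 2, (5 % 2) * 4) == g4.4, i.e. the second vec4 of g4,
 * since each push-constant register holds two vec4s.
 */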
void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         struct src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF:
            reg = byte_offset(brw_vec8_grf(src.nr, 0), src.offset);
            reg.type = src.type;
            reg.swizzle = src.swizzle;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;

         case UNIFORM:
            reg = stride(byte_offset(brw_vec4_grf(
                                        prog_data->base.dispatch_grf_start_reg +
                                        src.nr / 2, src.nr % 2 * 4),
                                     src.offset), 0, 4, 1);
            reg.type = src.type;
            reg.swizzle = src.swizzle;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;

         case ARF:
         case FIXED_GRF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }

         src = reg;
      }

      if (inst->is_3src(devinfo)) {
         /* 3-src instructions with scalar sources support arbitrary subnr,
          * but don't actually use swizzles.  Convert swizzle into subnr.
          */
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0) {
               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
            }
         }
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
         assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = dst.as_brw_reg();
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      inst->dst = reg;
   }
}
bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();
#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info->name, iteration, pass_num); \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })
   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-00-start",
               stage_abbrev, nir->info->name);

      backend_shader::dump_instructions(filename);
   }
   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(opt_predicated_break, this);
      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
      OPT(eliminate_find_live_channel);
   } while (progress);

   pass_num = 0;

   if (OPT(opt_vector_float)) {
      OPT(opt_cse);
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (devinfo->gen <= 5 && OPT(lower_minmax)) {
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }
   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (!no_spill[i])
            spill_reg(i);
      }
   }
= reg_allocate();
2065 if (!allocated_without_spills
) {
2066 compiler
->shader_perf_log(log_data
,
2067 "%s shader triggered register spilling. "
2068 "Try reducing the number of live vec4 values "
2069 "to improve performance.\n",
2072 while (!reg_allocate()) {
   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed;
}
} /* namespace brw */
extern "C" {

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               const nir_shader *src_shader,
               gl_clip_plane *clip_planes,
               bool use_legacy_snorm_formula,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str)
{
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
   nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
   shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
                                      is_scalar);
   brw_nir_lower_vs_inputs(shader, is_scalar,
                           use_legacy_snorm_formula, key->gl_attrib_wa_flags);
   brw_nir_lower_vue_outputs(shader, is_scalar);
   shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);

   const unsigned *assembly = NULL;

   unsigned nr_attributes = _mesa_bitcount_64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute.  So, add an extra slot.
    */
   if (shader->info->system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
        BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
      nr_attributes++;
   }

   /* gl_DrawID has its very own vec4 */
   if (shader->info->system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
      nr_attributes++;
   }

   unsigned nr_attribute_slots =
      nr_attributes +
      _mesa_bitcount_64(shader->info->double_inputs_read);

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (is_scalar)
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(nr_attribute_slots, 2);
   else
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);
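   /* E.g. (hypothetical count): 5 attribute slots give a vec4 URB read length
    * of DIV_ROUND_UP(5, 2) == 3, since each URB read fetches two slots.
    */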
   prog_data->nr_attributes = nr_attributes;
   prog_data->nr_attribute_slots = nr_attribute_slots;

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);

   if (compiler->devinfo->gen == 6)
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   else
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);

   if (is_scalar) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
                   NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
                   shader, 8, shader_time_index);
      if (!v.run_vs(clip_planes)) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                     &prog_data->base.base, v.promoted_constants,
                     v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
      if (INTEL_DEBUG & DEBUG_VS) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            shader->info->label ? shader->info->label :
                                                  "unnamed",
                            shader->info->name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8);
      assembly = g.get_assembly(final_assembly_size);
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, log_data, key, prog_data,
                        shader, clip_planes, mem_ctx,
                        shader_time_index, use_legacy_snorm_formula);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                            shader, &prog_data->base, v.cfg,
                                            final_assembly_size);
   }

   return assembly;
}