/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_live_variables.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
#include "common/gen_debug.h"
#include "program/prog_parameter.h"
#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {

void
src_reg::init()
{
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
}
src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}
/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}
src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}
void
dst_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}
dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}
dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}
dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}
dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}
dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}
bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}
bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      /* 8-wide compressed DF operations are executed as two 4-wide operations,
       * so we have a src/dst hazard if the first half of the instruction
       * overwrites the source of the second half. Prevent this by marking
       * compressed instructions as having src/dst hazards, so the register
       * allocator assigns safe register regions for dst and srcs.
       */
      return size_written > REG_SIZE;
   }
}
unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual vertical stride. */
      return exec_size * type_sz(src[arg].type);
   }
}
bool
vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}
bool
vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case VEC4_OPCODE_FROM_DOUBLE:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gen6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->gen == 6 && is_math())
         return false;

      if (is_tex())
         return false;

      return true;
   }
}
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0 || inst->is_send_from_grf())
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case VS_OPCODE_GET_BUFFER_SIZE:
      return inst->header_size;
   default:
      unreachable("not reached");
   }
}
bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      int last_reg = -1, last_offset = -1;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             type_sz(inst->src[0].type) < 8 &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = -1;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            last_reg = -1;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
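/* Illustration (not from the original source; hypothetical registers): after
 * this pass, a sequence of partial-writemask immediate MOVs such as
 *
 *    mov vgrf1.x:F, 1.0F
 *    mov vgrf1.y:F, 2.0F
 *    mov vgrf1.z:F, 3.0F
 *    mov vgrf1.w:F, 4.0F
 *
 * is replaced by a single MOV from a packed vector-float (VF) immediate:
 *
 *    mov vgrf1.xyzw:F, [1F, 2F, 3F, 4F]VF
 */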
/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;

      case VEC4_OPCODE_TO_DOUBLE:
      case VEC4_OPCODE_FROM_DOUBLE:
      case VEC4_OPCODE_PICK_LOW_32BIT:
      case VEC4_OPCODE_PICK_HIGH_32BIT:
      case VEC4_OPCODE_SET_LOW_32BIT:
      case VEC4_OPCODE_SET_HIGH_32BIT:
         swizzle = brw_swizzle_for_size(4);
         break;

      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}
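/* Example of the renumbering above (hypothetical indices): a mat4 uniform
 * that lived at u3 with column offsets 0/16/32/48 is rewritten so that the
 * columns become u3, u4, u5 and u6, each with offset 0, since every vec4
 * slot is 16 bytes wide.
 */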
void
vec4_visitor::pack_uniform_registers()
{
   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(type_sz(inst->src[i].type) % 4 == 0);
         unsigned channel_size = type_sz(inst->src[i].type) / 4;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
            unsigned used = MAX2(chans_used[reg], channel * channel_size);
            if (used <= 4)
               chans_used[reg] = used;
            else
               chans_used[reg + 1] = used - 4;
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't broken up piecewise by
          * the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         for (unsigned i = 0; i < vec4s_read; i++)
            chans_used[reg + i] = 4;
      }
   }

   int new_uniform_count = 0;

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0)
         continue;

      int dst;
      /* Find the lowest place we can slot this uniform in. */
      for (dst = 0; dst < src; dst++) {
         if (chans_used[dst] + size <= 4)
            break;
      }

      if (src == dst) {
         new_loc[src] = dst;
         new_chan[src] = 0;
      } else {
         new_loc[src] = dst;
         new_chan[src] = chans_used[dst];

         /* Move the references to the data */
         for (int j = 0; j < size; j++) {
            stage_prog_data->param[dst * 4 + new_chan[src] + j] =
               stage_prog_data->param[src * 4 + j];
         }

         chans_used[dst] += size;
         chans_used[src] = 0;
      }

      new_uniform_count = MAX2(new_uniform_count, dst + 1);
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM)
            continue;

         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
                                              new_chan[src], new_chan[src]);
      }
   }
}
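/* Sketch of the packing (hypothetical layout): if u0 only uses .xy, u1 is
 * entirely unused and u2 uses .x, then u2 is slotted into the free channels
 * of u0 (new_loc = 0, new_chan = 2), u1 disappears, and this->uniforms drops
 * from 3 to 1.  The swizzle adjustment above shifts reads of u2.x to u0.z.
 */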
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->dst.type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case BRW_OPCODE_CMP:
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
            break;
         }
         break;
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
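/* Examples of the rewrites performed above (illustrative only):
 *
 *    add dst, src0, 0        ->  mov dst, src0
 *    mul dst, src0, 1        ->  mov dst, src0
 *    mul dst, src0, -1       ->  mov dst, -src0
 *    cmp.ge null, -|a|, 0    ->  cmp.z null, a, 0
 *
 * The last one works because -|a| >= 0 can only hold when a == 0.
 */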
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         const gl_constant_value **values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
            glsl_type::dvec4_type : glsl_type::vec4_type;
         dst_reg temp = dst_reg(this, temp_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
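/* For instance (hypothetical counts): with 70 uniform vec4s and the 256
 * push-constant components allowed above (64 vec4s), u64..u69 are assigned
 * pull-constant slots, every read of them is replaced by a pull-constant
 * load into a fresh temporary, and pack_uniform_registers() then compacts
 * the remaining push space.
 */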
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

#define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)

   /* From the Cherryview and Broadwell PRMs:
    *
    * "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    *
    * SKL PRMs don't include this restriction, however, gen7 seems to be
    * affected, at least by the 64b restriction, since DepCtrl with double
    * precision instructions seems to produce GPU hangs in some cases.
    */
   if (devinfo->gen == 8 || devinfo->is_broxton) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }

   if (devinfo->gen >= 7 && devinfo->gen <= 8) {
      if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
          IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
         return true;
   }

#undef IS_64BIT
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control. They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}
/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}
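/* Effect on the motivating DP4 sequence (illustrative): the pass chains
 * NoDDClr/NoDDChk across writes to disjoint channels of the same register,
 * so the scoreboard is only cleared once for the whole group:
 *
 *    dp4 temp.x ...  { NoDDClr }
 *    dp4 temp.y ...  { NoDDClr, NoDDChk }
 *    dp4 temp.z ...  { NoDDClr, NoDDChk }
 *    dp4 temp.w ...  { NoDDChk }
 */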
bool
vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}
/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}
/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* Only allow coalescing between registers of the same type size.
             * Otherwise we would need to make the pass aware of the fact that
             * channel sizes are different for single and double precision.
             */
            if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
               break;

            /* Check that scan_inst writes the same amount of data as the
             * instruction, otherwise coalescing would lead to writing a
             * different (larger or smaller) region of the destination
             */
            if (scan_inst->size_written != inst->size_written)
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing writes of 8 channels (1 register
             * for single-precision and 2 registers for double-precision)
             * starting at the source offset of the copy instruction.
             */
            if (DIV_ROUND_UP(scan_inst->size_written,
                             type_sz(scan_inst->dst.type)) > 8 ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.nr >= scan_inst->base_mrf &&
                inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
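/* Typical case handled above (hypothetical registers): the pair
 *
 *    mul vgrf2.xyzw:F, vgrf0, vgrf1
 *    mov m3.xyzw:F, vgrf2
 *
 * becomes a single "mul m3.xyzw:F, vgrf0, vgrf1" when vgrf2 dies at the MOV
 * and the backwards scan finds no interference with m3 or vgrf2.
 */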
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   return progress;
}
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF on IVB.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_live_intervals();
}
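/* Example (hypothetical sizes): a size-4 VGRF holding a mat4 temporary keeps
 * its first register under the original number, while three fresh
 * single-register VGRFs are allocated for the rest; an access at byte offset
 * 64 (offset / REG_SIZE == 2) is redirected to the second new register with
 * offset 0.
 */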
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf0.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
           inst->exec_size);
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f0.%d", inst->flag_subreg);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_DF:
            fprintf(file, "%fDF", inst->src[i].df);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   if (inst->exec_size != 8)
      fprintf(file, " group%d", inst->group);

   fprintf(file, "\n");
}
static inline struct brw_reg
attribute_to_hw_reg(int attr, brw_reg_type type, bool interleaved)
{
   struct brw_reg reg;

   unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
   if (interleaved) {
      reg = stride(brw_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
   } else {
      reg = brw_vecn_grf(width, attr, 0);
   }

   reg.type = type;
   return reg;
}
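/* Worked example of the math above (assuming 4-byte types, so width == 4):
 * interleaved attribute 5 maps to the second half of GRF 2 (subreg offset 4)
 * with a <0;4,1> region, while non-interleaved attribute 5 simply maps to
 * GRF 5.
 */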
/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
void
vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
                                          bool interleaved)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].nr +
                                 inst->src[i].offset / REG_SIZE];
         assert(inst->src[i].offset % REG_SIZE == 0);

         /* All attributes used in the shader need to have been assigned a
          * hardware register by the caller
          */
         assert(grf != 0);

         struct brw_reg reg =
            attribute_to_hw_reg(grf, inst->src[i].type, interleaved);
         reg.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg.abs = true;
         if (inst->src[i].negate)
            reg.negate = true;

         inst->src[i] = reg;
      }
   }
}
int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX + 2];
   memset(attribute_map, 0, sizeof(attribute_map));

   nr_attributes = 0;
   GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
   while (vs_inputs) {
      GLuint first = ffsll(vs_inputs) - 1;
      int needed_slots =
         (vs_prog_data->double_inputs_read & BITFIELD64_BIT(first)) ? 2 : 1;
      for (int c = 0; c < needed_slots; c++) {
         attribute_map[first + c] = payload_reg + nr_attributes;
         nr_attributes++;
         vs_inputs &= ~BITFIELD64_BIT(first + c);
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
       vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   if (vs_prog_data->uses_drawid) {
      attribute_map[VERT_ATTRIB_MAX + 1] = payload_reg + nr_attributes;
      nr_attributes++;
   }

   lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);

   return payload_reg + vs_prog_data->nr_attribute_slots;
}
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      stage_prog_data->param =
         reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static gl_constant_value zero = { 0.0 };
         stage_prog_data->param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}
void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
bool
vec4_visitor::lower_minmax()
{
   assert(devinfo->gen < 6);

   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const vec4_builder ibld(this, block, inst);

      if (inst->opcode == BRW_OPCODE_SEL &&
          inst->predicate == BRW_PREDICATE_NONE) {
         /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
          *        the original SEL.L/GE instruction
          */
         ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
                  inst->conditional_mod);
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->conditional_mod = BRW_CONDITIONAL_NONE;

         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}
void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}
void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.offset += REG_SIZE;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * BRW_SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
   inst->mlen = 2;
   inst->base_mrf = -1;
}
void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         struct src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF: {
            const unsigned type_size = type_sz(src.type);
            const unsigned width = REG_SIZE / 2 / MAX2(4, type_size);
            reg = byte_offset(brw_vecn_grf(width, src.nr, 0), src.offset);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;
         }

         case UNIFORM: {
            const unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(src.type));
            reg = stride(byte_offset(brw_vec4_grf(
                                        prog_data->base.dispatch_grf_start_reg +
                                        src.nr / 2, src.nr % 2 * 4),
                                     src.offset),
                         0, width, 1);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;
         }

         case FIXED_GRF:
            if (type_sz(src.type) == 8) {
               reg = src.as_brw_reg();
               break;
            }
            /* fallthrough */
         case ARF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }

         apply_logical_swizzle(&reg, inst, i);
         src = reg;
      }

      if (inst->is_3src(devinfo)) {
         /* 3-src instructions with scalar sources support arbitrary subnr,
          * but don't actually use swizzles.  Convert swizzle into subnr.
          * Skip this for double-precision instructions: RepCtrl=1 is not
          * allowed for them and needs special handling.
          */
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
                type_sz(inst->src[i].type) < 8) {
               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
            }
         }
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
         assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = dst.as_brw_reg();
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      dst = reg;
   }
}
static bool
stage_uses_interleaved_attributes(unsigned stage,
                                  enum shader_dispatch_mode dispatch_mode)
{
   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      return true;
   case MESA_SHADER_GEOMETRY:
      return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
   default:
      return false;
   }
}
/**
 * Get the closest native SIMD width supported by the hardware for instruction
 * \p inst.  The instruction will be left untouched by
 * vec4_visitor::lower_simd_width() if the returned value matches the
 * instruction's original execution size.
 */
static unsigned
get_lowered_simd_width(const struct gen_device_info *devinfo,
                       enum shader_dispatch_mode dispatch_mode,
                       unsigned stage, const vec4_instruction *inst)
{
   /* Do not split some instructions that require special handling */
   switch (inst->opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->exec_size;
   default:
      break;
   }

   unsigned lowered_width = MIN2(16, inst->exec_size);

   /* We need to split some cases of double-precision instructions that write
    * 2 registers. We only need to care about this in gen7 because that is the
    * only hardware that implements fp64 in Align16.
    */
   if (devinfo->gen == 7 && inst->size_written > REG_SIZE) {
      /* Align16 8-wide double-precision SEL does not work well. Verified
       * empirically.
       */
      if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
         lowered_width = MIN2(lowered_width, 4);

      /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
       * Register Addressing:
       *
       *    "When destination spans two registers, the source MUST span two
       *     registers."
       */
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == BAD_FILE)
            continue;
         if (inst->size_read(i) <= REG_SIZE)
            lowered_width = MIN2(lowered_width, 4);

         /* Interleaved attribute setups use a vertical stride of 0, which
          * makes them hit the associated instruction decompression bug in gen7.
          * Split them to prevent this.
          */
         if (inst->src[i].file == ATTR &&
             stage_uses_interleaved_attributes(stage, dispatch_mode))
            lowered_width = MIN2(lowered_width, 4);
      }
   }

   return lowered_width;
}
static bool
dst_src_regions_overlap(vec4_instruction *inst)
{
   if (inst->size_written == 0)
      return false;

   unsigned dst_start = inst->dst.offset;
   unsigned dst_end = dst_start + inst->size_written - 1;
   for (int i = 0; i < 3; i++) {
      if (inst->src[i].file == BAD_FILE)
         continue;

      if (inst->dst.file != inst->src[i].file ||
          inst->dst.nr != inst->src[i].nr)
         continue;

      unsigned src_start = inst->src[i].offset;
      unsigned src_end = src_start + inst->size_read(i) - 1;

      if ((dst_start >= src_start && dst_start <= src_end) ||
          (dst_end >= src_start && dst_end <= src_end) ||
          (dst_start <= src_start && dst_end >= src_end)) {
         return true;
      }
   }

   return false;
}
bool
vec4_visitor::lower_simd_width()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const unsigned lowered_width =
         get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
      assert(lowered_width <= inst->exec_size);
      if (lowered_width == inst->exec_size)
         continue;

      /* We need to deal with source / destination overlaps when splitting.
       * The hardware supports reading from and writing to the same register
       * in the same instruction, but we need to be careful that each split
       * instruction we produce does not corrupt the source of the next.
       *
       * The easiest way to handle this is to make the split instructions write
       * to temporaries if there is an src/dst overlap and then move from the
       * temporaries to the original destination. We also need to consider
       * instructions that do partial writes via align1 opcodes, in which case
       * we need to make sure that the we initialize the temporary with the
       * value of the instruction's dst.
       */
      bool needs_temp = dst_src_regions_overlap(inst);
      for (unsigned n = 0; n < inst->exec_size / lowered_width; n++)  {
         unsigned channel_offset = lowered_width * n;

         unsigned size_written = lowered_width * type_sz(inst->dst.type);

         /* Create the split instruction from the original so that we copy all
          * relevant instruction fields, then set the width and calculate the
          * new dst/src regions.
          */
         vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
         linst->exec_size = lowered_width;
         linst->group = channel_offset;
         linst->size_written = size_written;

         /* Compute split dst region */
         dst_reg dst;
         if (needs_temp) {
            unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
            dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                         inst->dst.type);
            if (inst->is_align1_partial_write()) {
               vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
               copy->exec_size = lowered_width;
               copy->group = channel_offset;
               copy->size_written = size_written;
               inst->insert_before(block, copy);
            }
         } else {
            dst = horiz_offset(inst->dst, channel_offset);
         }
         linst->dst = dst;

         /* Compute split source regions */
         for (int i = 0; i < 3; i++) {
            if (linst->src[i].file == BAD_FILE)
               continue;

            if (!is_uniform(linst->src[i]))
               linst->src[i] = horiz_offset(linst->src[i], channel_offset);
         }

         inst->insert_before(block, linst);

         /* If we used a temporary to store the result of the split
          * instruction, copy the result to the original destination
          */
         if (needs_temp) {
            vec4_instruction *mov =
               MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
            mov->exec_size = lowered_width;
            mov->group = channel_offset;
            mov->size_written = size_written;
            mov->predicate = inst->predicate;
            inst->insert_before(block, mov);
         }
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
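/* Illustration (hypothetical gen7 case): an 8-wide DF add whose destination
 * overlaps a source is split into two 4-wide halves with group 0 and group 4;
 * each half writes a temporary VGRF, and a trailing MOV per half copies the
 * temporary into the original destination.
 */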
static bool
is_align1_df(vec4_instruction *inst)
{
   switch (inst->opcode) {
   case VEC4_OPCODE_FROM_DOUBLE:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
      return true;
   default:
      return false;
   }
}
static brw_predicate
scalarize_predicate(brw_predicate predicate, unsigned writemask)
{
   if (predicate != BRW_PREDICATE_NORMAL)
      return predicate;

   switch (writemask) {
   case WRITEMASK_X:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case WRITEMASK_Y:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case WRITEMASK_Z:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case WRITEMASK_W:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      unreachable("invalid writemask");
   }
}
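/* Note on the mapping above: each single-channel writemask selects the
 * matching ALIGN16 replicate predicate, so a scalarized DF channel is
 * predicated on the flag bit that its source channel produced.
 */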
/* Gen7 has a hardware decompression bug that we can exploit to represent
 * a handful of additional swizzles natively.
 */
static bool
is_gen7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
{
   switch (inst->src[arg].swizzle) {
   case BRW_SWIZZLE_XXXX:
   case BRW_SWIZZLE_YYYY:
   case BRW_SWIZZLE_ZZZZ:
   case BRW_SWIZZLE_WWWW:
   case BRW_SWIZZLE_XYXY:
   case BRW_SWIZZLE_YXYX:
   case BRW_SWIZZLE_ZWZW:
   case BRW_SWIZZLE_WZWZ:
      return true;
   default:
      return false;
   }
}
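/* Note (added commentary): every swizzle above repeats a single dvec2-wide
 * selection in both halves of the logical dvec4 (e.g. XYXY reads the first
 * dvec2 twice). With the vstride=0 decompression exploit the second
 * compressed half re-reads the first register row, which is exactly what
 * these patterns require.
 */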
/* 64-bit sources use regions with a width of 2. These 2 elements in each row
 * can be addressed using 32-bit swizzles (which is what the hardware supports)
 * but it also means that the swizzle we apply on the first two components of a
 * dvec4 is coupled with the swizzle we use for the last 2. In other words,
 * only some specific swizzle combinations can be natively supported.
 *
 * FIXME: we can go a step further and implement even more swizzle
 * variations using only partial scalarization.
 *
 * For more details see:
 * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
 */
bool
vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
{
   const src_reg &src = inst->src[arg];
   assert(type_sz(src.type) == 8);

   /* Uniform regions have a vstride=0. Because we use 2-wide rows with
    * 64-bit regions it means that we cannot access components Z/W, so
    * return false for any such case. Interleaved attributes will also be
    * mapped to GRF registers with a vstride of 0, so apply the same
    * treatment.
    */
   if ((is_uniform(src) ||
        (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
         src.file == ATTR)) &&
       (brw_mask_for_swizzle(src.swizzle) & 12))
      return false;

   switch (src.swizzle) {
   case BRW_SWIZZLE_XYZW:
   case BRW_SWIZZLE_XXZZ:
   case BRW_SWIZZLE_YYWW:
   case BRW_SWIZZLE_YXWZ:
      return true;
   default:
      return devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg);
   }
}
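/* Worked example (added commentary): BRW_SWIZZLE_XXZZ selects X twice from
 * the first dvec2 row and Z twice from the second, so both rows share the
 * same 2-wide 32-bit pattern and the swizzle is natively representable.
 * Something like BRW_SWIZZLE_XXXX instead needs the second row to re-read
 * the first one, which is only expressible through the gen7 vstride=0
 * exploit checked above.
 */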
bool
vec4_visitor::scalarize_df()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      /* Skip DF instructions that operate in Align1 mode */
      if (is_align1_df(inst))
         continue;

      /* Check if this is a double-precision instruction */
      bool is_double = type_sz(inst->dst.type) == 8;
      for (int arg = 0; !is_double && arg < 3; arg++) {
         is_double = inst->src[arg].file != BAD_FILE &&
                     type_sz(inst->src[arg].type) == 8;
      }

      if (!is_double)
         continue;
      /* Skip the lowering for specific regioning scenarios that we can
       * support natively.
       */
      bool skip_lowering = true;

      /* XY and ZW writemasks operate in 32-bit, which means that they don't
       * have a native 64-bit representation and they should always be split.
       */
      if (inst->dst.writemask == WRITEMASK_XY ||
          inst->dst.writemask == WRITEMASK_ZW) {
         skip_lowering = false;
      } else {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
               continue;

            skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
         }
      }

      if (skip_lowering)
         continue;
      /* Generate scalar instructions for each enabled channel */
      for (unsigned chan = 0; chan < 4; chan++) {
         unsigned chan_mask = 1 << chan;
         if (!(inst->dst.writemask & chan_mask))
            continue;

         vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);

         for (unsigned i = 0; i < 3; i++) {
            unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
            scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
         }

         scalar_inst->dst.writemask = chan_mask;

         if (inst->predicate != BRW_PREDICATE_NONE) {
            scalar_inst->predicate =
               scalarize_predicate(inst->predicate, chan_mask);
         }

         inst->insert_before(block, scalar_inst);
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
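/* Example (added commentary): a predicated DF MUL with writemask XZ would be
 * rewritten as two single-channel copies of itself, one with writemask X and
 * one with writemask Z. Each copy replicates the per-channel source swizzle
 * (e.g. channel Z of swizzle YXWZ becomes WWWW) and gets an
 * ALIGN16_REPLICATE predicate for its channel via scalarize_predicate().
 */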
bool
vec4_visitor::lower_64bit_mad_to_mul_add()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->opcode != BRW_OPCODE_MAD)
         continue;

      if (type_sz(inst->dst.type) != 8)
         continue;

      dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);

      /* Use the copy constructor so we copy all relevant instruction fields
       * from the original mad into the add and mul instructions.
       */
      vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
      mul->opcode = BRW_OPCODE_MUL;
      mul->dst = mul_dst;
      mul->src[0] = inst->src[1];
      mul->src[1] = inst->src[2];
      mul->src[2].file = BAD_FILE;

      vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
      add->opcode = BRW_OPCODE_ADD;
      add->src[0] = src_reg(mul_dst);
      add->src[1] = inst->src[0];
      add->src[2].file = BAD_FILE;

      inst->insert_before(block, mul);
      inst->insert_before(block, add);
      inst->remove(block);

      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
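/* Note (added commentary): BRW_OPCODE_MAD computes dst = src[0] + src[1] *
 * src[2], so the lowering above maps src[1] and src[2] to the MUL and then
 * adds src[0] to the intermediate dvec4 result, preserving the original
 * operand order.
 */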
/* The align16 hardware can only do 32-bit swizzle channels, so we need to
 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
 * to 32-bit swizzle channels in hardware registers.
 *
 * @inst and @arg identify the original vec4 IR source operand we need to
 * translate the swizzle for and @hw_reg is the hardware register where we
 * will write the hardware swizzle to use.
 *
 * This pass assumes that Align16/DF instructions have been fully scalarized
 * previously so there is just one 64-bit swizzle channel to deal with for any
 * given Vec4 IR source.
 */
void
vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
                                    vec4_instruction *inst, int arg)
{
   src_reg reg = inst->src[arg];

   if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
      return;

   /* If this is not a 64-bit operand or this is a scalar instruction we don't
    * need to do anything about the swizzles.
    */
   if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
      hw_reg->swizzle = reg.swizzle;
      return;
   }
   /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
   assert(brw_is_single_value_swizzle(reg.swizzle) ||
          is_supported_64bit_region(inst, arg));

   if (is_supported_64bit_region(inst, arg) &&
       !is_gen7_supported_64bit_swizzle(inst, arg)) {
      /* Supported 64-bit swizzles are those such that their first two
       * components, when expanded to 32-bit swizzles, match the semantics
       * of the original 64-bit swizzle with 2-wide row regioning.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   } else {
      /* If we got here then we have one of the following:
       *
       * 1. An unsupported swizzle, which should be single-value thanks to the
       *    scalarization pass.
       *
       * 2. A gen7 supported swizzle. These can be single-value or double-value
       *    swizzles. If the latter, they are never cross-dvec2 channels. For
       *    these we always need to activate the gen7 vstride=0 exploit.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      assert((swizzle0 < 2) == (swizzle1 < 2));

      /* To gain access to Z/W components we need to select the second half
       * of the register and then use an X/Y swizzle to select Z/W respectively.
       */
      if (swizzle0 >= 2) {
         *hw_reg = suboffset(*hw_reg, 2);
         swizzle0 -= 2;
         swizzle1 -= 2;
      }

      /* All gen7-specific supported swizzles require the vstride=0 exploit */
      if (devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg))
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;

      /* Any 64-bit source with an offset at 16B is intended to address the
       * second half of a register and needs a vertical stride of 0 so we:
       *
       * 1. Don't violate register region restrictions.
       * 2. Activate the gen7 instruction decompression bug exploit when
       *    execsize > 4.
       */
      if (hw_reg->subnr % REG_SIZE == 16) {
         assert(devinfo->gen == 7);
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
      }

      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   }
}
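/* Worked example (added commentary): after full scalarization a source with
 * logical swizzle ZZZZ has swizzle0 == swizzle1 == 2, so we suboffset the
 * hardware register by 2 elements to reach the second dvec2 and then emit
 * BRW_SWIZZLE4(0, 1, 0, 1), i.e. an XYXY 32-bit swizzle that picks the two
 * halves of that double.
 */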
bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();

   emit_prolog();

   emit_nir_code();
   if (failed)
      return false;
   base_ir = NULL;

   calculate_cfg();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be. This pass may allocate new
    * virtual GRFs, so we want to do it early. It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();
#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info->name, iteration, pass_num); \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })
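   /* Usage note (added commentary): because the statement-expression
    * evaluates to this_progress, OPT() can be used both as a standalone pass
    * invocation and inside conditions such as "if (OPT(lower_simd_width))"
    * to trigger follow-up cleanup passes only when the pass actually changed
    * the IR.
    */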
   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-00-start",
               stage_abbrev, nir->info->name);

      backend_shader::dump_instructions(filename);
   }
   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(opt_predicated_break, this);
      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
      OPT(eliminate_find_live_channel);
   } while (progress);

   if (OPT(opt_vector_float)) {
      OPT(opt_cse);
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (devinfo->gen <= 5 && OPT(lower_minmax)) {
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (OPT(lower_simd_width)) {
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (failed)
      return false;

   OPT(lower_64bit_mad_to_mul_add);

   /* Run this before payload setup because tessellation shaders
    * rely on it to prevent cross dvec2 regioning on DF attributes
    * that are set up so that XY are on the second half of a register and
    * ZW are in the first half of the next.
    */
   OPT(scalarize_df);

   setup_payload();
   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (alloc.sizes[i])
            spill_reg(i);
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      scalarize_df();
   }
   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      compiler->shader_perf_log(log_data,
                                "%s shader triggered register spilling. "
                                "Try reducing the number of live vec4 values "
                                "to improve performance.\n",
                                stage_name);

      while (!reg_allocate()) {
         if (failed)
            return false;
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      scalarize_df();
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed;
}

} /* namespace brw */
extern "C" {

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               const nir_shader *src_shader,
               gl_clip_plane *clip_planes,
               bool use_legacy_snorm_formula,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str)
{
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
   nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
   shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
   brw_nir_lower_vs_inputs(shader, is_scalar,
                           use_legacy_snorm_formula, key->gl_attrib_wa_flags);
   brw_nir_lower_vue_outputs(shader, is_scalar);
   shader = brw_postprocess_nir(shader, compiler, is_scalar);
   const unsigned *assembly = NULL;

   prog_data->base.clip_distance_mask =
      ((1 << shader->info->clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << shader->info->cull_distance_array_size) - 1) <<
       shader->info->clip_distance_array_size;
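   /* Worked example (added commentary): with 4 clip distances and 2 cull
    * distances, clip_distance_mask is (1 << 4) - 1 = 0xf and
    * cull_distance_mask is ((1 << 2) - 1) << 4 = 0x30, so the two masks
    * occupy disjoint bit ranges of the combined clip/cull array.
    */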
   unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute. So, add an extra slot.
    */
   if (shader->info->system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
        BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
      nr_attribute_slots++;
   }

   /* gl_DrawID has its very own vec4 */
   if (shader->info->system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
      nr_attribute_slots++;
   }
   unsigned nr_attributes = nr_attribute_slots -
      DIV_ROUND_UP(_mesa_bitcount_64(shader->info->double_inputs_read), 2);

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (is_scalar)
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(nr_attribute_slots, 2);
   else
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);

   prog_data->nr_attributes = nr_attributes;
   prog_data->nr_attribute_slots = nr_attribute_slots;
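   /* Example (added commentary): a VS reading 5 attribute slots gets a URB
    * read length of DIV_ROUND_UP(5, 2) = 3, since each URB read fetches a
    * pair of vec4 slots; in vec4 mode the MAX2() keeps the value at least 1
    * even for a shader that reads no attributes at all.
    */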
   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);

   if (compiler->devinfo->gen == 6)
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   else
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);

   if (INTEL_DEBUG & DEBUG_VS) {
      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }
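   /* Example (added commentary): with vue_entries = 10, gen6 programs a URB
    * entry size of DIV_ROUND_UP(10, 8) = 2 while later gens use
    * DIV_ROUND_UP(10, 4) = 3, reflecting the different allocation
    * granularity of the URB entry size field across generations.
    */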
   if (is_scalar) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
                   NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
                   shader, 8, shader_time_index);
      if (!v.run_vs(clip_planes)) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, log_data, mem_ctx, (void *) key,
                     &prog_data->base.base, v.promoted_constants,
                     v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
      if (INTEL_DEBUG & DEBUG_VS) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            shader->info->label ? shader->info->label :
                                                  "unnamed",
                            shader->info->name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8);
      assembly = g.get_assembly(final_assembly_size);
   }
   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, log_data, key, prog_data,
                        shader, clip_planes, mem_ctx,
                        shader_time_index, use_legacy_snorm_formula);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                            shader, &prog_data->base, v.cfg,
                                            final_assembly_size);
   }

   return assembly;
}

} /* extern "C" */