/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4_builder.h"
#include "brw_vec4_live_variables.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
#include "common/gen_debug.h"
#include "program/prog_parameter.h"

#define MAX_INSTRUCTION (1 << 30)
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}
/** Generic unset register constructor. */

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}
   memset(this, 0, sizeof(*this));

   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
   this->writemask = WRITEMASK_XYZW;
dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}

dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}
bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}
bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts.
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      /* 8-wide compressed DF operations are executed as two 4-wide operations,
       * so we have a src/dst hazard if the first half of the instruction
       * overwrites the source of the second half.  Prevent this by marking
       * compressed instructions as having src/dst hazards, so the register
       * allocator assigns safe register regions for dst and srcs.
       */
      return size_written > REG_SIZE;
   }
}
unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual vertical stride. */
      return exec_size * type_sz(src[arg].type);
   }
}
bool
vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}
bool
vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gen6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->gen == 6 && is_math())
         return false;

      return true;
   }
}
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs.
 */
int
vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
{
   if (inst->mlen == 0 || inst->is_send_from_grf())
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
      return inst->header_size;
   default:
      unreachable("not reached");
   }
}
bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}
bool
src_reg::negative_equals(const src_reg &r) const
{
   return this->backend_reg::negative_equals(r) &&
          !reladdr && !r.reladdr;
}
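/* Combines runs of per-channel MOVs of immediates into the same destination
 * into a single MOV of a packed VF immediate.  Illustrative example of the
 * rewrite performed below (hypothetical registers, not taken from a real
 * shader):
 *
 *    mov vgrf3.x:F, 1.0F
 *    mov vgrf3.y:F, 2.0F
 *    mov vgrf3.z:F, 3.0F
 *
 * becomes
 *
 *    mov vgrf3.xyz:F, [1F, 2F, 3F, 0F]
 *
 * provided each immediate in the run is representable as a VF.
 */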
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      int last_reg = -1, last_offset = -1;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             type_sz(inst->src[0].type) < 8 &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = -1;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;

      case VEC4_OPCODE_TO_DOUBLE:
      case VEC4_OPCODE_DOUBLE_TO_F32:
      case VEC4_OPCODE_DOUBLE_TO_D32:
      case VEC4_OPCODE_DOUBLE_TO_U32:
      case VEC4_OPCODE_PICK_LOW_32BIT:
      case VEC4_OPCODE_PICK_HIGH_32BIT:
      case VEC4_OPCODE_SET_LOW_32BIT:
      case VEC4_OPCODE_SET_HIGH_32BIT:
         swizzle = brw_swizzle_for_size(4);
         break;

      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
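   /* Illustrative example (hypothetical numbers): a source that was
    * uniform[2] with a byte offset of 20 becomes uniform[3] with a byte
    * offset of 4 once offset / 16 is folded into .nr below.
    */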
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}
/* This function returns the register number where we placed the uniform */
static int
set_push_constant_loc(const int nr_uniforms, int *new_uniform_count,
                      const int src, const int size, const int channel_size,
                      int *new_loc, int *new_chan,
                      int *new_chans_used)
{
   int dst;

   /* Find the lowest place we can slot this uniform in. */
   for (dst = 0; dst < nr_uniforms; dst++) {
      if (ALIGN(new_chans_used[dst], channel_size) + size <= 4)
         break;
   }

   assert(dst < nr_uniforms);

   new_loc[src] = dst;
   new_chan[src] = ALIGN(new_chans_used[dst], channel_size);
   new_chans_used[dst] = ALIGN(new_chans_used[dst], channel_size) + size;

   *new_uniform_count = MAX2(*new_uniform_count, dst + 1);

   return dst;
}
void
vec4_visitor::pack_uniform_registers()
{
   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];
   bool is_aligned_to_dvec4[this->uniforms];
   int new_chans_used[this->uniforms];
   int channel_sizes[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));
   memset(new_chans_used, 0, sizeof(new_chans_used));
   memset(is_aligned_to_dvec4, 0, sizeof(is_aligned_to_dvec4));
   memset(channel_sizes, 0, sizeof(channel_sizes));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(type_sz(inst->src[i].type) % 4 == 0);
         int channel_size = type_sz(inst->src[i].type) / 4;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
            unsigned used = MAX2(chans_used[reg], channel * channel_size);
            if (used <= 4) {
               chans_used[reg] = used;
               channel_sizes[reg] = MAX2(channel_sizes[reg], channel_size);
            } else {
               is_aligned_to_dvec4[reg] = true;
               is_aligned_to_dvec4[reg + 1] = true;
               chans_used[reg + 1] = used - 4;
               channel_sizes[reg + 1] = MAX2(channel_sizes[reg + 1], channel_size);
            }
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't broken up piecewise by
          * the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         int channel_size = type_sz(inst->src[0].type) / 4;
         for (unsigned i = 0; i < vec4s_read; i++) {
            chans_used[reg + i] = 4;
            channel_sizes[reg + i] = MAX2(channel_sizes[reg + i], channel_size);
         }
      }
   }

   int new_uniform_count = 0;

   /* As the uniforms are going to be reordered, take the data from a temporary
    * copy of the original param[].
    */
   uint32_t *param = ralloc_array(NULL, uint32_t, stage_prog_data->nr_params);
   memcpy(param, stage_prog_data->param,
          sizeof(uint32_t) * stage_prog_data->nr_params);

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.  Start with dvec{3,4} because they are aligned to
    * dvec4 size (2 vec4).
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || !is_aligned_to_dvec4[src])
         continue;

      /* dvec3 are aligned to dvec4 size, apply the alignment of the size
       * to 4 to avoid moving last component of a dvec3 to the available
       * location at the end of a previous dvec3.  These available locations
       * could be filled by smaller variables in next loop.
       */
      size = ALIGN(size, 4);
      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   /* Continue with the rest of data, which is aligned to vec4. */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || is_aligned_to_dvec4[src])
         continue;

      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM)
            continue;

         int chan = new_chan[src] / channel_sizes[src];
         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(chan, chan, chan, chan);
      }
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
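/* Illustrative rewrites performed below (hypothetical operands):
 *
 *    add dst, src, 0    ->  mov dst, src
 *    mul dst, src, 1    ->  mov dst, src
 *    mul dst, src, -1   ->  mov dst, -src
 *    mul dst, src, 0    ->  mov dst, 0
 */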
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->dst.type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_OR:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
         }
         break;

      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         uint32_t *values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
            glsl_type::dvec4_type : glsl_type::vec4_type;
         dst_reg temp = dst_reg(this, temp_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

#define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)

   /* From the Cherryview and Broadwell PRMs:
    *
    * "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    *
    * SKL PRMs don't include this restriction, however, gen7 seems to be
    * affected, at least by the 64b restriction, since DepCtrl with double
    * precision instructions seems to produce GPU hangs in some cases.
    */
   if (devinfo->gen == 8 || gen_device_info_is_9lp(devinfo)) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }

   if (devinfo->gen >= 7 && devinfo->gen <= 8) {
      if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
          IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
         return true;
   }

#undef IS_64BIT
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * In the presence of send messages, totally interrupt dependency
    * control.  They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}
/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
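/* Descriptive note: for a run like the DP4 sequence above, the pass below
 * sets no_dd_clear on the earlier write and no_dd_check on the later one
 * whenever the two writes touch disjoint channels of the same register, so
 * the generator emits the NoDDClr/NoDDChk hints that let the instructions
 * overlap in the pipeline.
 */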
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}
bool
vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   /* We can't swizzle implicit accumulator access.  We'd have to
    * reswizzle the producer of the accumulator value in addition
    * to the consumer (i.e. both MUL and MACH).  Just skip this.
    */
   if (reads_accumulator_implicitly())
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}
/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE || src[i].file == IMM)
            continue;

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}
/**
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* VS_OPCODE_UNPACK_FLAGS_SIMD4X2 generates a bunch of mov(1)
             * instructions, and this optimization pass is not capable of
             * handling that.  Bail on these instructions and hope that some
             * later optimization pass can do the right thing after they are
             * expanded.
             */
            if (scan_inst->opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2)
               break;

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* Only allow coalescing between registers of the same type size.
             * Otherwise we would need to make the pass aware of the fact that
             * channel sizes are different for single and double precision.
             */
            if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
               break;

            /* Check that scan_inst writes the same amount of data as the
             * instruction, otherwise coalescing would lead to writing a
             * different (larger or smaller) region of the destination
             */
            if (scan_inst->size_written != inst->size_written)
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing writes of 8 channels (1 register
             * for single-precision and 2 registers for double-precision)
             * starting at the source offset of the copy instruction.
             */
            if (DIV_ROUND_UP(scan_inst->size_written,
                             type_sz(scan_inst->dst.type)) > 8 ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            if (inst->dst.nr >= scan_inst->base_mrf &&
                inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   return progress;
}
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * the GRF.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_live_intervals();
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}
void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf%d.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg / 2,
              inst->flag_subreg % 2,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
           inst->exec_size);
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_CSEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f%d.%d", inst->flag_subreg / 2, inst->flag_subreg % 2);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }

   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_to_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_DF:
            fprintf(file, "%fDF", inst->src[i].df);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   if (inst->exec_size != 8)
      fprintf(file, " group%d", inst->group);

   fprintf(file, "\n");
}
int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == ATTR) {
            assert(inst->src[i].offset % REG_SIZE == 0);
            int grf = payload_reg + inst->src[i].nr +
                      inst->src[i].offset / REG_SIZE;

            struct brw_reg reg = brw_vec8_grf(grf, 0);
            reg.swizzle = inst->src[i].swizzle;
            reg.type = inst->src[i].type;
            reg.abs = inst->src[i].abs;
            reg.negate = inst->src[i].negate;
            inst->src[i] = reg;
         }
      }
   }

   return payload_reg + vs_prog_data->nr_attribute_slots;
}
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      brw_stage_prog_data_add_params(stage_prog_data, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         stage_prog_data->param[slot] = BRW_PARAM_BUILTIN_ZERO;
      }
   }

   reg += ALIGN(uniforms, 2) / 2;

   for (int i = 0; i < 4; i++)
      reg += stage_prog_data->ubo_ranges[i].length;

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}
void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
bool
vec4_visitor::lower_minmax()
{
   assert(devinfo->gen < 6);

   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const vec4_builder ibld(this, block, inst);

      if (inst->opcode == BRW_OPCODE_SEL &&
          inst->predicate == BRW_PREDICATE_NONE) {
         /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
          * the original SEL.L/GE instruction
          */
         ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
                  inst->conditional_mod);
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->conditional_mod = BRW_CONDITIONAL_NONE;

         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}
void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}
void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.offset += REG_SIZE;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * BRW_SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
}
static bool
is_align1_df(vec4_instruction *inst)
{
   switch (inst->opcode) {
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
      return true;
   default:
      return false;
   }
}
/**
 * Three source instruction must have a GRF/MRF destination register.
 * ARF NULL is not allowed.  Fix that up by allocating a temporary GRF.
 */
void
vec4_visitor::fixup_3src_null_dest()
{
   bool progress = false;

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      if (inst->is_3src(devinfo) && inst->dst.is_null()) {
         const unsigned size_written = type_sz(inst->dst.type);
         const unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);

         inst->dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                            inst->dst.type);
         progress = true;
      }
   }

   if (progress)
      invalidate_live_intervals();
}
void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         class src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF:
            reg = byte_offset(brw_vecn_grf(4, src.nr, 0), src.offset);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;

         case UNIFORM:
            reg = stride(byte_offset(brw_vec4_grf(
                                        prog_data->base.dispatch_grf_start_reg +
                                        src.nr / 2, src.nr % 2 * 4),
                                     src.offset), 0, 4, 1);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;

         case FIXED_GRF:
            if (type_sz(src.type) == 8) {
               reg = src.as_brw_reg();
               break;
            }
            /* fallthrough */
         case ARF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            reg = retype(reg, src.type);
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }

         apply_logical_swizzle(&reg, inst, i);
         src = reg;

         /* From IVB PRM, vol4, part3, "General Restrictions on Regioning
          * Parameters":
          *
          *   "If ExecSize = Width and HorzStride ≠ 0, VertStride must be set
          *    to Width * HorzStride."
          *
          * We can break this rule with DF sources on DF align1
          * instructions, because the exec_size would be 4 and width is 4.
          * As we know we are not accessing to next GRF, it is safe to
          * set vstride to the formula given by the rule itself.
          */
         if (is_align1_df(inst) && (cvt(inst->exec_size) - 1) == src.width)
            src.vstride = src.width + src.hstride;
      }

      if (inst->is_3src(devinfo)) {
         /* 3-src instructions with scalar sources support arbitrary subnr,
          * but don't actually use swizzles.  Convert swizzle into subnr.
          * Skip this for double-precision instructions: RepCtrl=1 is not
          * allowed for them and needs special handling.
          */
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
                type_sz(inst->src[i].type) < 8) {
               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
            }
         }
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
         assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = dst.as_brw_reg();
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         reg = retype(reg, dst.type);
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      dst = reg;
   }
}
static bool
stage_uses_interleaved_attributes(unsigned stage,
                                  enum shader_dispatch_mode dispatch_mode)
{
   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      return true;
   case MESA_SHADER_GEOMETRY:
      return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
   default:
      return false;
   }
}
/**
 * Get the closest native SIMD width supported by the hardware for instruction
 * \p inst.  The instruction will be left untouched by
 * vec4_visitor::lower_simd_width() if the returned value matches the
 * instruction's original execution size.
 */
static unsigned
get_lowered_simd_width(const struct gen_device_info *devinfo,
                       enum shader_dispatch_mode dispatch_mode,
                       unsigned stage, const vec4_instruction *inst)
{
   /* Do not split some instructions that require special handling */
   switch (inst->opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->exec_size;
   default:
      break;
   }

   unsigned lowered_width = MIN2(16, inst->exec_size);

   /* We need to split some cases of double-precision instructions that write
    * 2 registers. We only need to care about this in gen7 because that is the
    * only hardware that implements fp64 in Align16.
    */
   if (devinfo->gen == 7 && inst->size_written > REG_SIZE) {
      /* Align16 8-wide double-precision SEL does not work well. Verified
       * empirically.
       */
      if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
         lowered_width = MIN2(lowered_width, 4);

      /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
       * Register Addressing:
       *
       *    "When destination spans two registers, the source MUST span two
       *     registers."
       */
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == BAD_FILE)
            continue;
         if (inst->size_read(i) <= REG_SIZE)
            lowered_width = MIN2(lowered_width, 4);

         /* Interleaved attribute setups use a vertical stride of 0, which
          * makes them hit the associated instruction decompression bug in gen7.
          * Split them to prevent this.
          */
         if (inst->src[i].file == ATTR &&
             stage_uses_interleaved_attributes(stage, dispatch_mode))
            lowered_width = MIN2(lowered_width, 4);
      }
   }

   /* IvyBridge can manage a maximum of 4 DFs per SIMD4x2 instruction, since
    * it doesn't support compression in Align16 mode, no matter if it has
    * force_writemask_all enabled or disabled (the latter is affected by the
    * compressed instruction bug in gen7, which is another reason to enforce
    * this limit).
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8))
      lowered_width = MIN2(lowered_width, 4);

   return lowered_width;
}
static bool
dst_src_regions_overlap(vec4_instruction *inst)
{
   if (inst->size_written == 0)
      return false;

   unsigned dst_start = inst->dst.offset;
   unsigned dst_end = dst_start + inst->size_written - 1;
   for (int i = 0; i < 3; i++) {
      if (inst->src[i].file == BAD_FILE)
         continue;

      if (inst->dst.file != inst->src[i].file ||
          inst->dst.nr != inst->src[i].nr)
         continue;

      unsigned src_start = inst->src[i].offset;
      unsigned src_end = src_start + inst->size_read(i) - 1;

      if ((dst_start >= src_start && dst_start <= src_end) ||
          (dst_end >= src_start && dst_end <= src_end) ||
          (dst_start <= src_start && dst_end >= src_end)) {
         return true;
      }
   }

   return false;
}
bool
vec4_visitor::lower_simd_width()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const unsigned lowered_width =
         get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
      assert(lowered_width <= inst->exec_size);
      if (lowered_width == inst->exec_size)
         continue;

      /* We need to deal with source / destination overlaps when splitting.
       * The hardware supports reading from and writing to the same register
       * in the same instruction, but we need to be careful that each split
       * instruction we produce does not corrupt the source of the next.
       *
       * The easiest way to handle this is to make the split instructions write
       * to temporaries if there is an src/dst overlap and then move from the
       * temporaries to the original destination. We also need to consider
       * instructions that do partial writes via align1 opcodes, in which case
       * we need to make sure that the we initialize the temporary with the
       * value of the instruction's dst.
       */
2256 bool needs_temp
= dst_src_regions_overlap(inst
);
2257 for (unsigned n
= 0; n
< inst
->exec_size
/ lowered_width
; n
++) {
2258 unsigned channel_offset
= lowered_width
* n
;
2260 unsigned size_written
= lowered_width
* type_sz(inst
->dst
.type
);
2262 /* Create the split instruction from the original so that we copy all
2263 * relevant instruction fields, then set the width and calculate the
2264 * new dst/src regions.
2266 vec4_instruction
*linst
= new(mem_ctx
) vec4_instruction(*inst
);
2267 linst
->exec_size
= lowered_width
;
2268 linst
->group
= channel_offset
;
2269 linst
->size_written
= size_written
;
2271 /* Compute split dst region */
2274 unsigned num_regs
= DIV_ROUND_UP(size_written
, REG_SIZE
);
2275 dst
= retype(dst_reg(VGRF
, alloc
.allocate(num_regs
)),
2277 if (inst
->is_align1_partial_write()) {
2278 vec4_instruction
*copy
= MOV(dst
, src_reg(inst
->dst
));
2279 copy
->exec_size
= lowered_width
;
2280 copy
->group
= channel_offset
;
2281 copy
->size_written
= size_written
;
2282 inst
->insert_before(block
, copy
);
2285 dst
= horiz_offset(inst
->dst
, channel_offset
);
2289 /* Compute split source regions */
2290 for (int i
= 0; i
< 3; i
++) {
2291 if (linst
->src
[i
].file
== BAD_FILE
)
2294 bool is_interleaved_attr
=
2295 linst
->src
[i
].file
== ATTR
&&
2296 stage_uses_interleaved_attributes(stage
,
2297 prog_data
->dispatch_mode
);
2299 if (!is_uniform(linst
->src
[i
]) && !is_interleaved_attr
)
2300 linst
->src
[i
] = horiz_offset(linst
->src
[i
], channel_offset
);
2303 inst
->insert_before(block
, linst
);
2305 /* If we used a temporary to store the result of the split
2306 * instruction, copy the result to the original destination
2309 vec4_instruction
*mov
=
2310 MOV(offset(inst
->dst
, lowered_width
, n
), src_reg(dst
));
2311 mov
->exec_size
= lowered_width
;
2312 mov
->group
= channel_offset
;
2313 mov
->size_written
= size_written
;
2314 mov
->predicate
= inst
->predicate
;
2315 inst
->insert_before(block
, mov
);
2319 inst
->remove(block
);
2324 invalidate_live_intervals();
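/* Sketch of the transformation above (illustrative only, register numbers are
 * made up): an 8-wide instruction "add vgrf3, vgrf3, vgrf4" whose dst and
 * src regions overlap would become
 *
 *    add(4)  vgrf7,         vgrf3,         vgrf4          (group 0)
 *    add(4)  vgrf8,         vgrf3+offset,  vgrf4+offset   (group 4)
 *    mov(4)  vgrf3,         vgrf7                         (group 0)
 *    mov(4)  vgrf3+offset,  vgrf8                         (group 4)
 *
 * where the temporaries vgrf7/vgrf8 prevent the second half from reading a
 * source the first half already overwrote.
 */
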
static brw_predicate
scalarize_predicate(brw_predicate predicate, unsigned writemask)
{
   if (predicate != BRW_PREDICATE_NORMAL)
      return predicate;

   switch (writemask) {
   case WRITEMASK_X:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case WRITEMASK_Y:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case WRITEMASK_Z:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case WRITEMASK_W:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      unreachable("invalid writemask");
   }
}

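/* Illustrative example: when scalarize_df() below splits a predicated
 * instruction down to its Z channel (writemask 0x4), a BRW_PREDICATE_NORMAL
 * predicate is rewritten to BRW_PREDICATE_ALIGN16_REPLICATE_Z so that the
 * single remaining channel is still controlled by the flag bit that
 * corresponds to Z.
 */
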
/* Gen7 has a hardware decompression bug that we can exploit to represent
 * a handful of additional swizzles natively.
 */
static bool
is_gen7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
{
   switch (inst->src[arg].swizzle) {
   case BRW_SWIZZLE_XXXX:
   case BRW_SWIZZLE_YYYY:
   case BRW_SWIZZLE_ZZZZ:
   case BRW_SWIZZLE_WWWW:
   case BRW_SWIZZLE_XYXY:
   case BRW_SWIZZLE_YXYX:
   case BRW_SWIZZLE_ZWZW:
   case BRW_SWIZZLE_WZWZ:
      return true;
   default:
      return false;
   }
}

/* 64-bit sources use regions with a width of 2. These 2 elements in each row
 * can be addressed using 32-bit swizzles (which is what the hardware supports)
 * but it also means that the swizzle we apply on the first two components of a
 * dvec4 is coupled with the swizzle we use for the last 2. In other words,
 * only some specific swizzle combinations can be natively supported.
 *
 * FIXME: we can go a step further and implement even more swizzle
 *        variations using only partial scalarization.
 *
 * For more details see:
 * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
 */
bool
vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
{
   const src_reg &src = inst->src[arg];
   assert(type_sz(src.type) == 8);

   /* Uniform regions have a vstride=0. Because we use 2-wide rows with
    * 64-bit regions it means that we cannot access components Z/W, so
    * return false for any such case. Interleaved attributes will also be
    * mapped to GRF registers with a vstride of 0, so apply the same
    * restriction to them.
    */
   if ((is_uniform(src) ||
        (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
         src.file == ATTR)) &&
       (brw_mask_for_swizzle(src.swizzle) & 12))
      return false;

   switch (src.swizzle) {
   case BRW_SWIZZLE_XYZW:
   case BRW_SWIZZLE_XXZZ:
   case BRW_SWIZZLE_YYWW:
   case BRW_SWIZZLE_YXWZ:
      return true;
   default:
      return devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg);
   }
}

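/* Illustrative examples: BRW_SWIZZLE_XXZZ is accepted above because each
 * dvec2 row only replicates its own first component, which a 32-bit XYXY
 * hardware swizzle can express, while something like BRW_SWIZZLE_ZZZZ is
 * only accepted on gen7, where the vstride=0 decompression quirk lets both
 * rows read the same dvec2.
 */
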
bool
vec4_visitor::scalarize_df()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      /* Skip DF instructions that operate in Align1 mode */
      if (is_align1_df(inst))
         continue;

      /* Check if this is a double-precision instruction */
      bool is_double = type_sz(inst->dst.type) == 8;
      for (int arg = 0; !is_double && arg < 3; arg++) {
         is_double = inst->src[arg].file != BAD_FILE &&
            type_sz(inst->src[arg].type) == 8;
      }

      if (!is_double)
         continue;

      /* Skip the lowering for specific regioning scenarios that we can
       * support natively.
       */
      bool skip_lowering = true;

      /* XY and ZW writemasks operate in 32-bit, which means that they don't
       * have a native 64-bit representation and they should always be split.
       */
      if (inst->dst.writemask == WRITEMASK_XY ||
          inst->dst.writemask == WRITEMASK_ZW) {
         skip_lowering = false;
      } else {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
               continue;

            skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
         }
      }

      if (skip_lowering)
         continue;

      /* Generate scalar instructions for each enabled channel */
      for (unsigned chan = 0; chan < 4; chan++) {
         unsigned chan_mask = 1 << chan;
         if (!(inst->dst.writemask & chan_mask))
            continue;

         vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);

         for (unsigned i = 0; i < 3; i++) {
            unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
            scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
         }

         scalar_inst->dst.writemask = chan_mask;

         if (inst->predicate != BRW_PREDICATE_NONE) {
            scalar_inst->predicate =
               scalarize_predicate(inst->predicate, chan_mask);
         }

         inst->insert_before(block, scalar_inst);
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

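/* Sketch (illustrative only): a DF instruction "add.xz dst, a.yyzz, b" whose
 * regioning cannot be represented natively would be rewritten by the pass
 * above as
 *
 *    add.x dst, a.yyyy, b.xxxx
 *    add.z dst, a.zzzz, b.zzzz
 *
 * i.e. one single-channel instruction per enabled writemask channel, with
 * each source swizzle replicated to that channel.
 */
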
bool
vec4_visitor::lower_64bit_mad_to_mul_add()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->opcode != BRW_OPCODE_MAD)
         continue;

      if (type_sz(inst->dst.type) != 8)
         continue;

      dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);

      /* Use the copy constructor so we copy all relevant instruction fields
       * from the original mad into the add and mul instructions.
       */
      vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
      mul->opcode = BRW_OPCODE_MUL;
      mul->dst = mul_dst;
      mul->src[0] = inst->src[1];
      mul->src[1] = inst->src[2];
      mul->src[2].file = BAD_FILE;

      vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
      add->opcode = BRW_OPCODE_ADD;
      add->src[0] = src_reg(mul_dst);
      add->src[1] = inst->src[0];
      add->src[2].file = BAD_FILE;

      inst->insert_before(block, mul);
      inst->insert_before(block, add);
      inst->remove(block);

      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}

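/* Illustrative example: a double-precision "mad dst, a, b, c" (dst = b * c + a
 * in this IR's operand order) is rewritten by the pass above as
 *
 *    mul tmp, b, c
 *    add dst, tmp, a
 *
 * with tmp being a freshly allocated dvec4 temporary.
 */
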
/* The align16 hardware can only do 32-bit swizzle channels, so we need to
 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
 * to 32-bit swizzle channels in hardware registers.
 *
 * @inst and @arg identify the original vec4 IR source operand we need to
 * translate the swizzle for and @hw_reg is the hardware register where we
 * will write the hardware swizzle to use.
 *
 * This pass assumes that Align16/DF instructions have been fully scalarized
 * previously so there is just one 64-bit swizzle channel to deal with for any
 * given Vec4 IR source.
 */
void
vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
                                    vec4_instruction *inst, int arg)
{
   src_reg reg = inst->src[arg];

   if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
      return;

   /* If this is not a 64-bit operand or this is a scalar instruction we don't
    * need to do anything about the swizzles.
    */
   if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
      hw_reg->swizzle = reg.swizzle;
      return;
   }

   /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
   assert(brw_is_single_value_swizzle(reg.swizzle) ||
          is_supported_64bit_region(inst, arg));

   /* Apply the region <2, 2, 1> for GRF or <0, 2, 1> for uniforms, as align16
    * HW can only do 32-bit swizzle channels.
    */
   hw_reg->width = BRW_WIDTH_2;

   if (is_supported_64bit_region(inst, arg) &&
       !is_gen7_supported_64bit_swizzle(inst, arg)) {
      /* Supported 64-bit swizzles are those such that their first two
       * components, when expanded to 32-bit swizzles, match the semantics
       * of the original 64-bit swizzle with 2-wide row regioning.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   } else {
      /* If we got here then we have one of the following:
       *
       * 1. An unsupported swizzle, which should be single-value thanks to the
       *    scalarization pass.
       *
       * 2. A gen7 supported swizzle. These can be single-value or double-value
       *    swizzles. If the latter, they are never cross-dvec2 channels. For
       *    these we always need to activate the gen7 vstride=0 exploit.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      assert((swizzle0 < 2) == (swizzle1 < 2));

      /* To gain access to Z/W components we need to select the second half
       * of the register and then use an X/Y swizzle to select Z/W respectively.
       */
      if (swizzle0 >= 2) {
         *hw_reg = suboffset(*hw_reg, 2);
         swizzle0 -= 2;
         swizzle1 -= 2;
      }

      /* All gen7-specific supported swizzles require the vstride=0 exploit */
      if (devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg))
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;

      /* Any 64-bit source with an offset at 16B is intended to address the
       * second half of a register and needs a vertical stride of 0 so we:
       *
       * 1. Don't violate register region restrictions.
       * 2. Activate the gen7 instruction decompression bug exploit when the
       *    execution size is greater than 4.
       */
      if (hw_reg->subnr % REG_SIZE == 16) {
         assert(devinfo->gen == 7);
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
      }

      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   }
}

bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

#define OPT(pass, args...) ({                                          \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info.name, iteration, pass_num);  \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })

   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-00-start",
               stage_abbrev, nir->info.name);

      backend_shader::dump_instructions(filename);
   }

   bool progress = false;
   int iteration = 0;
   int pass_num = 0;

   OPT(opt_predicated_break, this);
   OPT(opt_reduce_swizzle);
   OPT(dead_code_eliminate);
   OPT(dead_control_flow_eliminate, this);
   OPT(opt_copy_propagation);
   OPT(opt_cmod_propagation);
   OPT(opt_register_coalesce);
   OPT(eliminate_find_live_channel);

   if (OPT(opt_vector_float)) {
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (devinfo->gen <= 5 && OPT(lower_minmax)) {
      OPT(opt_cmod_propagation);
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (OPT(lower_simd_width)) {
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   OPT(lower_64bit_mad_to_mul_add);

   /* Run this before payload setup because tessellation shaders
    * rely on it to prevent cross dvec2 regioning on DF attributes
    * that are setup so that XY are on the second half of register and
    * ZW are in the first half of the next.
    */
   OPT(scalarize_df);

   setup_payload();

   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (!no_spill[i])
            spill_reg(i);
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      OPT(scalarize_df);
   }

   fixup_3src_null_dest();

   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      compiler->shader_perf_log(log_data,
                                "%s shader triggered register spilling.  "
                                "Try reducing the number of live vec4 values "
                                "to improve performance.\n",
                                stage_name);

      while (!reg_allocate()) {
         if (failed)
            return false;
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      OPT(scalarize_df);
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed;
}

} /* namespace brw */

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               const nir_shader *src_shader,
               int shader_time_index,
               char **error_str)
{
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
   nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
   shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);

   const unsigned *assembly = NULL;

   if (prog_data->base.vue_map.varying_to_slot[VARYING_SLOT_EDGE] != -1) {
      /* If the output VUE map contains VARYING_SLOT_EDGE then we need to copy
       * the edge flag from VERT_ATTRIB_EDGEFLAG.  This will be done
       * automatically by brw_vec4_visitor::emit_urb_slot but we need to
       * ensure that prog_data->inputs_read is accurate.
       *
       * In order to make late NIR passes aware of the change, we actually
       * whack shader->info.inputs_read instead.  This is safe because we just
       * made a copy of the shader.
       */
      assert(key->copy_edgeflag);
      shader->info.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   prog_data->inputs_read = shader->info.inputs_read;
   prog_data->double_inputs_read = shader->info.vs.double_inputs;

   brw_nir_lower_vs_inputs(shader, key->gl_attrib_wa_flags);
   brw_nir_lower_vue_outputs(shader, is_scalar);
   shader = brw_postprocess_nir(shader, compiler, is_scalar);

   prog_data->base.clip_distance_mask =
      ((1 << shader->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << shader->info.cull_distance_array_size) - 1) <<
      shader->info.clip_distance_array_size;

   unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute.  So, add an extra slot.
    */
   if (shader->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
        BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
      nr_attribute_slots++;
   }

   /* gl_DrawID and IsIndexedDraw share their own vec4 slot */
   if (shader->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID) |
        BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))) {
      nr_attribute_slots++;
   }

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))
      prog_data->uses_is_indexed_draw = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX))
      prog_data->uses_firstvertex = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE))
      prog_data->uses_baseinstance = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
      prog_data->uses_vertexid = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))
      prog_data->uses_instanceid = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID))
      prog_data->uses_drawid = true;

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (is_scalar)
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(nr_attribute_slots, 2);
   else
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);

   prog_data->nr_attribute_slots = nr_attribute_slots;

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);

   if (compiler->devinfo->gen == 6) {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   } else {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
      /* On Cannonlake software shall not program an allocation size that
       * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
       */
      if (compiler->devinfo->gen == 10 &&
          prog_data->base.urb_entry_size % 3 == 0)
         prog_data->base.urb_entry_size++;
   }

   if (INTEL_DEBUG & DEBUG_VS) {
      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }

   if (is_scalar) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
                   NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
                   shader, 8, shader_time_index);
      if (!v.run_vs()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, log_data, mem_ctx,
                     &prog_data->base.base, v.promoted_constants,
                     v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
      if (INTEL_DEBUG & DEBUG_VS) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            shader->info.label ? shader->info.label :
                               "unnamed",
                            shader->info.name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8);
      assembly = g.get_assembly();
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, log_data, key, prog_data,
                        shader, mem_ctx, shader_time_index);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                            shader, &prog_data->base, v.cfg);