/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
#include "dev/gen_debug.h"
#include "program/prog_parameter.h"
#include "util/u_math.h"

#define MAX_INSTRUCTION (1 << 30)
void
src_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
}

src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   this->type = brw_type_for_base_type(type);
}
/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}
void
dst_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}

dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}

dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}
dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}

bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}
bool
vec4_instruction::is_send_from_grf()
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      /* 8-wide compressed DF operations are executed as two 4-wide operations,
       * so we have a src/dst hazard if the first half of the instruction
       * overwrites the source of the second half. Prevent this by marking
       * compressed instructions as having src/dst hazards, so the register
       * allocator assigns safe register regions for dst and srcs.
       */
      return size_written > REG_SIZE;
   }
}
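
/* Illustrative sketch (not from the original source): a virtual opcode that
 * the generator expands into something like
 *
 *    mov dst.xy, src.xy
 *    mov dst.zw, src.zw
 *
 * would clobber src before the second MOV reads it if dst and src were
 * assigned the same GRF; this hook is how such opcodes report that conflict
 * to the register allocator.
 */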
unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual vertical stride. */
      return exec_size * type_sz(src[arg].type);
   }
}
bool
vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}
bool
vec4_instruction::can_do_cmod()
{
   if (!backend_instruction::can_do_cmod())
      return false;

   /* The accumulator result appears to get used for the conditional modifier
    * generation.  When negating a UD value, there is a 33rd bit generated for
    * the sign in the accumulator value, so now you can't check, for example,
    * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
    */
   for (unsigned i = 0; i < 3; i++) {
      if (src[i].file != BAD_FILE &&
          type_is_unsigned_int(src[i].type) && src[i].negate)
         return false;
   }

   return true;
}
bool
vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gen6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->gen == 6 && is_math())
         return false;

      return true;
   }
}
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}
/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
unsigned
vec4_instruction::implied_mrf_writes() const
{
   if (mlen == 0 || is_send_from_grf())
      return 0;

   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
      return header_size;
   default:
      unreachable("not reached");
   }
}
bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}

bool
src_reg::negative_equals(const src_reg &r) const
{
   return this->backend_reg::negative_equals(r) &&
          !reladdr && !r.reladdr;
}
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      unsigned last_reg = ~0u, last_offset = ~0u;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type = BRW_REGISTER_TYPE_LAST;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             type_sz(inst->src[0].type) < 8 &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = ~0u;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            last_reg = ~0u;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
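
/* Example (illustrative, not from the original source): a run such as
 *
 *    mov vgrf7.x:F, 0.0F
 *    mov vgrf7.y:F, 1.0F
 *    mov vgrf7.z:F, 2.0F
 *    mov vgrf7.w:F, 3.0F
 *
 * collapses into a single
 *
 *    mov vgrf7.xyzw:F, [0F, 1F, 2F, 3F]VF
 *
 * provided every immediate fits the 8-bit restricted-float (VF) encoding.
 */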
/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;

      case VEC4_OPCODE_TO_DOUBLE:
      case VEC4_OPCODE_DOUBLE_TO_F32:
      case VEC4_OPCODE_DOUBLE_TO_D32:
      case VEC4_OPCODE_DOUBLE_TO_U32:
      case VEC4_OPCODE_PICK_LOW_32BIT:
      case VEC4_OPCODE_PICK_HIGH_32BIT:
      case VEC4_OPCODE_SET_LOW_32BIT:
      case VEC4_OPCODE_SET_HIGH_32BIT:
         swizzle = brw_swizzle_for_size(4);
         break;

      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}
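
/* Worked example (illustrative): with dst.writemask = .yz,
 * brw_swizzle_for_mask() yields a swizzle that keeps Y and Z and repeats a
 * used channel (here Y) in the dead X/W slots.  Composing that with a source
 * swizzle of .wxzy picks source channel x for Y and z for Z, giving the
 * .xxzx result shown in the header comment above, so the writes that
 * produced the w and y channels become dead.
 */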
void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}
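
/* Example (illustrative): a mat4 uniform allocated as u3 with per-column
 * byte offsets 0, 16, 32 and 48 is rewritten so a read of column 2
 * (u3 + offset 32) becomes a read of u5 + offset 0; each column then has its
 * own .nr and unused columns can be dropped independently.
 */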
/* This function returns the register number where we placed the uniform */
static int
set_push_constant_loc(const int nr_uniforms, int *new_uniform_count,
                      const int src, const int size, const int channel_size,
                      int *new_loc, int *new_chan,
                      int *new_chans_used)
{
   int dst;
   /* Find the lowest place we can slot this uniform in. */
   for (dst = 0; dst < nr_uniforms; dst++) {
      if (ALIGN(new_chans_used[dst], channel_size) + size <= 4)
         break;
   }

   assert(dst < nr_uniforms);

   new_loc[src] = dst;
   new_chan[src] = ALIGN(new_chans_used[dst], channel_size);
   new_chans_used[dst] = ALIGN(new_chans_used[dst], channel_size) + size;

   *new_uniform_count = MAX2(*new_uniform_count, dst + 1);

   return dst;
}
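
/* Example (illustrative): packing a float (size 1, channel_size 1) after a
 * vec3 already occupying a slot (new_chans_used = 3) gives
 * ALIGN(3, 1) + 1 = 4 <= 4, so the float lands in the same push-constant
 * vec4 at channel 3 instead of starting a new register.
 */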
void
vec4_visitor::pack_uniform_registers()
{
   if (!compiler->compact_params)
      return;

   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];
   bool is_aligned_to_dvec4[this->uniforms];
   int new_chans_used[this->uniforms];
   int channel_sizes[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));
   memset(new_chans_used, 0, sizeof(new_chans_used));
   memset(is_aligned_to_dvec4, 0, sizeof(is_aligned_to_dvec4));
   memset(channel_sizes, 0, sizeof(channel_sizes));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         assert(type_sz(inst->src[i].type) % 4 == 0);
         int channel_size = type_sz(inst->src[i].type) / 4;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
            unsigned used = MAX2(chans_used[reg], channel * channel_size);
            if (used <= 4) {
               chans_used[reg] = used;
               channel_sizes[reg] = MAX2(channel_sizes[reg], channel_size);
            } else {
               is_aligned_to_dvec4[reg] = true;
               is_aligned_to_dvec4[reg + 1] = true;
               chans_used[reg + 1] = used - 4;
               channel_sizes[reg + 1] = MAX2(channel_sizes[reg + 1], channel_size);
            }
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't broken up piecewise by
          * the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         int channel_size = type_sz(inst->src[0].type) / 4;
         for (unsigned i = 0; i < vec4s_read; i++) {
            chans_used[reg + i] = 4;
            channel_sizes[reg + i] = MAX2(channel_sizes[reg + i], channel_size);
         }
      }
   }

   int new_uniform_count = 0;

   /* As the uniforms are going to be reordered, take the data from a temporary
    * copy of the original param[].
    */
   uint32_t *param = ralloc_array(NULL, uint32_t, stage_prog_data->nr_params);
   memcpy(param, stage_prog_data->param,
          sizeof(uint32_t) * stage_prog_data->nr_params);

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants. Start with dvec{3,4} because they are aligned to
    * dvec4 size (2 vec4).
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || !is_aligned_to_dvec4[src])
         continue;

      /* dvec3 are aligned to dvec4 size, apply the alignment of the size
       * to 4 to avoid moving last component of a dvec3 to the available
       * location at the end of a previous dvec3. These available locations
       * could be filled by smaller variables in next loop.
       */
      size = ALIGN(size, 4);
      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   /* Continue with the rest of data, which is aligned to vec4. */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || is_aligned_to_dvec4[src])
         continue;

      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   ralloc_free(param);
   this->uniforms = new_uniform_count;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM)
            continue;

         int chan = new_chan[src] / channel_sizes[src];
         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(chan, chan, chan, chan);
      }
   }
}
/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            /* Full mixed-type saturates don't happen.  However, we can end up
             * with things like:
             *
             *    mov.sat(8) g21<1>DF       -1F
             *
             * Other mixed-size-but-same-base-type cases may also be possible.
             */
            if (inst->dst.type != inst->src[0].type &&
                inst->dst.type != BRW_REGISTER_TYPE_DF &&
                inst->src[0].type != BRW_REGISTER_TYPE_F)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->src[0].type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_OR:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}
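
/* Example (illustrative): "mul dst, src, 1.0F" is rewritten to
 * "mov dst, src", and "mul dst, src, 0.0F" becomes "mov dst, 0.0F", so later
 * copy propagation and dead-code elimination can clean up the chain.
 */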
/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gen6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   int max_uniform_components = 32 * 8;
   if (this->uniforms * 4 <= max_uniform_components)
      return;

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         uint32_t *values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0 ; i < 3; i++) {
         if (inst->src[i].file != UNIFORM ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
            glsl_type::dvec4_type : glsl_type::vec4_type;
         dst_reg temp = dst_reg(this, temp_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}
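
/* Example (illustrative): with 80 uniform vec4s (320 scalar components), the
 * first 64 stay as push constants and vec4s 64..79 are demoted; each access
 * to one of them is replaced by an emit_pull_constant_load() into a
 * temporary VGRF, after which pack_uniform_registers() compacts the
 * remaining push-constant slots.
 */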
/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

#define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)

   /* From the Cherryview and Broadwell PRMs:
    *
    * "When source or destination datatype is 64b or operation is integer DWord
    * multiply, DepCtrl must not be used."
    *
    * SKL PRMs don't include this restriction, however, gen7 seems to be
    * affected, at least by the 64b restriction, since DepCtrl with double
    * precision instructions seems to produce GPU hangs in some cases.
    */
   if (devinfo->gen == 8 || gen_device_info_is_9lp(devinfo)) {
      if (inst->opcode == BRW_OPCODE_MUL &&
          IS_DWORD(inst->src[0]) &&
          IS_DWORD(inst->src[1]))
         return true;
   }

   if (devinfo->gen >= 7 && devinfo->gen <= 8) {
      if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
          IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
         return true;
   }

#undef IS_64BIT
#undef IS_DWORD

   if (devinfo->gen >= 8) {
      if (inst->opcode == BRW_OPCODE_F32TO16)
         return true;
   }

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control. They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}
/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}
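
/* Example (illustrative): for the DP4 sequence in the comment above, the
 * pass ends up setting no_dd_clear on the writes to temp.x/y/z and
 * no_dd_check on the writes to temp.y/z/w, so the hardware skips the
 * scoreboard dependency between instructions touching disjoint channels of
 * the same GRF.
 */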
bool
vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   /* If we write to the flag register changing the swizzle would change
    * what channels are written to the flag register.
    */
   if (writes_flag())
      return false;

   /* We can't swizzle implicit accumulator access.  We'd have to
    * reswizzle the producer of the accumulator value in addition
    * to the consumer (i.e. both MUL and MACH).  Just skip this.
    */
   if (reads_accumulator_implicitly())
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}
/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy z.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE)
            continue;

         if (src[i].file == IMM) {
            assert(src[i].type != BRW_REGISTER_TYPE_V &&
                   src[i].type != BRW_REGISTER_TYPE_UV);

            /* Vector immediate types need to be reswizzled. */
            if (src[i].type == BRW_REGISTER_TYPE_VF) {
               const unsigned imm[] = {
                  (src[i].ud >> 0) & 0x0ff,
                  (src[i].ud >> 8) & 0x0ff,
                  (src[i].ud >> 16) & 0x0ff,
                  (src[i].ud >> 24) & 0x0ff,
               };

               src[i] = brw_imm_vf4(imm[BRW_GET_SWZ(swizzle, 0)],
                                    imm[BRW_GET_SWZ(swizzle, 1)],
                                    imm[BRW_GET_SWZ(swizzle, 2)],
                                    imm[BRW_GET_SWZ(swizzle, 3)]);
            }

            continue;
         }

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}
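
/* Example (illustrative): reswizzling a VF immediate such as
 * [0F, 1F, 2F, 3F] by swizzle yywx re-packs its four 8-bit fields into
 * [1F, 1F, 3F, 0F]; register sources instead just get their swizzle composed
 * with the requested one.
 */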
/**
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;

   const vec4_live_variables &live = live_analysis.require();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (live.var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->gen == 6) {
                  /* gen6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* VS_OPCODE_UNPACK_FLAGS_SIMD4X2 generates a bunch of mov(1)
             * instructions, and this optimization pass is not capable of
             * handling that.  Bail on these instructions and hope that some
             * later optimization pass can do the right thing after they are
             * expanded.
             */
            if (scan_inst->opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2)
               break;

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* Only allow coalescing between registers of the same type size.
             * Otherwise we would need to make the pass aware of the fact that
             * channel sizes are different for single and double precision.
             */
            if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
               break;

            /* Check that scan_inst writes the same amount of data as the
             * instruction, otherwise coalescing would lead to writing a
             * different (larger or smaller) region of the destination
             */
            if (scan_inst->size_written != inst->size_written)
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing writes of 8 channels (1 register
             * for single-precision and 2 registers for double-precision)
             * starting at the source offset of the copy instruction.
             */
            if (DIV_ROUND_UP(scan_inst->size_written,
                             type_sz(scan_inst->dst.type)) > 8 ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            unsigned start = scan_inst->base_mrf;
            unsigned end = scan_inst->base_mrf + scan_inst->mlen;

            if (inst->dst.nr >= start && inst->dst.nr < end) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
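
/* Example (illustrative): given
 *
 *    add vgrf3.xyzw, vgrf1, vgrf2
 *    mov vgrf4.xyzw, vgrf3.xyzw
 *
 * where vgrf3 dies at the MOV, the ADD is rewritten to target vgrf4 directly
 * and the MOV is removed.
 */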
/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}
/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL | DEPENDENCY_VARIABLES);
}
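
/* Example (illustrative): a mat3 temporary allocated as one 3-register VGRF
 * ends up as three single-register VGRFs; an access at byte offset 64 within
 * the old register is rewritten to the new VGRF holding its third row, at
 * offset 0.
 */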
void
vec4_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   vec4_instruction *inst = (vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf%d.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg / 2,
              inst->flag_subreg % 2,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
           inst->exec_size);
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_CSEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f%d.%d", inst->flag_subreg / 2, inst->flag_subreg % 2);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_to_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_DF:
            fprintf(file, "%fDF", inst->src[i].df);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   if (inst->exec_size != 8)
      fprintf(file, " group%d", inst->group);

   fprintf(file, "\n");
}
int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == ATTR) {
            assert(inst->src[i].offset % REG_SIZE == 0);
            int grf = payload_reg + inst->src[i].nr +
                      inst->src[i].offset / REG_SIZE;

            struct brw_reg reg = brw_vec8_grf(grf, 0);
            reg.swizzle = inst->src[i].swizzle;
            reg.type = inst->src[i].type;
            reg.abs = inst->src[i].abs;
            reg.negate = inst->src[i].negate;

            inst->src[i] = reg;
         }
      }
   }

   return payload_reg + vs_prog_data->nr_attribute_slots;
}
int
vec4_visitor::setup_uniforms(int reg)
{
   prog_data->base.dispatch_grf_start_reg = reg;

   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->gen < 6 && this->uniforms == 0) {
      brw_stage_prog_data_add_params(stage_prog_data, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         stage_prog_data->param[slot] = BRW_PARAM_BUILTIN_ZERO;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   for (int i = 0; i < 4; i++)
      reg += stage_prog_data->ubo_ranges[i].length;

   stage_prog_data->nr_params = this->uniforms * 4;

   prog_data->base.curb_read_length =
      reg - prog_data->base.dispatch_grf_start_reg;

   return reg;
}

void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
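
/* Example (illustrative): for a gen7 VS with 6 uniform vec4s, no UBO push
 * ranges and 4 attribute slots, g0 holds the URB handles, push constants
 * occupy g1..g3 (ALIGN(6, 2) / 2 = 3 registers), attributes start at g4, and
 * first_non_payload_grf ends up as 8.
 */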
bool
vec4_visitor::lower_minmax()
{
   assert(devinfo->gen < 6);

   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const vec4_builder ibld(this, block, inst);

      if (inst->opcode == BRW_OPCODE_SEL &&
          inst->predicate == BRW_PREDICATE_NONE) {
         /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
          * the original SEL.L/GE instruction
          */
         ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
                  inst->conditional_mod);
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->conditional_mod = BRW_CONDITIONAL_NONE;

         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->gen >= 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}
void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.offset += REG_SIZE;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * BRW_SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
}
static bool
is_align1_df(vec4_instruction *inst)
{
   switch (inst->opcode) {
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
      return true;
   default:
      return false;
   }
}
/**
 * Three source instruction must have a GRF/MRF destination register.
 * ARF NULL is not allowed.  Fix that up by allocating a temporary GRF.
 */
void
vec4_visitor::fixup_3src_null_dest()
{
   bool progress = false;

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      if (inst->is_3src(devinfo) && inst->dst.is_null()) {
         const unsigned size_written = type_sz(inst->dst.type);
         const unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);

         inst->dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                            inst->dst.type);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL |
                          DEPENDENCY_VARIABLES);
}
void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         class src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF:
            reg = byte_offset(brw_vecn_grf(4, src.nr, 0), src.offset);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;

         case UNIFORM:
            reg = stride(byte_offset(brw_vec4_grf(
                                        prog_data->base.dispatch_grf_start_reg +
                                        src.nr / 2, src.nr % 2 * 4),
                                     src.offset),
                         0, 4, 1);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;

         case FIXED_GRF:
            if (type_sz(src.type) == 8) {
               reg = src.as_brw_reg();
               break;
            }
            /* fallthrough */
         case ARF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            reg = retype(reg, src.type);
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }

         apply_logical_swizzle(&reg, inst, i);
         src = reg;

         /* From IVB PRM, vol4, part3, "General Restrictions on Regioning
          * Parameters":
          *
          * "If ExecSize = Width and HorzStride ≠ 0, VertStride must be set
          *  to Width * HorzStride."
          *
          * We can break this rule with DF sources on DF align1
          * instructions, because the exec_size would be 4 and width is 4.
          * As we know we are not accessing to next GRF, it is safe to
          * set vstride to the formula given by the rule itself.
          */
         if (is_align1_df(inst) && (cvt(inst->exec_size) - 1) == src.width)
            src.vstride = src.width + src.hstride;
      }

      if (inst->is_3src(devinfo)) {
         /* 3-src instructions with scalar sources support arbitrary subnr,
          * but don't actually use swizzles.  Convert swizzle into subnr.
          * Skip this for double-precision instructions: RepCtrl=1 is not
          * allowed for them and needs special handling.
          */
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
                type_sz(inst->src[i].type) < 8) {
               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
            }
         }
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
         assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = dst.as_brw_reg();
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         reg = retype(reg, dst.type);
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      dst = reg;
   }
}
static bool
stage_uses_interleaved_attributes(unsigned stage,
                                  enum shader_dispatch_mode dispatch_mode)
{
   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      return true;
   case MESA_SHADER_GEOMETRY:
      return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
   default:
      return false;
   }
}
2184 * Get the closest native SIMD width supported by the hardware for instruction
2185 * \p inst. The instruction will be left untouched by
2186 * vec4_visitor::lower_simd_width() if the returned value matches the
2187 * instruction's original execution size.
2190 get_lowered_simd_width(const struct gen_device_info
*devinfo
,
2191 enum shader_dispatch_mode dispatch_mode
,
2192 unsigned stage
, const vec4_instruction
*inst
)
2194 /* Do not split some instructions that require special handling */
2195 switch (inst
->opcode
) {
2196 case SHADER_OPCODE_GEN4_SCRATCH_READ
:
2197 case SHADER_OPCODE_GEN4_SCRATCH_WRITE
:
2198 return inst
->exec_size
;
2203 unsigned lowered_width
= MIN2(16, inst
->exec_size
);
   /* We need to split some cases of double-precision instructions that write
    * 2 registers.  We only need to care about this in gen7 because that is the
    * only hardware that implements fp64 in Align16.
    */
   if (devinfo->gen == 7 && inst->size_written > REG_SIZE) {
      /* Align16 8-wide double-precision SEL does not work well.  Verified
       * empirically.
       */
      if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
         lowered_width = MIN2(lowered_width, 4);

      /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
       * Register Addressing:
       *
       *    "When destination spans two registers, the source MUST span two
       *     registers."
       */
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == BAD_FILE)
            continue;
         if (inst->size_read(i) <= REG_SIZE)
            lowered_width = MIN2(lowered_width, 4);

         /* Interleaved attribute setups use a vertical stride of 0, which
          * makes them hit the associated instruction decompression bug in
          * gen7.  Split them to prevent this.
          */
         if (inst->src[i].file == ATTR &&
             stage_uses_interleaved_attributes(stage, dispatch_mode))
            lowered_width = MIN2(lowered_width, 4);
      }
   }

   /* IvyBridge can manage a maximum of 4 DFs per SIMD4x2 instruction, since
    * it doesn't support compression in Align16 mode, no matter if it has
    * force_writemask_all enabled or disabled (the latter is affected by the
    * compressed instruction bug in gen7, which is another reason to enforce
    * this limit).
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8))
      lowered_width = MIN2(lowered_width, 4);

   return lowered_width;
}
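/* For example (illustrative): on IvyBridge (gen7 but not Haswell) any
 * instruction with a DF execution type and an execution size larger than 4
 * is limited to a width of 4 by the last check above, so lower_simd_width()
 * will split it into multiple narrower instructions.
 */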
static bool
dst_src_regions_overlap(vec4_instruction *inst)
{
   if (inst->size_written == 0)
      return false;

   unsigned dst_start = inst->dst.offset;
   unsigned dst_end = dst_start + inst->size_written - 1;
   for (int i = 0; i < 3; i++) {
      if (inst->src[i].file == BAD_FILE)
         continue;

      if (inst->dst.file != inst->src[i].file ||
          inst->dst.nr != inst->src[i].nr)
         continue;

      unsigned src_start = inst->src[i].offset;
      unsigned src_end = src_start + inst->size_read(i) - 1;

      if ((dst_start >= src_start && dst_start <= src_end) ||
          (dst_end >= src_start && dst_end <= src_end) ||
          (dst_start <= src_start && dst_end >= src_end)) {
         return true;
      }
   }

   return false;
}
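/* For example (illustrative): if an instruction reads bytes 0..63 of VGRF 5
 * and also writes bytes 32..63 of VGRF 5, the [32, 63] destination interval
 * falls inside the [0, 63] source interval, so the check above reports an
 * overlap and lower_simd_width() routes the split writes through a temporary.
 */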
bool
vec4_visitor::lower_simd_width()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const unsigned lowered_width =
         get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
      assert(lowered_width <= inst->exec_size);
      if (lowered_width == inst->exec_size)
         continue;

      /* We need to deal with source / destination overlaps when splitting.
       * The hardware supports reading from and writing to the same register
       * in the same instruction, but we need to be careful that each split
       * instruction we produce does not corrupt the source of the next.
       *
       * The easiest way to handle this is to make the split instructions write
       * to temporaries if there is a src/dst overlap and then move from the
       * temporaries to the original destination.  We also need to consider
       * instructions that do partial writes via align1 opcodes, in which case
       * we need to make sure that we initialize the temporary with the value
       * of the instruction's dst.
       */
      bool needs_temp = dst_src_regions_overlap(inst);
      for (unsigned n = 0; n < inst->exec_size / lowered_width; n++) {
         unsigned channel_offset = lowered_width * n;

         unsigned size_written = lowered_width * type_sz(inst->dst.type);

         /* Create the split instruction from the original so that we copy all
          * relevant instruction fields, then set the width and calculate the
          * new dst/src regions.
          */
         vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
         linst->exec_size = lowered_width;
         linst->group = channel_offset;
         linst->size_written = size_written;

         /* Compute split dst region */
         dst_reg dst;
         if (needs_temp) {
            unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
            dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                         inst->dst.type);

            if (inst->is_align1_partial_write()) {
               vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
               copy->exec_size = lowered_width;
               copy->group = channel_offset;
               copy->size_written = size_written;
               inst->insert_before(block, copy);
            }
         } else {
            dst = horiz_offset(inst->dst, channel_offset);
         }
         linst->dst = dst;

         /* Compute split source regions */
         for (int i = 0; i < 3; i++) {
            if (linst->src[i].file == BAD_FILE)
               continue;

            bool is_interleaved_attr =
               linst->src[i].file == ATTR &&
               stage_uses_interleaved_attributes(stage,
                                                 prog_data->dispatch_mode);

            if (!is_uniform(linst->src[i]) && !is_interleaved_attr)
               linst->src[i] = horiz_offset(linst->src[i], channel_offset);
         }

         inst->insert_before(block, linst);

         /* If we used a temporary to store the result of the split
          * instruction, copy the result to the original destination.
          */
         if (needs_temp) {
            vec4_instruction *mov =
               MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
            mov->exec_size = lowered_width;
            mov->group = channel_offset;
            mov->size_written = size_written;
            mov->predicate = inst->predicate;
            inst->insert_before(block, mov);
         }
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   return progress;
}
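/* A hypothetical walk-through of the pass above: an 8-wide instruction
 * lowered to width 4 is emitted as two 4-wide copies, the first with
 * group/channel_offset 0 and the second with group/channel_offset 4.  When
 * the destination overlaps a source, each copy writes a fresh VGRF temporary
 * and a MOV (inheriting the original predicate) then copies the temporary
 * into the corresponding half of the original destination.
 */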
static brw_predicate
scalarize_predicate(brw_predicate predicate, unsigned writemask)
{
   if (predicate != BRW_PREDICATE_NORMAL)
      return predicate;

   switch (writemask) {
   case WRITEMASK_X:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case WRITEMASK_Y:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case WRITEMASK_Z:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case WRITEMASK_W:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      unreachable("invalid writemask");
   }
}
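/* For example (illustrative): when scalarize_df() splits off the copy of an
 * instruction that writes only the Z channel, a normal predicate is converted
 * to BRW_PREDICATE_ALIGN16_REPLICATE_Z so the single surviving channel is
 * still controlled by the flag bit that corresponds to Z.
 */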
/* Gen7 has a hardware decompression bug that we can exploit to represent a
 * handful of additional swizzles natively.
 */
static bool
is_gen7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
{
   switch (inst->src[arg].swizzle) {
   case BRW_SWIZZLE_XXXX:
   case BRW_SWIZZLE_YYYY:
   case BRW_SWIZZLE_ZZZZ:
   case BRW_SWIZZLE_WWWW:
   case BRW_SWIZZLE_XYXY:
   case BRW_SWIZZLE_YXYX:
   case BRW_SWIZZLE_ZWZW:
   case BRW_SWIZZLE_WZWZ:
      return true;
   default:
      return false;
   }
}
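/* Note (illustrative): none of these swizzles mixes components from the two
 * dvec2 halves of a dvec4, which is what allows apply_logical_swizzle() to
 * express them with the gen7 vstride=0 decompression exploit.
 */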
/* 64-bit sources use regions with a width of 2.  These 2 elements in each row
 * can be addressed using 32-bit swizzles (which is what the hardware supports)
 * but it also means that the swizzle we apply on the first two components of a
 * dvec4 is coupled with the swizzle we use for the last 2.  In other words,
 * only some specific swizzle combinations can be natively supported.
 *
 * FIXME: we can go a step further and implement even more swizzle
 *        variations using only partial scalarization.
 *
 * For more details see:
 * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
 */
bool
vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
{
   const src_reg &src = inst->src[arg];
   assert(type_sz(src.type) == 8);

   /* Uniform regions have a vstride=0.  Because we use 2-wide rows with
    * 64-bit regions it means that we cannot access components Z/W, so
    * return false for any such case.  Interleaved attributes will also be
    * mapped to GRF registers with a vstride of 0, so apply the same
    * treatment.
    */
   if ((is_uniform(src) ||
        (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
         src.file == ATTR)) &&
       (brw_mask_for_swizzle(src.swizzle) & 12))
      return false;

   switch (src.swizzle) {
   case BRW_SWIZZLE_XYZW:
   case BRW_SWIZZLE_XXZZ:
   case BRW_SWIZZLE_YYWW:
   case BRW_SWIZZLE_YXWZ:
      return true;
   default:
      return devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg);
   }
}
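/* Worked example (illustrative): the natively supported swizzles above are
 * exactly the ones whose channels 2/3 repeat channels 0/1 shifted by one
 * dvec2, e.g. XXZZ.  With 2-wide rows, XXZZ maps to the 32-bit swizzle XYXY
 * applied to each row, which apply_logical_swizzle() computes as
 * BRW_SWIZZLE4(0, 1, 0, 1).
 */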
bool
vec4_visitor::scalarize_df()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      /* Skip DF instructions that operate in Align1 mode */
      if (is_align1_df(inst))
         continue;

      /* Check if this is a double-precision instruction */
      bool is_double = type_sz(inst->dst.type) == 8;
      for (int arg = 0; !is_double && arg < 3; arg++) {
         is_double = inst->src[arg].file != BAD_FILE &&
                     type_sz(inst->src[arg].type) == 8;
      }

      if (!is_double)
         continue;

      /* Skip the lowering for specific regioning scenarios that we can
       * support natively.
       */
      bool skip_lowering = true;

      /* XY and ZW writemasks operate in 32-bit, which means that they don't
       * have a native 64-bit representation and they should always be split.
       */
      if (inst->dst.writemask == WRITEMASK_XY ||
          inst->dst.writemask == WRITEMASK_ZW) {
         skip_lowering = false;
      } else {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
               continue;
            skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
         }
      }

      if (skip_lowering)
         continue;

      /* Generate scalar instructions for each enabled channel */
      for (unsigned chan = 0; chan < 4; chan++) {
         unsigned chan_mask = 1 << chan;
         if (!(inst->dst.writemask & chan_mask))
            continue;

         vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);

         for (unsigned i = 0; i < 3; i++) {
            unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
            scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
         }

         scalar_inst->dst.writemask = chan_mask;

         if (inst->predicate != BRW_PREDICATE_NONE) {
            scalar_inst->predicate =
               scalarize_predicate(inst->predicate, chan_mask);
         }

         inst->insert_before(block, scalar_inst);
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
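/* For example (illustrative): a predicated DF instruction with writemask XZ
 * and an unsupported source swizzle is rewritten as two copies, one writing
 * only X and one writing only Z, each with its sources' swizzles replicated
 * to a single channel and its predicate converted by scalarize_predicate().
 */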
bool
vec4_visitor::lower_64bit_mad_to_mul_add()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->opcode != BRW_OPCODE_MAD)
         continue;

      if (type_sz(inst->dst.type) != 8)
         continue;

      dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);

      /* Use the copy constructor so we copy all relevant instruction fields
       * from the original mad into the add and mul instructions.
       */
      vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
      mul->opcode = BRW_OPCODE_MUL;
      mul->dst = mul_dst;
      mul->src[0] = inst->src[1];
      mul->src[1] = inst->src[2];
      mul->src[2].file = BAD_FILE;

      vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
      add->opcode = BRW_OPCODE_ADD;
      add->src[0] = src_reg(mul_dst);
      add->src[1] = inst->src[0];
      add->src[2].file = BAD_FILE;

      inst->insert_before(block, mul);
      inst->insert_before(block, add);
      inst->remove(block);

      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   return progress;
}
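/* For example (illustrative): MAD dst, a, b, c computes dst = b * c + a, so
 * the lowering above emits MUL tmp, b, c followed by ADD dst, tmp, a, where
 * tmp is a freshly allocated dvec4 temporary.
 */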
/* The align16 hardware can only do 32-bit swizzle channels, so we need to
 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
 * to 32-bit swizzle channels in hardware registers.
 *
 * @inst and @arg identify the original vec4 IR source operand we need to
 * translate the swizzle for and @hw_reg is the hardware register where we
 * will write the hardware swizzle to use.
 *
 * This pass assumes that Align16/DF instructions have been fully scalarized
 * previously so there is just one 64-bit swizzle channel to deal with for any
 * given Vec4 IR source.
 */
void
vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
                                    vec4_instruction *inst, int arg)
{
   src_reg reg = inst->src[arg];

   if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
      return;

   /* If this is not a 64-bit operand or this is a scalar instruction we don't
    * need to do anything about the swizzles.
    */
   if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
      hw_reg->swizzle = reg.swizzle;
      return;
   }

   /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
   assert(brw_is_single_value_swizzle(reg.swizzle) ||
          is_supported_64bit_region(inst, arg));

   /* Apply the region <2, 2, 1> for GRF or <0, 2, 1> for uniforms, as align16
    * HW can only do 32-bit swizzle channels.
    */
   hw_reg->width = BRW_WIDTH_2;

   if (is_supported_64bit_region(inst, arg) &&
       !is_gen7_supported_64bit_swizzle(inst, arg)) {
      /* Supported 64-bit swizzles are those such that their first two
       * components, when expanded to 32-bit swizzles, match the semantics
       * of the original 64-bit swizzle with 2-wide row regioning.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   } else {
      /* If we got here then we have one of the following:
       *
       * 1. An unsupported swizzle, which should be single-value thanks to the
       *    scalarization pass.
       *
       * 2. A gen7 supported swizzle.  These can be single-value or double-value
       *    swizzles.  If the latter, they are never cross-dvec2 channels.  For
       *    these we always need to activate the gen7 vstride=0 exploit.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      assert((swizzle0 < 2) == (swizzle1 < 2));

      /* To gain access to Z/W components we need to select the second half
       * of the register and then use a X/Y swizzle to select Z/W respectively.
       */
      if (swizzle0 >= 2) {
         *hw_reg = suboffset(*hw_reg, 2);
         swizzle0 -= 2;
         swizzle1 -= 2;
      }

      /* All gen7-specific supported swizzles require the vstride=0 exploit */
      if (devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg))
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;

      /* Any 64-bit source with an offset at 16B is intended to address the
       * second half of a register and needs a vertical stride of 0 so we:
       *
       * 1. Don't violate register region restrictions.
       * 2. Activate the gen7 instruction decompression bug exploit when
       *    the execution size is larger than 4.
       */
      if (hw_reg->subnr % REG_SIZE == 16) {
         assert(devinfo->gen == 7);
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
      }

      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   }
}
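/* Worked example (illustrative): a scalarized DF source with logical swizzle
 * ZZZZ takes the else branch above: swizzle0 == swizzle1 == 2, so the region
 * is suboffset by two 32-bit components to reach the Z/W half, the swizzles
 * drop to 0, the gen7 vstride=0 exploit is enabled, and the final hardware
 * swizzle becomes BRW_SWIZZLE4(0, 1, 0, 1), i.e. XYXY.
 */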
void
vec4_visitor::invalidate_analysis(brw::analysis_dependency_class c)
{
   backend_shader::invalidate_analysis(c);
   live_analysis.invalidate(c);
}

bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();
   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();
#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info.name, iteration, pass_num);  \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })
   bool progress = false;
   int iteration = 0;
   int pass_num = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-00-start",
               stage_abbrev, nir->info.name);

      backend_shader::dump_instructions(filename);
   }
   OPT(opt_predicated_break, this);
   OPT(opt_reduce_swizzle);
   OPT(dead_code_eliminate);
   OPT(dead_control_flow_eliminate, this);
   OPT(opt_copy_propagation);
   OPT(opt_cmod_propagation);
   OPT(opt_register_coalesce);
   OPT(eliminate_find_live_channel);

   if (OPT(opt_vector_float)) {
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (devinfo->gen <= 5 && OPT(lower_minmax)) {
      OPT(opt_cmod_propagation);
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (OPT(lower_simd_width)) {
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   OPT(lower_64bit_mad_to_mul_add);

   /* Run this before payload setup because tessellation shaders
    * rely on it to prevent cross dvec2 regioning on DF attributes
    * that are setup so that XY are on the second half of a register and
    * ZW are in the first half of the next.
    */
   OPT(scalarize_df);

   setup_payload();
   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (!no_spill[i])
            spill_reg(i);
      }
      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      scalarize_df();
   }

   fixup_3src_null_dest();

   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      compiler->shader_perf_log(log_data,
                                "%s shader triggered register spilling.  "
                                "Try reducing the number of live vec4 values "
                                "to improve performance.\n",
                                stage_name);

      while (!reg_allocate()) {
         if (failed())
            return false;
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      scalarize_df();
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed();
}

} /* namespace brw */
/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               nir_shader *shader,
               int shader_time_index,
               struct brw_compile_stats *stats,
               char **error_str)
{
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
   brw_nir_apply_key(shader, compiler, &key->base, 8, is_scalar);

   const unsigned *assembly = NULL;

   if (prog_data->base.vue_map.varying_to_slot[VARYING_SLOT_EDGE] != -1) {
      /* If the output VUE map contains VARYING_SLOT_EDGE then we need to copy
       * the edge flag from VERT_ATTRIB_EDGEFLAG.  This will be done
       * automatically by brw_vec4_visitor::emit_urb_slot but we need to
       * ensure that prog_data->inputs_read is accurate.
       *
       * In order to make late NIR passes aware of the change, we actually
       * whack shader->info.inputs_read instead.  This is safe because we just
       * made a copy of the shader.
       */
      assert(key->copy_edgeflag);
      shader->info.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   prog_data->inputs_read = shader->info.inputs_read;
   prog_data->double_inputs_read = shader->info.vs.double_inputs;

   brw_nir_lower_vs_inputs(shader, key->gl_attrib_wa_flags);
   brw_nir_lower_vue_outputs(shader);
   brw_postprocess_nir(shader, compiler, is_scalar);

   prog_data->base.clip_distance_mask =
      ((1 << shader->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << shader->info.cull_distance_array_size) - 1) <<
       shader->info.clip_distance_array_size;

   unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute.  So, add an extra slot.
    */
   if (shader->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
        BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
      nr_attribute_slots++;
   }

   /* gl_DrawID and IsIndexedDraw share their very own vec4 */
   if (shader->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID) |
        BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))) {
      nr_attribute_slots++;
   }

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_IS_INDEXED_DRAW))
      prog_data->uses_is_indexed_draw = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX))
      prog_data->uses_firstvertex = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE))
      prog_data->uses_baseinstance = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
      prog_data->uses_vertexid = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))
      prog_data->uses_instanceid = true;

   if (shader->info.system_values_read &
       BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID))
      prog_data->uses_drawid = true;

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (is_scalar)
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(nr_attribute_slots, 2);
   else
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);

   prog_data->nr_attribute_slots = nr_attribute_slots;

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);

   if (compiler->devinfo->gen == 6) {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   } else {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
      /* On Cannonlake software shall not program an allocation size that
       * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
       */
      if (compiler->devinfo->gen == 10 &&
          prog_data->base.urb_entry_size % 3 == 0)
         prog_data->base.urb_entry_size++;
   }
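   /* Worked example (illustrative): with 10 attribute slots and a VUE map of
    * 12 slots, vue_entries is 12; on gens other than Sandybridge (gen6) that
    * gives an entry size of DIV_ROUND_UP(12, 4) = 3, and on Cannonlake
    * (gen10) the multiple-of-3 value is bumped to 4 by the check above.
    */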
   if (INTEL_DEBUG & DEBUG_VS) {
      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }

   if (is_scalar) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, log_data, mem_ctx, &key->base,
                   &prog_data->base.base,
                   shader, 8, shader_time_index);
      if (!v.run_vs()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, log_data, mem_ctx,
                     &prog_data->base.base, v.shader_stats,
                     v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
      if (INTEL_DEBUG & DEBUG_VS) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            shader->info.label ? shader->info.label :
                               "unnamed",
                            shader->info.name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8, stats);
      assembly = g.get_assembly();
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, log_data, key, prog_data,
                        shader, mem_ctx, shader_time_index);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                            shader, &prog_data->base,