/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/ir_print_visitor.h"

namespace brw {
int
vec4_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes = 0;
   int attribute_map[VERT_ATTRIB_MAX + 1];

   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   /* VertexID is stored by the VF as the last vertex element, but we
    * don't represent it with a flag in inputs_read, so we call it
    * VERT_ATTRIB_MAX.
    */
   if (prog_data->uses_vertexid) {
      attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
      nr_attributes++;
   }
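
   /* Every live input now has a payload GRF assigned in attribute_map;
    * rewrite any ATTR registers in the instruction stream to refer to those
    * hardware registers directly.
    */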
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   return payload_reg + nr_attributes;
}

int
vec4_visitor::setup_uniforms(int reg)
{
   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (intel->gen < 6 && this->uniforms == 0) {
      this->uniform_vector_size[this->uniforms] = 1;

      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static float zero = 0.0;
         c->prog_data.param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
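      /* Each 256-bit push constant register holds two vec4 uniforms, so the
       * push constant footprint is the uniform count rounded up to an even
       * number and halved.
       */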
      reg += ALIGN(uniforms, 2) / 2;
   }

   c->prog_data.nr_params = this->uniforms * 4;

   c->prog_data.curb_read_length = reg - 1;
   c->prog_data.uses_new_param_layout = true;

   return reg;
}

void
vec4_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
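
/* The resulting payload layout is: g0 holds the URB handles and other fixed
 * thread data, push constants start at g1, the remapped vertex attributes
 * follow the push constants, and first_non_payload_grf is the first GRF
 * left for the register allocator.
 */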

struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}

struct brw_reg
vec4_instruction::get_src(int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
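      /* Push constants are uploaded starting at g1 and packed two vec4
       * uniforms per register, so uniform N lives in GRF 1 + N / 2 at a
       * subregister offset of (N % 2) * 4 floats.
       */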
      brw_reg = stride(brw_vec4_grf(1 + (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}

void
vec4_visitor::generate_math1_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}

void
vec4_visitor::generate_math1_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_SCALAR,
            BRW_MATH_PRECISION_FULL);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

void
vec4_visitor::generate_math2_gen7(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
}

void
vec4_visitor::generate_math2_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

void
vec4_visitor::generate_math2_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13:
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;
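
   /* POW keeps its natural operand order; for INT_QUOTIENT and INT_REMAINDER
    * the sources are swapped so that the divisor ends up in Operand0 as the
    * PRM excerpt above requires.
    */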
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);

   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            op0,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

void
vec4_visitor::generate_tex(vec4_instruction *inst,
                           struct brw_reg dst,
                           struct brw_reg src)
{
   int msg_type = -1;

   if (intel->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      default:
         assert(!"should not get here: invalid VS texture opcode");
         break;
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         assert(!"should not get here: invalid VS texture opcode");
         break;
      }
   }

   assert(msg_type != -1);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->texture_offset) {
      /* Explicitly set up the message header by copying g0 to the MRF. */
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
              retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* Then set the offset bits in DWord 2. */
      brw_set_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, inst->base_mrf, 2),
                     BRW_REGISTER_TYPE_UD),
              brw_imm_uw(inst->texture_offset));
      brw_set_access_mode(p, BRW_ALIGN_16);
   } else if (inst->header_present) {
      /* Set up an implied move from g0 to the MRF. */
      src = brw_vec8_grf(0, 0);
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }
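
   /* Integer-typed destinations (e.g. texelFetch() or textureSize() results)
    * ask the sampler to return raw SINT32/UINT32 data rather than converting
    * to float.
    */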
   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              SURF_INDEX_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              1, /* response length */
              inst->mlen,
              inst->header_present,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              return_format);
}

void
vec4_visitor::generate_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 false,         /* allocate */
                 true,          /* used */
                 inst->mlen,
                 0,             /* response len */
                 inst->eot,     /* eot */
                 inst->eot,     /* writes complete */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

void
vec4_visitor::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                struct brw_reg index)
{
   int second_vertex_offset;
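
   /* The two interleaved vertices of a vec4 thread live 16 bytes apart in
    * scratch; the offset units the message expects appear to differ by
    * generation (OWords on gen6 and later, bytes before that), which is why
    * the second vertex's offset is 1 or 16 below.
    */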
   if (intel->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   brw_set_predicate_inverse(p, true);
   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

void
vec4_visitor::generate_scratch_read(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

void
vec4_visitor::generate_scratch_write(vec4_instruction *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src,
                                     struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (intel->gen >= 7)
      msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (intel->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (intel->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

void
vec4_visitor::generate_pull_constant_load(vec4_instruction *inst,
                                          struct brw_reg dst,
                                          struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           SURF_INDEX_VERT_CONST_BUFFER,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

void
vec4_visitor::generate_vs_instruction(vec4_instruction *instruction,
                                      struct brw_reg dst,
                                      struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *)instruction;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (intel->gen == 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         /* Also works for Gen7. */
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (intel->gen >= 7) {
         generate_math2_gen7(inst, dst, src[0], src[1]);
      } else if (intel->gen == 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
      generate_tex(inst, dst, src[0]);
      break;

   case VS_OPCODE_URB_WRITE:
      generate_urb_write(inst);
      break;

   case VS_OPCODE_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0]);
      break;

   default:
      if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
         fail("unsupported opcode in `%s' in VS\n",
              brw_opcodes[inst->opcode].name);
      } else {
         fail("Unsupported opcode %d in VS", inst->opcode);
      }
      break;
   }
}

bool
vec4_visitor::run()
{
   if (c->key.userclip_active && !c->key.uses_clip_distance)
      setup_uniform_clipplane_values();

   /* Generate VS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   visit_instructions(shader->ir);

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();
   pack_uniform_registers();
   move_push_constants_to_pull_constants();
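
   /* Run the optimization passes below repeatedly; each pass can expose new
    * opportunities for the others, so iterate until none of them makes
    * further progress.
    */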
   bool progress;
   do {
      progress = false;
      progress = dead_code_eliminate() || progress;
      progress = opt_copy_propagation() || progress;
      progress = opt_algebraic() || progress;
      progress = opt_compute_to_mrf() || progress;
   } while (progress);

   brw_set_access_mode(p, BRW_ALIGN_16);

   generate_code();

   return !failed;
}

void
vec4_visitor::generate_code()
{
   int last_native_inst = 0;
   const char *last_annotation_string = NULL;
   ir_instruction *last_annotation_ir = NULL;

   int loop_stack_array_size = 16;
   int loop_stack_depth = 0;
   brw_instruction **loop_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
   int *if_depth_in_loop =
      rzalloc_array(this->mem_ctx, int, loop_stack_array_size);
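
   /* loop_stack remembers the DO instruction for each nested loop so the
    * matching WHILE can find it, and if_depth_in_loop tracks how many IFs
    * are open inside the current loop (brw_BREAK/brw_CONT need that count).
    * Both arrays grow on demand when loops nest deeper than 16 levels.
    */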

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("Native code for vertex shader %d:\n", prog->Name);
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               last_annotation_ir->print();
            }
         }

         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(i);
      }
      dst = inst->get_dst();

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
         brw_set_acc_write_control(p, 0);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(intel->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
         }
         if_depth_in_loop[loop_stack_depth]++;
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if (loop_stack_array_size <= loop_stack_depth) {
            loop_stack_array_size *= 2;
            loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
                                  loop_stack_array_size);
            if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
                                        loop_stack_array_size);
         }
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
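
      /* Pre-gen6, BREAK and CONT are emitted with a zero jump count and have
       * to be patched once the matching WHILE is known; the br factor below
       * is presumably there because jump counts are measured in different
       * units on gen5 than on gen4.
       */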
      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p);
         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
         break;

      default:
         generate_vs_instruction(inst, dst, src);
         break;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("\n");
   }

   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);

   /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      for (unsigned int i = 0; i < p->nr_insn; i++) {
         printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                ((uint32_t *)&p->store[i])[3],
                ((uint32_t *)&p->store[i])[2],
                ((uint32_t *)&p->store[i])[1],
                ((uint32_t *)&p->store[i])[0]);
         brw_disasm(stdout, &p->store[i], intel->gen);
      }
   }
}

bool
brw_vs_emit(struct gl_shader_program *prog, struct brw_vs_compile *c)
{
   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("GLSL IR for native vertex shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
   }

   vec4_visitor v(c, prog, shader);
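
   /* If code generation fails, surface it as a link failure and append the
    * visitor's failure message to the program info log.
    */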
   if (!v.run()) {
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);

      return false;
   }

   return true;
}

} /* namespace brw */