/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "glsl/ir_print_visitor.h"

extern "C" {
#include "brw_eu.h"
};

namespace brw {
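
/* Map ATTR register file references to the payload GRFs where the VF
 * delivered each active vertex attribute, and compute urb_read_length,
 * which is counted in pairs of registers: e.g. three active attributes
 * need (3 + 1) / 2 = 2 URB read pairs.
 */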
int
vec4_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes = 0;
   int attribute_map[VERT_ATTRIB_MAX];

   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         reg.type = inst->src[i].type;
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   return payload_reg + nr_attributes;
}
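
/* Lay out the push constants that follow g0.  Each register holds two
 * vec4 uniforms, so e.g. five uniform vec4s round up to six slots and
 * occupy three payload registers (ALIGN(5, 2) / 2 == 3).
 */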
int
vec4_visitor::setup_uniforms(int reg)
{
   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (intel->gen < 6 && this->uniforms == 0) {
      this->uniform_vector_size[this->uniforms] = 1;

      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static float zero = 0.0;
         c->prog_data.param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   c->prog_data.nr_params = this->uniforms * 4;

   c->prog_data.curb_read_length = reg - 1;
   c->prog_data.uses_new_param_layout = true;

   return reg;
}
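
/* Assign the thread payload register layout: g0 carries the URB
 * handles, push constants start at g1, and the vertex attributes
 * follow the uniforms.
 */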
void
vec4_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
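
/* Translate this instruction's destination into the brw_reg encoding
 * consumed by the brw_eu assembler helpers.
 */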
struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}
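
/* Translate source i into a brw_reg.  Uniforms are packed two vec4s
 * per GRF starting at g1, hence the reg / 2 register and (reg % 2) * 4
 * subregister arithmetic: uniform slot 3, for example, becomes a
 * <0,4,1> region at g2.4.
 */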
struct brw_reg
vec4_instruction::get_src(int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
      brw_reg = stride(brw_vec4_grf(1 + (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}
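
/* Pre-gen6, math is sent to the shared math function unit through
 * message register base_mrf rather than executed in the EU pipeline.
 */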
void
vec4_visitor::generate_math1_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}
static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}
void
vec4_visitor::generate_math1_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_SCALAR,
            BRW_MATH_PRECISION_FULL);
   brw_set_access_mode(p, BRW_ALIGN_16);
}
void
vec4_visitor::generate_math2_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
}
void
vec4_visitor::generate_math2_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *  ...
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);

   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            op0,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}
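
/* Emit the URB write that delivers the shader's outputs; when
 * inst->eot is set it also terminates the thread.
 */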
void
vec4_visitor::generate_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 false,         /* allocate */
                 true,          /* used */
                 inst->mlen,
                 0,             /* response len */
                 inst->eot,     /* eot */
                 inst->eot,     /* writes complete */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}
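
/* Compute the two block offsets for an oword dual block message.  The
 * SIMD4x2 pair's second vertex lives one oword further along: gen6+
 * takes the offset in owords (hence +1), while older parts appear to
 * take it in bytes (hence +16).
 */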
void
vec4_visitor::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                struct brw_reg index)
{
   int second_vertex_offset;

   if (intel->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   brw_set_predicate_inverse(p, true);
   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
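
/* Read one vec4 per vertex of the SIMD4x2 pair back from scratch
 * space, using a stateless (binding table index 255) oword dual block
 * read.
 */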
void
vec4_visitor::generate_scratch_read(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}
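
/* Spill one vec4 per vertex of the SIMD4x2 pair out to scratch space.
 * Pre-gen6 this also requests a write commit; see the comments below.
 */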
void
vec4_visitor::generate_scratch_write(vec4_instruction *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src,
                                     struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (intel->gen >= 7)
      msg_type = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (intel->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (intel->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}
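
/* Load a uniform that was demoted to a pull constant with an oword
 * dual block read from the VS constant buffer surface.
 */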
void
vec4_visitor::generate_pull_constant_load(vec4_instruction *inst,
                                          struct brw_reg dst,
                                          struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           SURF_INDEX_VERT_CONST_BUFFER,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}
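
/* Emit the opcodes that expand to more than one native instruction;
 * the simple ALU cases are handled inline in generate_code().
 */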
void
vec4_visitor::generate_vs_instruction(vec4_instruction *instruction,
                                      struct brw_reg dst,
                                      struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *)instruction;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (intel->gen >= 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (intel->gen >= 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case VS_OPCODE_URB_WRITE:
      generate_urb_write(inst);
      break;

   case VS_OPCODE_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0]);
      break;

   default:
      if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
         fail("unsupported opcode in `%s' in VS\n",
              brw_opcodes[inst->opcode].name);
      } else {
         fail("Unsupported opcode %d in VS", inst->opcode);
      }
      break;
   }
}
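
/* Compile the shader: build vec4 IR from the GLSL IR, lower array
 * access and uniforms, run the optimization loop to a fixed point,
 * then allocate registers and generate native code.
 */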
bool
vec4_visitor::run()
{
   if (c->key.userclip_active && !c->key.uses_clip_distance)
      setup_uniform_clipplane_values();

   /* Generate VS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   visit_instructions(shader->ir);

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();
   pack_uniform_registers();
   move_push_constants_to_pull_constants();

   bool progress;
   do {
      progress = false;
      progress = dead_code_eliminate() || progress;
      progress = opt_copy_propagation() || progress;
      progress = opt_algebraic() || progress;
      progress = opt_compute_to_mrf() || progress;
   } while (progress);

   setup_payload();
   reg_allocate();

   brw_set_access_mode(p, BRW_ALIGN_16);

   generate_code();

   return !failed;
}
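
/* Walk the IR list and emit native instructions, maintaining the
 * if/loop nesting state needed to patch pre-gen6 BREAK/CONT jump
 * counts, and disassembling as we go under INTEL_DEBUG=vs.
 */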
void
vec4_visitor::generate_code()
{
   int last_native_inst = 0;
   const char *last_annotation_string = NULL;
   ir_instruction *last_annotation_ir = NULL;

   int loop_stack_array_size = 16;
   int loop_stack_depth = 0;
   brw_instruction **loop_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
   int *if_depth_in_loop =
      rzalloc_array(this->mem_ctx, int, loop_stack_array_size);

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("Native code for vertex shader %d:\n", prog->Name);
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;
      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf("   ");
               last_annotation_ir->print();
               printf("\n");
            }
         }

         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
      }
      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(i);
      }
      dst = inst->get_dst();

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;

      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MACH:
         brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
         brw_set_acc_write_control(p, 0);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;

      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;

      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;

      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(intel->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
         }
         if_depth_in_loop[loop_stack_depth]++;
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;

      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         if_depth_in_loop[loop_stack_depth]--;
         break;
      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if (loop_stack_array_size <= loop_stack_depth) {
            loop_stack_array_size *= 2;
            loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
                                  loop_stack_array_size);
            if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
                                        loop_stack_array_size);
         }
         if_depth_in_loop[loop_stack_depth] = 0;
         break;
      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
         else
            brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               } else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                          inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
         break;
      }

      default:
         generate_vs_instruction(inst, dst, src);
         break;
      }
      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            if (0) {
               printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                      ((uint32_t *)&p->store[i])[3],
                      ((uint32_t *)&p->store[i])[2],
                      ((uint32_t *)&p->store[i])[1],
                      ((uint32_t *)&p->store[i])[0]);
            }
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("\n");
   }

   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);
   /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (0) {
      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         for (unsigned int i = 0; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }
   }
}
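
/* Entry point from the rest of the driver: emit native code for the
 * linked vertex shader, recording failures in the program info log.
 */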
bool
brw_vs_emit(struct gl_shader_program *prog, struct brw_vs_compile *c)
{
   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("GLSL IR for native vertex shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n");
   }

   vec4_visitor v(c, prog, shader);
   if (!v.run()) {
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);
      return false;
   }

   return true;
}

} /* namespace brw */