1 /* Copyright © 2011 Intel Corporation
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice (including the next
11 * paragraph) shall be included in all copies or substantial portions of the
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "../glsl/ir_print_visitor.h"
/* Maps VF-delivered vertex attributes to payload GRFs and rewrites every
 * ATTR-file source in the instruction list to a fixed hardware register.
 * Returns the first GRF number after the attribute payload.
 * NOTE(review): this listing is fragmentary — original source lines are
 * missing between the numbered fragments; comments below are hedged where
 * the missing code would be needed to confirm behavior.
 */
35 vec4_visitor::setup_attributes(int payload_reg
)
38 int attribute_map
[VERT_ATTRIB_MAX
];
/* Build attribute index -> payload GRF mapping for each input actually
 * read by the program (inputs_read is a 64-bit attribute bitfield).
 */
41 for (int i
= 0; i
< VERT_ATTRIB_MAX
; i
++) {
42 if (prog_data
->inputs_read
& BITFIELD64_BIT(i
)) {
43 attribute_map
[i
] = payload_reg
+ nr_attributes
;
46 /* Do GL_FIXED rescaling for GLES2.0. Our GL_FIXED
47 * attributes come in as floating point conversions of the
50 if (c
->key
.gl_fixed_input_size
[i
] != 0) {
51 struct brw_reg reg
= brw_vec8_grf(attribute_map
[i
], 0);
/* Scale only the components the GL_FIXED attribute actually has
 * (writemask = low gl_fixed_input_size[i] bits) by 1/65536 to
 * convert from 16.16 fixed point. The emitting instruction is not
 * visible in this fragment — presumably a MUL; TODO confirm.
 */
54 brw_writemask(reg
, (1 << c
->key
.gl_fixed_input_size
[i
]) - 1),
55 reg
, brw_imm_f(1.0 / 65536.0));
/* Second pass: rewrite ATTR sources into direct HW_REG references into
 * the payload, carrying the source swizzle over.
 */
60 foreach_list(node
, &this->instructions
) {
61 vec4_instruction
*inst
= (vec4_instruction
*)node
;
63 for (int i
= 0; i
< 3; i
++) {
64 if (inst
->src
[i
].file
!= ATTR
)
67 int grf
= attribute_map
[inst
->src
[i
].reg
+ inst
->src
[i
].reg_offset
];
69 struct brw_reg reg
= brw_vec8_grf(grf
, 0);
70 reg
.dw1
.bits
.swizzle
= inst
->src
[i
].swizzle
;
/* Negation handling is in a missing line here — TODO confirm. */
73 if (inst
->src
[i
].negate
)
76 inst
->src
[i
].file
= HW_REG
;
77 inst
->src
[i
].fixed_hw_reg
= reg
;
81 /* The BSpec says we always have to read at least one thing from
82 * the VF, and it appears that the hardware wedges otherwise.
84 if (nr_attributes
== 0)
/* URB read length is in pairs of attributes (two vec4s per 256-bit row). */
87 prog_data
->urb_read_length
= (nr_attributes
+ 1) / 2;
89 return payload_reg
+ nr_attributes
;
/* Lays out push constants (CURBE): user clip planes first, then uniform
 * storage. Takes the first available GRF and returns the next free one.
 * NOTE(review): listing is fragmentary — some statements and braces are
 * missing between numbered fragments.
 */
93 vec4_visitor::setup_uniforms(int reg
)
95 /* User clip planes from curbe:
97 if (c
->key
.nr_userclip
) {
/* Gen6+: clip planes are packed two vec4s per GRF starting at 'reg'. */
98 if (intel
->gen
>= 6) {
99 for (int i
= 0; i
< c
->key
.nr_userclip
; i
++) {
100 c
->userplane
[i
] = stride(brw_vec4_grf(reg
+ i
/ 2,
101 (i
% 2) * 4), 0, 4, 1);
103 reg
+= ALIGN(c
->key
.nr_userclip
, 2) / 2;
/* Pre-gen6 path (else branch — the 'else' line is missing from this
 * fragment): clip planes live after a 6-element offset; TODO confirm.
 */
105 for (int i
= 0; i
< c
->key
.nr_userclip
; i
++) {
106 c
->userplane
[i
] = stride(brw_vec4_grf(reg
+ (6 + i
) / 2,
107 (i
% 2) * 4), 0, 4, 1);
109 reg
+= (ALIGN(6 + c
->key
.nr_userclip
, 4) / 4) * 2;
113 /* The pre-gen6 VS requires that some push constants get loaded no
114 * matter what, or the GPU would hang.
116 if (intel
->gen
< 6 && this->uniforms
== 0) {
/* Pad with one dummy uniform whose four components convert to zero. */
117 this->uniform_size
[this->uniforms
] = 1;
119 for (unsigned int i
= 0; i
< 4; i
++) {
120 unsigned int slot
= this->uniforms
* 4 + i
;
122 c
->prog_data
.param
[slot
] = NULL
;
123 c
->prog_data
.param_convert
[slot
] = PARAM_CONVERT_ZERO
;
/* Uniforms occupy two vec4 slots per GRF, rounded up. */
129 reg
+= ALIGN(uniforms
, 2) / 2;
132 /* for now, we are not doing any elimination of unused slots, nor
133 * are we packing our uniforms.
135 c
->prog_data
.nr_params
= this->uniforms
* 4;
/* curb_read_length counts GRFs of push constants after g0, hence -1. */
137 c
->prog_data
.curb_read_length
= reg
- 1;
138 c
->prog_data
.uses_new_param_layout
= true;
/* Computes the thread payload layout: uniforms (push constants) first,
 * then vertex attributes, and records the first GRF free for allocation.
 * NOTE(review): the initialization of 'reg' (presumably to 1, per the
 * comment below) is on a line missing from this fragment — confirm.
 */
144 vec4_visitor::setup_payload(void)
148 /* The payload always contains important data in g0, which contains
149 * the URB handles that are passed on to the URB write at the end
150 * of the thread. So, we always start push constants at g1.
154 reg
= setup_uniforms(reg
);
156 reg
= setup_attributes(reg
);
158 this->first_non_payload_grf
= reg
;
/* Converts this instruction's abstract destination into a concrete
 * brw_reg for the hardware emitter. The switch over dst.file and its
 * case labels are on lines missing from this fragment; the visible arms
 * appear to be GRF, HW_REG, a null-register case, and a default.
 */
162 vec4_instruction::get_dst(void)
164 struct brw_reg brw_reg
;
/* GRF destination: vec8 register with the IR's type and writemask. */
168 brw_reg
= brw_vec8_grf(dst
.reg
+ dst
.reg_offset
, 0);
169 brw_reg
= retype(brw_reg
, dst
.type
);
170 brw_reg
.dw1
.bits
.writemask
= dst
.writemask
;
/* HW_REG destination: already fully specified. */
174 brw_reg
= dst
.fixed_hw_reg
;
178 brw_reg
= brw_null_reg();
/* Default: an unhandled file is a programming error. */
182 assert(!"not reached");
183 brw_reg
= brw_null_reg();
/* Converts source operand i of this instruction into a concrete brw_reg.
 * NOTE(review): several case labels, break statements, and the abs/negate
 * guards are on lines missing from this fragment; the visible arms cover
 * GRF, IMM, UNIFORM, HW_REG, BAD_FILE, and a default.
 */
190 vec4_instruction::get_src(int i
)
192 struct brw_reg brw_reg
;
194 switch (src
[i
].file
) {
/* GRF source: vec8 register, retyped, with the IR swizzle applied. */
196 brw_reg
= brw_vec8_grf(src
[i
].reg
+ src
[i
].reg_offset
, 0);
197 brw_reg
= retype(brw_reg
, src
[i
].type
);
198 brw_reg
.dw1
.bits
.swizzle
= src
[i
].swizzle
;
/* abs/negate modifiers (their 'if' guards are missing here). */
200 brw_reg
= brw_abs(brw_reg
);
202 brw_reg
= negate(brw_reg
);
/* Immediate source: pick the immediate constructor matching the type. */
206 switch (src
[i
].type
) {
207 case BRW_REGISTER_TYPE_F
:
208 brw_reg
= brw_imm_f(src
[i
].imm
.f
);
210 case BRW_REGISTER_TYPE_D
:
211 brw_reg
= brw_imm_d(src
[i
].imm
.i
);
213 case BRW_REGISTER_TYPE_UD
:
214 brw_reg
= brw_imm_ud(src
[i
].imm
.u
);
217 assert(!"not reached");
218 brw_reg
= brw_null_reg();
/* Uniform source: push constants start at g1; two vec4 slots per GRF,
 * so /2 picks the GRF and %2*4 picks the subregister half.
 */
224 brw_reg
= stride(brw_vec4_grf(1 + (src
[i
].reg
+ src
[i
].reg_offset
) / 2,
225 ((src
[i
].reg
+ src
[i
].reg_offset
) % 2) * 4),
227 brw_reg
= retype(brw_reg
, src
[i
].type
);
228 brw_reg
.dw1
.bits
.swizzle
= src
[i
].swizzle
;
230 brw_reg
= brw_abs(brw_reg
);
232 brw_reg
= negate(brw_reg
);
/* HW_REG source: already fully specified. */
236 brw_reg
= src
[i
].fixed_hw_reg
;
240 /* Probably unused. */
241 brw_reg
= brw_null_reg();
245 assert(!"not reached");
246 brw_reg
= brw_null_reg();
/* Emits a single-source math instruction (RCP/RSQ/SQRT/EXP2/LOG2/SIN/COS)
 * on pre-gen6 hardware via the math message unit. The brw_math() call
 * itself and the dst/src parameters are on lines missing from this
 * fragment; only the trailing arguments are visible.
 */
254 vec4_visitor::generate_math1_gen4(vec4_instruction
*inst
,
260 brw_math_function(inst
->opcode
),
261 BRW_MATH_SATURATE_NONE
,
264 BRW_MATH_DATA_SCALAR
,
265 BRW_MATH_PRECISION_FULL
);
/* Sanity-checks a source operand for a gen6 math instruction: gen6 math
 * ignores source modifiers and swizzles, so the operand must not carry
 * a non-identity swizzle (additional modifier asserts appear to be on
 * lines missing from this fragment).
 *
 * Fix: the original used assignment (=) instead of comparison (==) in
 * the assert, which made it vacuously true (BRW_SWIZZLE_XYZW is nonzero)
 * and silently mutated the pass-by-value copy instead of validating it.
 */
269 check_gen6_math_src_arg(struct brw_reg src
)
271 /* Source swizzles are ignored. */
274 assert(src
.dw1
.bits
.swizzle
== BRW_SWIZZLE_XYZW
);
/* Emits a single-source math instruction on gen6+. Gen6 math must run in
 * align1 mode, so writemasks/swizzles are unavailable — asserted here —
 * and access mode is restored to align16 afterward. The brw_math() call
 * header is on a line missing from this fragment.
 */
278 vec4_visitor::generate_math1_gen6(vec4_instruction
*inst
,
282 /* Can't do writemask because math can't be align16. */
283 assert(dst
.dw1
.bits
.writemask
== WRITEMASK_XYZW
);
284 check_gen6_math_src_arg(src
);
286 brw_set_access_mode(p
, BRW_ALIGN_1
);
289 brw_math_function(inst
->opcode
),
290 BRW_MATH_SATURATE_NONE
,
293 BRW_MATH_DATA_SCALAR
,
294 BRW_MATH_PRECISION_FULL
);
/* Restore the default align16 mode for subsequent vec4 instructions. */
295 brw_set_access_mode(p
, BRW_ALIGN_16
);
/* Emits a two-source math instruction (POW) on gen6+. As with the
 * one-source case, gen6 math requires align1 mode, so both sources are
 * checked for identity swizzles and the destination for a full writemask.
 * The brw_math2() call header is on a line missing from this fragment.
 */
299 vec4_visitor::generate_math2_gen6(vec4_instruction
*inst
,
304 /* Can't do writemask because math can't be align16. */
305 assert(dst
.dw1
.bits
.writemask
== WRITEMASK_XYZW
);
306 /* Source swizzles are ignored. */
307 check_gen6_math_src_arg(src0
);
308 check_gen6_math_src_arg(src1
);
310 brw_set_access_mode(p
, BRW_ALIGN_1
);
313 brw_math_function(inst
->opcode
),
/* Restore the default align16 mode for subsequent vec4 instructions. */
315 brw_set_access_mode(p
, BRW_ALIGN_16
);
/* Emits a two-source math instruction (POW) on pre-gen6 hardware. The
 * second operand is delivered via the message payload (base_mrf + 1)
 * rather than as an instruction source. The brw_math() call header is on
 * a line missing from this fragment.
 */
319 vec4_visitor::generate_math2_gen4(vec4_instruction
*inst
,
324 /* Can't do writemask because math can't be align16. */
325 assert(dst
.dw1
.bits
.writemask
== WRITEMASK_XYZW
);
/* Stage src1 into the math message's second MRF. */
327 brw_MOV(p
, brw_message_reg(inst
->base_mrf
+ 1), src1
);
329 brw_set_access_mode(p
, BRW_ALIGN_1
);
332 brw_math_function(inst
->opcode
),
333 BRW_MATH_SATURATE_NONE
,
336 BRW_MATH_DATA_VECTOR
,
337 BRW_MATH_PRECISION_FULL
);
/* Restore the default align16 mode for subsequent vec4 instructions. */
338 brw_set_access_mode(p
, BRW_ALIGN_16
);
/* Emits the URB write message that delivers the shaded vertex outputs.
 * The brw_urb_WRITE() call line and some middle arguments (used/complete
 * flags, message length) are missing from this fragment; the visible
 * arguments are annotated inline below.
 */
342 vec4_visitor::generate_urb_write(vec4_instruction
*inst
)
345 brw_null_reg(), /* dest */
346 inst
->base_mrf
, /* starting mrf reg nr */
347 brw_vec8_grf(0, 0), /* src */
348 false, /* allocate */
351 0, /* response len */
353 inst
->eot
, /* writes complete */
354 inst
->offset
, /* urb destination offset */
355 BRW_URB_SWIZZLE_INTERLEAVE
);
/* Fills in the per-vertex block offsets (M1.0 and M1.4) for an OWord
 * dual-block scratch read/write message. 'index' supplies the first
 * vertex's offset; the second vertex's offset is derived from it.
 * NOTE(review): the gen check selecting second_vertex_offset and the
 * else/predication lines are missing from this fragment — presumably
 * 1 on gen6+ (OWord units) vs 16 (byte units) earlier; confirm.
 */
359 vec4_visitor::generate_oword_dual_block_offsets(struct brw_reg m1
,
360 struct brw_reg index
)
362 int second_vertex_offset
;
365 second_vertex_offset
= 1;
367 second_vertex_offset
= 16;
369 m1
= retype(m1
, BRW_REGISTER_TYPE_D
);
371 /* Set up M1 (message payload). Only the block offsets in M1.0 and
372 * M1.4 are used, and the rest are ignored.
374 struct brw_reg m1_0
= suboffset(vec1(m1
), 0);
375 struct brw_reg m1_4
= suboffset(vec1(m1
), 4);
376 struct brw_reg index_0
= suboffset(vec1(index
), 0);
377 struct brw_reg index_4
= suboffset(vec1(index
), 4);
/* Scalar moves with masking disabled in align1 mode, so the writes land
 * exactly on M1.0 / M1.4 regardless of execution mask.
 */
379 brw_push_insn_state(p
);
380 brw_set_mask_control(p
, BRW_MASK_DISABLE
);
381 brw_set_access_mode(p
, BRW_ALIGN_1
);
383 brw_MOV(p
, m1_0
, index_0
);
385 brw_set_predicate_inverse(p
, true);
/* An immediate index already carries both offsets; otherwise add the
 * second-vertex delta to the second channel.
 */
386 if (index
.file
== BRW_IMMEDIATE_VALUE
) {
388 brw_MOV(p
, m1_4
, index_4
);
390 brw_ADD(p
, m1_4
, index_4
, brw_imm_d(second_vertex_offset
));
393 brw_pop_insn_state(p
);
/* Emits a scratch-space (register spill) read as an OWord dual-block
 * dataport read message. NOTE(review): several lines are missing from
 * this fragment, including the header MOV's emitting call, the gen6
 * condition before the first msg_type assignment, and the tail of the
 * brw_set_dp_read_message() argument list.
 */
397 vec4_visitor::generate_scratch_read(vec4_instruction
*inst
,
399 struct brw_reg index
)
/* Gen6+: copy g0 into the message header with masking disabled. */
401 if (intel
->gen
>= 6) {
402 brw_push_insn_state(p
);
403 brw_set_mask_control(p
, BRW_MASK_DISABLE
);
405 retype(brw_message_reg(inst
->base_mrf
), BRW_REGISTER_TYPE_D
),
406 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_D
));
407 brw_pop_insn_state(p
);
/* Per-vertex block offsets go in the second message register. */
410 generate_oword_dual_block_offsets(brw_message_reg(inst
->base_mrf
+ 1),
/* Select the generation-specific dataport read message encoding. */
416 msg_type
= GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ
;
417 else if (intel
->gen
== 5 || intel
->is_g4x
)
418 msg_type
= G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ
;
420 msg_type
= BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ
;
422 /* Each of the 8 channel enables is considered for whether each
425 struct brw_instruction
*send
= brw_next_insn(p
, BRW_OPCODE_SEND
);
426 brw_set_dest(p
, send
, dst
);
427 brw_set_src0(p
, send
, brw_message_reg(inst
->base_mrf
));
428 brw_set_dp_read_message(p
, send
,
429 255, /* binding table index: stateless access */
430 BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD
,
432 BRW_DATAPORT_READ_TARGET_RENDER_CACHE
,
/* Emits a scratch-space (register spill) write as an OWord dual-block
 * dataport write message. Predication is deferred from the setup MOVs to
 * the final send. NOTE(review): multiple lines are missing from this
 * fragment (the header MOV call, the gen condition before the first
 * msg_type assignment, and the tail of brw_set_dp_write_message()).
 */
438 vec4_visitor::generate_scratch_write(vec4_instruction
*inst
,
441 struct brw_reg index
)
443 /* If the instruction is predicated, we'll predicate the send, not
446 brw_set_predicate_control(p
, false);
/* Gen6+: copy g0 into the message header with masking disabled. */
448 if (intel
->gen
>= 6) {
449 brw_push_insn_state(p
);
450 brw_set_mask_control(p
, BRW_MASK_DISABLE
);
452 retype(brw_message_reg(inst
->base_mrf
), BRW_REGISTER_TYPE_D
),
453 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_D
));
454 brw_pop_insn_state(p
);
/* Per-vertex block offsets in MRF+1, payload data in MRF+2. */
457 generate_oword_dual_block_offsets(brw_message_reg(inst
->base_mrf
+ 1),
461 retype(brw_message_reg(inst
->base_mrf
+ 2), BRW_REGISTER_TYPE_D
),
462 retype(src
, BRW_REGISTER_TYPE_D
));
/* Select the generation-specific dataport write message encoding. */
467 msg_type
= GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE
;
469 msg_type
= BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE
;
/* Re-apply the instruction's predicate so it gates the send itself. */
471 brw_set_predicate_control(p
, inst
->predicate
);
473 /* Each of the 8 channel enables is considered for whether each
476 struct brw_instruction
*send
= brw_next_insn(p
, BRW_OPCODE_SEND
);
477 brw_set_dest(p
, send
, dst
);
478 brw_set_src0(p
, send
, brw_message_reg(inst
->base_mrf
));
479 brw_set_dp_write_message(p
, send
,
480 255, /* binding table index: stateless access */
481 BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD
,
484 true, /* header present */
485 false, /* pixel scoreboard */
/* Dispatches a VS-specific (non-general-purpose) opcode to the matching
 * generation helper: math ops, URB write, and scratch read/write.
 * NOTE(review): break statements, else lines, and closing braces between
 * cases are missing from this fragment of the listing.
 */
492 vec4_visitor::generate_vs_instruction(vec4_instruction
*instruction
,
496 vec4_instruction
*inst
= (vec4_instruction
*)instruction
;
498 switch (inst
->opcode
) {
/* Single-source math: gen6 uses the EU math instruction, earlier gens
 * use the math message unit.
 */
499 case SHADER_OPCODE_RCP
:
500 case SHADER_OPCODE_RSQ
:
501 case SHADER_OPCODE_SQRT
:
502 case SHADER_OPCODE_EXP2
:
503 case SHADER_OPCODE_LOG2
:
504 case SHADER_OPCODE_SIN
:
505 case SHADER_OPCODE_COS
:
506 if (intel
->gen
>= 6) {
507 generate_math1_gen6(inst
, dst
, src
[0]);
509 generate_math1_gen4(inst
, dst
, src
[0]);
513 case SHADER_OPCODE_POW
:
514 if (intel
->gen
>= 6) {
515 generate_math2_gen6(inst
, dst
, src
[0], src
[1]);
517 generate_math2_gen4(inst
, dst
, src
[0], src
[1]);
521 case VS_OPCODE_URB_WRITE
:
522 generate_urb_write(inst
);
525 case VS_OPCODE_SCRATCH_READ
:
526 generate_scratch_read(inst
, dst
, src
[0]);
529 case VS_OPCODE_SCRATCH_WRITE
:
530 generate_scratch_write(inst
, dst
, src
[0], src
[1]);
/* Unknown opcode: report by name when it is a core BRW opcode,
 * otherwise by number.
 */
534 if (inst
->opcode
< (int)ARRAY_SIZE(brw_opcodes
)) {
535 fail("unsupported opcode in `%s' in VS\n",
536 brw_opcodes
[inst
->opcode
].name
);
538 fail("Unsupported opcode %d in VS", inst
->opcode
);
/* NOTE(review): fragment of the visitor's top-level compile driver
 * (its function header is not visible in this listing — presumably
 * vec4_visitor::run(); confirm against the full source). It builds IR
 * from the GLSL shader, moves array access to scratch, runs dead-code
 * elimination to a fixed point, and sets the default emit access mode.
 */
546 /* Generate VS IR for main(). (the visitor only descends into
547 * functions called "main").
549 visit_instructions(shader
->ir
);
553 /* Before any optimization, push array accesses out to scratch
554 * space where we need them to be. This pass may allocate new
555 * virtual GRFs, so we want to do it early. It also makes sure
556 * that we have reladdr computations available for CSE, since we'll
557 * often do repeated subexpressions for those.
559 move_grf_array_access_to_scratch();
/* Part of an optimization loop — the enclosing do/while lines are
 * missing from this fragment.
 */
564 progress
= dead_code_eliminate() || progress
;
576 brw_set_access_mode(p
, BRW_ALIGN_16
);
/* Walks the vec4 IR instruction list and emits native gen code through
 * the brw_eu assembler ('p'), handling general ALU opcodes inline,
 * control flow (IF/ELSE/ENDIF/DO/BREAK/CONT/WHILE) with a dynamically
 * grown loop stack, and deferring VS-specific opcodes to
 * generate_vs_instruction(). Optionally disassembles as it goes under
 * INTEL_DEBUG=vs. NOTE(review): this listing is fragmentary — case
 * labels, break statements, and closing braces are missing between the
 * numbered fragments throughout.
 */
584 vec4_visitor::generate_code()
586 int last_native_inst
= p
->nr_insn
;
/* Annotation state: only print IR/string annotations when they change. */
587 const char *last_annotation_string
= NULL
;
588 ir_instruction
*last_annotation_ir
= NULL
;
/* Loop bookkeeping: DO instruction per open loop, plus the IF-nesting
 * depth inside each loop (needed for BREAK/CONT emission pre-gen6).
 */
590 int loop_stack_array_size
= 16;
591 int loop_stack_depth
= 0;
592 brw_instruction
**loop_stack
=
593 rzalloc_array(this->mem_ctx
, brw_instruction
*, loop_stack_array_size
);
594 int *if_depth_in_loop
=
595 rzalloc_array(this->mem_ctx
, int, loop_stack_array_size
);
598 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
599 printf("Native code for vertex shader %d:\n", prog
->Name
);
602 foreach_list(node
, &this->instructions
) {
603 vec4_instruction
*inst
= (vec4_instruction
*)node
;
604 struct brw_reg src
[3], dst
;
/* Debug: print the originating IR / annotation when it changes. */
606 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
607 if (last_annotation_ir
!= inst
->ir
) {
608 last_annotation_ir
= inst
->ir
;
609 if (last_annotation_ir
) {
611 last_annotation_ir
->print();
615 if (last_annotation_string
!= inst
->annotation
) {
616 last_annotation_string
= inst
->annotation
;
617 if (last_annotation_string
)
618 printf(" %s\n", last_annotation_string
);
/* Lower abstract operands to hardware registers. */
622 for (unsigned int i
= 0; i
< 3; i
++) {
623 src
[i
] = inst
->get_src(i
);
625 dst
= inst
->get_dst();
/* Transfer per-instruction state to the assembler before emitting. */
627 brw_set_conditionalmod(p
, inst
->conditional_mod
);
628 brw_set_predicate_control(p
, inst
->predicate
);
629 brw_set_predicate_inverse(p
, inst
->predicate_inverse
);
630 brw_set_saturate(p
, inst
->saturate
);
632 switch (inst
->opcode
) {
634 brw_MOV(p
, dst
, src
[0]);
637 brw_ADD(p
, dst
, src
[0], src
[1]);
640 brw_MUL(p
, dst
, src
[0], src
[1]);
/* MACH needs the accumulator write enabled around it. */
642 case BRW_OPCODE_MACH
:
643 brw_set_acc_write_control(p
, 1);
644 brw_MACH(p
, dst
, src
[0], src
[1]);
645 brw_set_acc_write_control(p
, 0);
649 brw_FRC(p
, dst
, src
[0]);
651 case BRW_OPCODE_RNDD
:
652 brw_RNDD(p
, dst
, src
[0]);
654 case BRW_OPCODE_RNDE
:
655 brw_RNDE(p
, dst
, src
[0]);
657 case BRW_OPCODE_RNDZ
:
658 brw_RNDZ(p
, dst
, src
[0]);
662 brw_AND(p
, dst
, src
[0], src
[1]);
665 brw_OR(p
, dst
, src
[0], src
[1]);
668 brw_XOR(p
, dst
, src
[0], src
[1]);
671 brw_NOT(p
, dst
, src
[0]);
674 brw_ASR(p
, dst
, src
[0], src
[1]);
677 brw_SHR(p
, dst
, src
[0], src
[1]);
680 brw_SHL(p
, dst
, src
[0], src
[1]);
684 brw_CMP(p
, dst
, inst
->conditional_mod
, src
[0], src
[1]);
687 brw_SEL(p
, dst
, src
[0], src
[1]);
691 brw_DP4(p
, dst
, src
[0], src
[1]);
695 brw_DP3(p
, dst
, src
[0], src
[1]);
699 brw_DP2(p
, dst
, src
[0], src
[1]);
/* IF: gen6 can fold a compare into the IF; otherwise emit a plain
 * predicated IF.
 */
703 if (inst
->src
[0].file
!= BAD_FILE
) {
704 /* The instruction has an embedded compare (only allowed on gen6) */
705 assert(intel
->gen
== 6);
706 gen6_IF(p
, inst
->conditional_mod
, src
[0], src
[1]);
708 struct brw_instruction
*brw_inst
= brw_IF(p
, BRW_EXECUTE_8
);
709 brw_inst
->header
.predicate_control
= inst
->predicate
;
711 if_depth_in_loop
[loop_stack_depth
]++;
714 case BRW_OPCODE_ELSE
:
717 case BRW_OPCODE_ENDIF
:
719 if_depth_in_loop
[loop_stack_depth
]--;
/* DO: push the loop start and grow the stacks if needed. */
723 loop_stack
[loop_stack_depth
++] = brw_DO(p
, BRW_EXECUTE_8
);
724 if (loop_stack_array_size
<= loop_stack_depth
) {
725 loop_stack_array_size
*= 2;
726 loop_stack
= reralloc(this->mem_ctx
, loop_stack
, brw_instruction
*,
727 loop_stack_array_size
);
728 if_depth_in_loop
= reralloc(this->mem_ctx
, if_depth_in_loop
, int,
729 loop_stack_array_size
);
731 if_depth_in_loop
[loop_stack_depth
] = 0;
734 case BRW_OPCODE_BREAK
:
735 brw_BREAK(p
, if_depth_in_loop
[loop_stack_depth
]);
736 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
738 case BRW_OPCODE_CONTINUE
:
739 /* FINISHME: We need to write the loop instruction support still. */
741 gen6_CONT(p
, loop_stack
[loop_stack_depth
- 1]);
743 brw_CONT(p
, if_depth_in_loop
[loop_stack_depth
]);
744 brw_set_predicate_control(p
, BRW_PREDICATE_NONE
);
747 case BRW_OPCODE_WHILE
: {
748 struct brw_instruction
*inst0
, *inst1
;
/* The branch-distance scale factor 'br' and the loop_stack_depth
 * decrement appear to be on lines missing from this fragment.
 */
754 assert(loop_stack_depth
> 0);
756 inst0
= inst1
= brw_WHILE(p
, loop_stack
[loop_stack_depth
]);
757 if (intel
->gen
< 6) {
758 /* patch all the BREAK/CONT instructions from last BGNLOOP */
759 while (inst0
> loop_stack
[loop_stack_depth
]) {
/* An unpatched BREAK/CONT has jump_count == 0; fix it up to jump
 * past (BREAK) or to (CONT) the WHILE.
 */
761 if (inst0
->header
.opcode
== BRW_OPCODE_BREAK
&&
762 inst0
->bits3
.if_else
.jump_count
== 0) {
763 inst0
->bits3
.if_else
.jump_count
= br
* (inst1
- inst0
+ 1);
765 else if (inst0
->header
.opcode
== BRW_OPCODE_CONTINUE
&&
766 inst0
->bits3
.if_else
.jump_count
== 0) {
767 inst0
->bits3
.if_else
.jump_count
= br
* (inst1
- inst0
);
/* Default: anything else is a VS-specific opcode. */
775 generate_vs_instruction(inst
, dst
, src
);
/* Debug: dump raw dwords and disassembly for what was just emitted. */
779 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
780 for (unsigned int i
= last_native_inst
; i
< p
->nr_insn
; i
++) {
782 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
783 ((uint32_t *)&p
->store
[i
])[3],
784 ((uint32_t *)&p
->store
[i
])[2],
785 ((uint32_t *)&p
->store
[i
])[1],
786 ((uint32_t *)&p
->store
[i
])[0]);
788 brw_disasm(stdout
, &p
->store
[i
], intel
->gen
);
792 last_native_inst
= p
->nr_insn
;
795 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
799 ralloc_free(loop_stack
);
800 ralloc_free(if_depth_in_loop
);
804 /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
805 * emit issues, it doesn't get the jump distances into the output,
806 * which is often something we want to debug. So this is here in
807 * case you're doing that.
810 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
811 for (unsigned int i
= 0; i
< p
->nr_insn
; i
++) {
812 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
813 ((uint32_t *)&p
->store
[i
])[3],
814 ((uint32_t *)&p
->store
[i
])[2],
815 ((uint32_t *)&p
->store
[i
])[1],
816 ((uint32_t *)&p
->store
[i
])[0]);
817 brw_disasm(stdout
, &p
->store
[i
], intel
->gen
);
/* Entry point for the vec4 VS backend: optionally dumps the GLSL IR,
 * runs the vec4_visitor over the linked vertex shader, and on failure
 * clears the program's link status and appends the failure message.
 * NOTE(review): the run() invocation, the failure check, and the return
 * are on lines missing from this fragment of the listing.
 */
826 brw_vs_emit(struct gl_shader_program
*prog
, struct brw_vs_compile
*c
)
831 struct brw_shader
*shader
=
832 (brw_shader
*) prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
836 if (unlikely(INTEL_DEBUG
& DEBUG_VS
)) {
837 printf("GLSL IR for native vertex shader %d:\n", prog
->Name
);
838 _mesa_print_ir(shader
->ir
, NULL
);
842 vec4_visitor
v(c
, prog
, shader
);
/* Failure path: surface the visitor's message through the GL link log. */
844 prog
->LinkStatus
= GL_FALSE
;
845 ralloc_strcat(&prog
->InfoLog
, v
.fail_msg
);
854 } /* namespace brw */