2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
27 * \file ir_to_mesa.cpp
29 * Translates the IR to ARB_fragment_program text if possible,
35 #include "ir_visitor.h"
36 #include "ir_print_visitor.h"
37 #include "ir_expression_flattening.h"
38 #include "glsl_types.h"
39 #include "glsl_parser_extras.h"
40 #include "../glsl/program.h"
41 #include "ir_optimization.h"
45 #include "main/mtypes.h"
46 #include "shader/prog_instruction.h"
47 #include "shader/prog_optimize.h"
48 #include "shader/prog_print.h"
49 #include "shader/program.h"
50 #include "shader/prog_uniform.h"
51 #include "shader/prog_parameter.h"
52 #include "shader/shader_api.h"
56 * This struct is a corresponding struct to Mesa prog_src_register, with
59 typedef struct ir_to_mesa_src_reg
{
60 int file
; /**< PROGRAM_* from Mesa */
61 int index
; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
62 GLuint swizzle
; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
63 int negate
; /**< NEGATE_XYZW mask from mesa */
64 /** Register index should be offset by the integer in this reg. */
65 ir_to_mesa_src_reg
*reladdr
;
68 typedef struct ir_to_mesa_dst_reg
{
69 int file
; /**< PROGRAM_* from Mesa */
70 int index
; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
71 int writemask
; /**< Bitfield of WRITEMASK_[XYZW] */
73 /** Register index should be offset by the integer in this reg. */
74 ir_to_mesa_src_reg
*reladdr
;
77 extern ir_to_mesa_src_reg ir_to_mesa_undef
;
79 class ir_to_mesa_instruction
: public exec_node
{
82 ir_to_mesa_dst_reg dst_reg
;
83 ir_to_mesa_src_reg src_reg
[3];
84 /** Pointer to the ir source this tree came from for debugging */
86 GLboolean cond_update
;
87 int sampler
; /**< sampler index */
88 int tex_target
; /**< One of TEXTURE_*_INDEX */
91 class function_entry
*function
; /* Set on OPCODE_CAL or OPCODE_BGNSUB */
94 class variable_storage
: public exec_node
{
96 variable_storage(ir_variable
*var
, int file
, int index
)
97 : file(file
), index(index
), var(var
)
104 ir_variable
*var
; /* variable that maps to this, if any */
107 class function_entry
: public exec_node
{
109 ir_function_signature
*sig
;
112 * identifier of this function signature used by the program.
114 * At the point that Mesa instructions for function calls are
115 * generated, we don't know the address of the first instruction of
116 * the function body. So we make the BranchTarget that is called a
117 * small integer and rewrite them during set_branchtargets().
122 * Pointer to first instruction of the function body.
124 * Set during function body emits after main() is processed.
126 ir_to_mesa_instruction
*bgn_inst
;
129 * Index of the first instruction of the function body in actual
132 * Set after convertion from ir_to_mesa_instruction to prog_instruction.
136 /** Storage for the return value. */
137 ir_to_mesa_src_reg return_reg
;
140 class ir_to_mesa_visitor
: public ir_visitor
{
142 ir_to_mesa_visitor();
144 function_entry
*current_function
;
147 struct gl_program
*prog
;
151 variable_storage
*find_variable_storage(ir_variable
*var
);
153 function_entry
*get_function_signature(ir_function_signature
*sig
);
155 ir_to_mesa_src_reg
get_temp(const glsl_type
*type
);
156 void reladdr_to_temp(ir_instruction
*ir
,
157 ir_to_mesa_src_reg
*reg
, int *num_reladdr
);
159 struct ir_to_mesa_src_reg
src_reg_for_float(float val
);
162 * \name Visit methods
164 * As typical for the visitor pattern, there must be one \c visit method for
165 * each concrete subclass of \c ir_instruction. Virtual base classes within
166 * the hierarchy should not have \c visit methods.
169 virtual void visit(ir_variable
*);
170 virtual void visit(ir_loop
*);
171 virtual void visit(ir_loop_jump
*);
172 virtual void visit(ir_function_signature
*);
173 virtual void visit(ir_function
*);
174 virtual void visit(ir_expression
*);
175 virtual void visit(ir_swizzle
*);
176 virtual void visit(ir_dereference_variable
*);
177 virtual void visit(ir_dereference_array
*);
178 virtual void visit(ir_dereference_record
*);
179 virtual void visit(ir_assignment
*);
180 virtual void visit(ir_constant
*);
181 virtual void visit(ir_call
*);
182 virtual void visit(ir_return
*);
183 virtual void visit(ir_discard
*);
184 virtual void visit(ir_texture
*);
185 virtual void visit(ir_if
*);
188 struct ir_to_mesa_src_reg result
;
190 /** List of variable_storage */
193 /** List of function_entry */
194 exec_list function_signatures
;
195 int next_signature_id
;
197 /** List of ir_to_mesa_instruction */
198 exec_list instructions
;
200 ir_to_mesa_instruction
*ir_to_mesa_emit_op0(ir_instruction
*ir
,
201 enum prog_opcode op
);
203 ir_to_mesa_instruction
*ir_to_mesa_emit_op1(ir_instruction
*ir
,
205 ir_to_mesa_dst_reg dst
,
206 ir_to_mesa_src_reg src0
);
208 ir_to_mesa_instruction
*ir_to_mesa_emit_op2(ir_instruction
*ir
,
210 ir_to_mesa_dst_reg dst
,
211 ir_to_mesa_src_reg src0
,
212 ir_to_mesa_src_reg src1
);
214 ir_to_mesa_instruction
*ir_to_mesa_emit_op3(ir_instruction
*ir
,
216 ir_to_mesa_dst_reg dst
,
217 ir_to_mesa_src_reg src0
,
218 ir_to_mesa_src_reg src1
,
219 ir_to_mesa_src_reg src2
);
221 void ir_to_mesa_emit_scalar_op1(ir_instruction
*ir
,
223 ir_to_mesa_dst_reg dst
,
224 ir_to_mesa_src_reg src0
);
226 void ir_to_mesa_emit_scalar_op2(ir_instruction
*ir
,
228 ir_to_mesa_dst_reg dst
,
229 ir_to_mesa_src_reg src0
,
230 ir_to_mesa_src_reg src1
);
232 GLboolean
try_emit_mad(ir_expression
*ir
,
236 int sampler_map_size
;
238 void map_sampler(int location
, int sampler
);
239 int get_sampler_number(int location
);
244 ir_to_mesa_src_reg ir_to_mesa_undef
= {
245 PROGRAM_UNDEFINED
, 0, SWIZZLE_NOOP
, NEGATE_NONE
, NULL
,
248 ir_to_mesa_dst_reg ir_to_mesa_undef_dst
= {
249 PROGRAM_UNDEFINED
, 0, SWIZZLE_NOOP
, COND_TR
, NULL
,
252 ir_to_mesa_dst_reg ir_to_mesa_address_reg
= {
253 PROGRAM_ADDRESS
, 0, WRITEMASK_X
, COND_TR
, NULL
256 static int swizzle_for_size(int size
)
258 int size_swizzles
[4] = {
259 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
),
260 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Y
, SWIZZLE_Y
),
261 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_Z
),
262 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_W
),
265 return size_swizzles
[size
- 1];
268 ir_to_mesa_instruction
*
269 ir_to_mesa_visitor::ir_to_mesa_emit_op3(ir_instruction
*ir
,
271 ir_to_mesa_dst_reg dst
,
272 ir_to_mesa_src_reg src0
,
273 ir_to_mesa_src_reg src1
,
274 ir_to_mesa_src_reg src2
)
276 ir_to_mesa_instruction
*inst
= new(mem_ctx
) ir_to_mesa_instruction();
279 /* If we have to do relative addressing, we want to load the ARL
280 * reg directly for one of the regs, and preload the other reladdr
281 * sources into temps.
283 num_reladdr
+= dst
.reladdr
!= NULL
;
284 num_reladdr
+= src0
.reladdr
!= NULL
;
285 num_reladdr
+= src1
.reladdr
!= NULL
;
286 num_reladdr
+= src2
.reladdr
!= NULL
;
288 reladdr_to_temp(ir
, &src2
, &num_reladdr
);
289 reladdr_to_temp(ir
, &src1
, &num_reladdr
);
290 reladdr_to_temp(ir
, &src0
, &num_reladdr
);
293 ir_to_mesa_emit_op1(ir
, OPCODE_ARL
, ir_to_mesa_address_reg
,
298 assert(num_reladdr
== 0);
302 inst
->src_reg
[0] = src0
;
303 inst
->src_reg
[1] = src1
;
304 inst
->src_reg
[2] = src2
;
307 inst
->function
= NULL
;
309 this->instructions
.push_tail(inst
);
315 ir_to_mesa_instruction
*
316 ir_to_mesa_visitor::ir_to_mesa_emit_op2(ir_instruction
*ir
,
318 ir_to_mesa_dst_reg dst
,
319 ir_to_mesa_src_reg src0
,
320 ir_to_mesa_src_reg src1
)
322 return ir_to_mesa_emit_op3(ir
, op
, dst
, src0
, src1
, ir_to_mesa_undef
);
325 ir_to_mesa_instruction
*
326 ir_to_mesa_visitor::ir_to_mesa_emit_op1(ir_instruction
*ir
,
328 ir_to_mesa_dst_reg dst
,
329 ir_to_mesa_src_reg src0
)
331 return ir_to_mesa_emit_op3(ir
, op
, dst
,
332 src0
, ir_to_mesa_undef
, ir_to_mesa_undef
);
335 ir_to_mesa_instruction
*
336 ir_to_mesa_visitor::ir_to_mesa_emit_op0(ir_instruction
*ir
,
339 return ir_to_mesa_emit_op3(ir
, op
, ir_to_mesa_undef_dst
,
346 ir_to_mesa_visitor::map_sampler(int location
, int sampler
)
348 if (this->sampler_map_size
<= location
) {
349 this->sampler_map
= talloc_realloc(this->mem_ctx
, this->sampler_map
,
351 this->sampler_map_size
= location
+ 1;
354 this->sampler_map
[location
] = sampler
;
358 ir_to_mesa_visitor::get_sampler_number(int location
)
360 assert(location
< this->sampler_map_size
);
361 return this->sampler_map
[location
];
364 inline ir_to_mesa_dst_reg
365 ir_to_mesa_dst_reg_from_src(ir_to_mesa_src_reg reg
)
367 ir_to_mesa_dst_reg dst_reg
;
369 dst_reg
.file
= reg
.file
;
370 dst_reg
.index
= reg
.index
;
371 dst_reg
.writemask
= WRITEMASK_XYZW
;
372 dst_reg
.cond_mask
= COND_TR
;
373 dst_reg
.reladdr
= reg
.reladdr
;
378 inline ir_to_mesa_src_reg
379 ir_to_mesa_src_reg_from_dst(ir_to_mesa_dst_reg reg
)
381 ir_to_mesa_src_reg src_reg
;
383 src_reg
.file
= reg
.file
;
384 src_reg
.index
= reg
.index
;
385 src_reg
.swizzle
= SWIZZLE_XYZW
;
387 src_reg
.reladdr
= reg
.reladdr
;
393 * Emits Mesa scalar opcodes to produce unique answers across channels.
395 * Some Mesa opcodes are scalar-only, like ARB_fp/vp. The src X
396 * channel determines the result across all channels. So to do a vec4
397 * of this operation, we want to emit a scalar per source channel used
398 * to produce dest channels.
401 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op2(ir_instruction
*ir
,
403 ir_to_mesa_dst_reg dst
,
404 ir_to_mesa_src_reg orig_src0
,
405 ir_to_mesa_src_reg orig_src1
)
408 int done_mask
= ~dst
.writemask
;
410 /* Mesa RCP is a scalar operation splatting results to all channels,
411 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
414 for (i
= 0; i
< 4; i
++) {
415 GLuint this_mask
= (1 << i
);
416 ir_to_mesa_instruction
*inst
;
417 ir_to_mesa_src_reg src0
= orig_src0
;
418 ir_to_mesa_src_reg src1
= orig_src1
;
420 if (done_mask
& this_mask
)
423 GLuint src0_swiz
= GET_SWZ(src0
.swizzle
, i
);
424 GLuint src1_swiz
= GET_SWZ(src1
.swizzle
, i
);
425 for (j
= i
+ 1; j
< 4; j
++) {
426 if (!(done_mask
& (1 << j
)) &&
427 GET_SWZ(src0
.swizzle
, j
) == src0_swiz
&&
428 GET_SWZ(src1
.swizzle
, j
) == src1_swiz
) {
429 this_mask
|= (1 << j
);
432 src0
.swizzle
= MAKE_SWIZZLE4(src0_swiz
, src0_swiz
,
433 src0_swiz
, src0_swiz
);
434 src1
.swizzle
= MAKE_SWIZZLE4(src1_swiz
, src1_swiz
,
435 src1_swiz
, src1_swiz
);
437 inst
= ir_to_mesa_emit_op2(ir
, op
,
441 inst
->dst_reg
.writemask
= this_mask
;
442 done_mask
|= this_mask
;
447 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op1(ir_instruction
*ir
,
449 ir_to_mesa_dst_reg dst
,
450 ir_to_mesa_src_reg src0
)
452 ir_to_mesa_src_reg undef
= ir_to_mesa_undef
;
454 undef
.swizzle
= SWIZZLE_XXXX
;
456 ir_to_mesa_emit_scalar_op2(ir
, op
, dst
, src0
, undef
);
459 struct ir_to_mesa_src_reg
460 ir_to_mesa_visitor::src_reg_for_float(float val
)
462 ir_to_mesa_src_reg src_reg
;
464 src_reg
.file
= PROGRAM_CONSTANT
;
465 src_reg
.index
= _mesa_add_unnamed_constant(this->prog
->Parameters
,
466 &val
, 1, &src_reg
.swizzle
);
467 src_reg
.reladdr
= NULL
;
474 type_size(const struct glsl_type
*type
)
479 switch (type
->base_type
) {
482 case GLSL_TYPE_FLOAT
:
484 if (type
->is_matrix()) {
485 return type
->matrix_columns
;
487 /* Regardless of size of vector, it gets a vec4. This is bad
488 * packing for things like floats, but otherwise arrays become a
489 * mess. Hopefully a later pass over the code can pack scalars
490 * down if appropriate.
494 case GLSL_TYPE_ARRAY
:
495 return type_size(type
->fields
.array
) * type
->length
;
496 case GLSL_TYPE_STRUCT
:
498 for (i
= 0; i
< type
->length
; i
++) {
499 size
+= type_size(type
->fields
.structure
[i
].type
);
508 * In the initial pass of codegen, we assign temporary numbers to
509 * intermediate results. (not SSA -- variable assignments will reuse
510 * storage). Actual register allocation for the Mesa VM occurs in a
511 * pass over the Mesa IR later.
514 ir_to_mesa_visitor::get_temp(const glsl_type
*type
)
516 ir_to_mesa_src_reg src_reg
;
520 assert(!type
->is_array());
522 src_reg
.file
= PROGRAM_TEMPORARY
;
523 src_reg
.index
= next_temp
;
524 src_reg
.reladdr
= NULL
;
525 next_temp
+= type_size(type
);
527 for (i
= 0; i
< type
->vector_elements
; i
++)
530 swizzle
[i
] = type
->vector_elements
- 1;
531 src_reg
.swizzle
= MAKE_SWIZZLE4(swizzle
[0], swizzle
[1],
532 swizzle
[2], swizzle
[3]);
539 ir_to_mesa_visitor::find_variable_storage(ir_variable
*var
)
542 variable_storage
*entry
;
544 foreach_iter(exec_list_iterator
, iter
, this->variables
) {
545 entry
= (variable_storage
*)iter
.get();
547 if (entry
->var
== var
)
555 ir_to_mesa_visitor::visit(ir_variable
*ir
)
561 ir_to_mesa_visitor::visit(ir_loop
*ir
)
565 assert(!ir
->increment
);
566 assert(!ir
->counter
);
568 ir_to_mesa_emit_op0(NULL
, OPCODE_BGNLOOP
);
569 visit_exec_list(&ir
->body_instructions
, this);
570 ir_to_mesa_emit_op0(NULL
, OPCODE_ENDLOOP
);
574 ir_to_mesa_visitor::visit(ir_loop_jump
*ir
)
577 case ir_loop_jump::jump_break
:
578 ir_to_mesa_emit_op0(NULL
, OPCODE_BRK
);
580 case ir_loop_jump::jump_continue
:
581 ir_to_mesa_emit_op0(NULL
, OPCODE_CONT
);
588 ir_to_mesa_visitor::visit(ir_function_signature
*ir
)
595 ir_to_mesa_visitor::visit(ir_function
*ir
)
597 /* Ignore function bodies other than main() -- we shouldn't see calls to
598 * them since they should all be inlined before we get to ir_to_mesa.
600 if (strcmp(ir
->name
, "main") == 0) {
601 const ir_function_signature
*sig
;
604 sig
= ir
->matching_signature(&empty
);
608 foreach_iter(exec_list_iterator
, iter
, sig
->body
) {
609 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
617 ir_to_mesa_visitor::try_emit_mad(ir_expression
*ir
, int mul_operand
)
619 int nonmul_operand
= 1 - mul_operand
;
620 ir_to_mesa_src_reg a
, b
, c
;
622 ir_expression
*expr
= ir
->operands
[mul_operand
]->as_expression();
623 if (!expr
|| expr
->operation
!= ir_binop_mul
)
626 expr
->operands
[0]->accept(this);
628 expr
->operands
[1]->accept(this);
630 ir
->operands
[nonmul_operand
]->accept(this);
633 this->result
= get_temp(ir
->type
);
634 ir_to_mesa_emit_op3(ir
, OPCODE_MAD
,
635 ir_to_mesa_dst_reg_from_src(this->result
), a
, b
, c
);
641 ir_to_mesa_visitor::reladdr_to_temp(ir_instruction
*ir
,
642 ir_to_mesa_src_reg
*reg
, int *num_reladdr
)
647 ir_to_mesa_emit_op1(ir
, OPCODE_ARL
, ir_to_mesa_address_reg
, *reg
->reladdr
);
649 if (*num_reladdr
!= 1) {
650 ir_to_mesa_src_reg temp
= get_temp(glsl_type::vec4_type
);
652 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
,
653 ir_to_mesa_dst_reg_from_src(temp
), *reg
);
661 ir_to_mesa_visitor::visit(ir_expression
*ir
)
663 unsigned int operand
;
664 struct ir_to_mesa_src_reg op
[2];
665 struct ir_to_mesa_src_reg result_src
;
666 struct ir_to_mesa_dst_reg result_dst
;
667 const glsl_type
*vec4_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
, 4, 1);
668 const glsl_type
*vec3_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
, 3, 1);
669 const glsl_type
*vec2_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
, 2, 1);
671 /* Quick peephole: Emit OPCODE_MAD(a, b, c) instead of ADD(MUL(a, b), c)
673 if (ir
->operation
== ir_binop_add
) {
674 if (try_emit_mad(ir
, 1))
676 if (try_emit_mad(ir
, 0))
680 for (operand
= 0; operand
< ir
->get_num_operands(); operand
++) {
681 this->result
.file
= PROGRAM_UNDEFINED
;
682 ir
->operands
[operand
]->accept(this);
683 if (this->result
.file
== PROGRAM_UNDEFINED
) {
685 printf("Failed to get tree for expression operand:\n");
686 ir
->operands
[operand
]->accept(&v
);
689 op
[operand
] = this->result
;
691 /* Matrix expression operands should have been broken down to vector
692 * operations already.
694 assert(!ir
->operands
[operand
]->type
->is_matrix());
697 this->result
.file
= PROGRAM_UNDEFINED
;
699 /* Storage for our result. Ideally for an assignment we'd be using
700 * the actual storage for the result here, instead.
702 result_src
= get_temp(ir
->type
);
703 /* convenience for the emit functions below. */
704 result_dst
= ir_to_mesa_dst_reg_from_src(result_src
);
705 /* Limit writes to the channels that will be used by result_src later.
706 * This does limit this temp's use as a temporary for multi-instruction
709 result_dst
.writemask
= (1 << ir
->type
->vector_elements
) - 1;
711 switch (ir
->operation
) {
712 case ir_unop_logic_not
:
713 ir_to_mesa_emit_op2(ir
, OPCODE_SEQ
, result_dst
,
714 op
[0], src_reg_for_float(0.0));
717 op
[0].negate
= ~op
[0].negate
;
721 ir_to_mesa_emit_op1(ir
, OPCODE_ABS
, result_dst
, op
[0]);
724 ir_to_mesa_emit_op1(ir
, OPCODE_SSG
, result_dst
, op
[0]);
727 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_RCP
, result_dst
, op
[0]);
731 ir_to_mesa_emit_scalar_op2(ir
, OPCODE_POW
, result_dst
,
732 src_reg_for_float(M_E
), op
[0]);
735 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_EX2
, result_dst
, op
[0]);
738 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_LOG
, result_dst
, op
[0]);
741 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_LG2
, result_dst
, op
[0]);
744 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_SIN
, result_dst
, op
[0]);
747 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_COS
, result_dst
, op
[0]);
751 ir_to_mesa_emit_op1(ir
, OPCODE_DDX
, result_dst
, op
[0]);
754 ir_to_mesa_emit_op1(ir
, OPCODE_DDY
, result_dst
, op
[0]);
758 ir_to_mesa_emit_op2(ir
, OPCODE_ADD
, result_dst
, op
[0], op
[1]);
761 ir_to_mesa_emit_op2(ir
, OPCODE_SUB
, result_dst
, op
[0], op
[1]);
765 ir_to_mesa_emit_op2(ir
, OPCODE_MUL
, result_dst
, op
[0], op
[1]);
768 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
770 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
774 ir_to_mesa_emit_op2(ir
, OPCODE_SLT
, result_dst
, op
[0], op
[1]);
776 case ir_binop_greater
:
777 ir_to_mesa_emit_op2(ir
, OPCODE_SGT
, result_dst
, op
[0], op
[1]);
779 case ir_binop_lequal
:
780 ir_to_mesa_emit_op2(ir
, OPCODE_SLE
, result_dst
, op
[0], op
[1]);
782 case ir_binop_gequal
:
783 ir_to_mesa_emit_op2(ir
, OPCODE_SGE
, result_dst
, op
[0], op
[1]);
786 ir_to_mesa_emit_op2(ir
, OPCODE_SEQ
, result_dst
, op
[0], op
[1]);
788 case ir_binop_logic_xor
:
789 case ir_binop_nequal
:
790 ir_to_mesa_emit_op2(ir
, OPCODE_SNE
, result_dst
, op
[0], op
[1]);
793 case ir_binop_logic_or
:
794 /* This could be a saturated add and skip the SNE. */
795 ir_to_mesa_emit_op2(ir
, OPCODE_ADD
,
799 ir_to_mesa_emit_op2(ir
, OPCODE_SNE
,
801 result_src
, src_reg_for_float(0.0));
804 case ir_binop_logic_and
:
805 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
806 ir_to_mesa_emit_op2(ir
, OPCODE_MUL
,
812 if (ir
->operands
[0]->type
== vec4_type
) {
813 assert(ir
->operands
[1]->type
== vec4_type
);
814 ir_to_mesa_emit_op2(ir
, OPCODE_DP4
,
817 } else if (ir
->operands
[0]->type
== vec3_type
) {
818 assert(ir
->operands
[1]->type
== vec3_type
);
819 ir_to_mesa_emit_op2(ir
, OPCODE_DP3
,
822 } else if (ir
->operands
[0]->type
== vec2_type
) {
823 assert(ir
->operands
[1]->type
== vec2_type
);
824 ir_to_mesa_emit_op2(ir
, OPCODE_DP2
,
831 ir_to_mesa_emit_op2(ir
, OPCODE_XPD
, result_dst
, op
[0], op
[1]);
835 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_RSQ
, result_dst
, op
[0]);
836 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_RCP
, result_dst
, result_src
);
837 /* For incoming channels < 0, set the result to 0. */
838 ir_to_mesa_emit_op3(ir
, OPCODE_CMP
, result_dst
,
839 op
[0], src_reg_for_float(0.0), result_src
);
842 ir_to_mesa_emit_scalar_op1(ir
, OPCODE_RSQ
, result_dst
, op
[0]);
847 /* Mesa IR lacks types, ints are stored as truncated floats. */
851 ir_to_mesa_emit_op1(ir
, OPCODE_TRUNC
, result_dst
, op
[0]);
855 ir_to_mesa_emit_op2(ir
, OPCODE_SNE
, result_dst
,
856 result_src
, src_reg_for_float(0.0));
859 ir_to_mesa_emit_op1(ir
, OPCODE_TRUNC
, result_dst
, op
[0]);
862 op
[0].negate
= ~op
[0].negate
;
863 ir_to_mesa_emit_op1(ir
, OPCODE_FLR
, result_dst
, op
[0]);
864 result_src
.negate
= ~result_src
.negate
;
867 ir_to_mesa_emit_op1(ir
, OPCODE_FLR
, result_dst
, op
[0]);
870 ir_to_mesa_emit_op1(ir
, OPCODE_FRC
, result_dst
, op
[0]);
874 ir_to_mesa_emit_op2(ir
, OPCODE_MIN
, result_dst
, op
[0], op
[1]);
877 ir_to_mesa_emit_op2(ir
, OPCODE_MAX
, result_dst
, op
[0], op
[1]);
880 ir_to_mesa_emit_scalar_op2(ir
, OPCODE_POW
, result_dst
, op
[0], op
[1]);
883 case ir_unop_bit_not
:
885 case ir_binop_lshift
:
886 case ir_binop_rshift
:
887 case ir_binop_bit_and
:
888 case ir_binop_bit_xor
:
889 case ir_binop_bit_or
:
890 assert(!"GLSL 1.30 features unsupported");
894 this->result
= result_src
;
899 ir_to_mesa_visitor::visit(ir_swizzle
*ir
)
901 ir_to_mesa_src_reg src_reg
;
905 /* Note that this is only swizzles in expressions, not those on the left
906 * hand side of an assignment, which do write masking. See ir_assignment
910 ir
->val
->accept(this);
911 src_reg
= this->result
;
912 assert(src_reg
.file
!= PROGRAM_UNDEFINED
);
914 for (i
= 0; i
< 4; i
++) {
915 if (i
< ir
->type
->vector_elements
) {
918 swizzle
[i
] = GET_SWZ(src_reg
.swizzle
, ir
->mask
.x
);
921 swizzle
[i
] = GET_SWZ(src_reg
.swizzle
, ir
->mask
.y
);
924 swizzle
[i
] = GET_SWZ(src_reg
.swizzle
, ir
->mask
.z
);
927 swizzle
[i
] = GET_SWZ(src_reg
.swizzle
, ir
->mask
.w
);
931 /* If the type is smaller than a vec4, replicate the last
934 swizzle
[i
] = swizzle
[ir
->type
->vector_elements
- 1];
938 src_reg
.swizzle
= MAKE_SWIZZLE4(swizzle
[0],
943 this->result
= src_reg
;
947 add_matrix_ref(struct gl_program
*prog
, int *tokens
)
952 /* Add a ref for each column. It looks like the reason we do
953 * it this way is that _mesa_add_state_reference doesn't work
954 * for things that aren't vec4s, so the tokens[2]/tokens[3]
955 * range has to be equal.
957 for (i
= 0; i
< 4; i
++) {
960 int pos
= _mesa_add_state_reference(prog
->Parameters
,
961 (gl_state_index
*)tokens
);
965 assert(base_pos
+ i
== pos
);
971 static variable_storage
*
972 get_builtin_matrix_ref(void *mem_ctx
, struct gl_program
*prog
, ir_variable
*var
,
973 ir_rvalue
*array_index
)
976 * NOTE: The ARB_vertex_program extension specified that matrices get
977 * loaded in registers in row-major order. With GLSL, we want column-
978 * major order. So, we need to transpose all matrices here...
980 static const struct {
985 { "gl_ModelViewMatrix", STATE_MODELVIEW_MATRIX
, STATE_MATRIX_TRANSPOSE
},
986 { "gl_ModelViewMatrixInverse", STATE_MODELVIEW_MATRIX
, STATE_MATRIX_INVTRANS
},
987 { "gl_ModelViewMatrixTranspose", STATE_MODELVIEW_MATRIX
, 0 },
988 { "gl_ModelViewMatrixInverseTranspose", STATE_MODELVIEW_MATRIX
, STATE_MATRIX_INVERSE
},
990 { "gl_ProjectionMatrix", STATE_PROJECTION_MATRIX
, STATE_MATRIX_TRANSPOSE
},
991 { "gl_ProjectionMatrixInverse", STATE_PROJECTION_MATRIX
, STATE_MATRIX_INVTRANS
},
992 { "gl_ProjectionMatrixTranspose", STATE_PROJECTION_MATRIX
, 0 },
993 { "gl_ProjectionMatrixInverseTranspose", STATE_PROJECTION_MATRIX
, STATE_MATRIX_INVERSE
},
995 { "gl_ModelViewProjectionMatrix", STATE_MVP_MATRIX
, STATE_MATRIX_TRANSPOSE
},
996 { "gl_ModelViewProjectionMatrixInverse", STATE_MVP_MATRIX
, STATE_MATRIX_INVTRANS
},
997 { "gl_ModelViewProjectionMatrixTranspose", STATE_MVP_MATRIX
, 0 },
998 { "gl_ModelViewProjectionMatrixInverseTranspose", STATE_MVP_MATRIX
, STATE_MATRIX_INVERSE
},
1000 { "gl_TextureMatrix", STATE_TEXTURE_MATRIX
, STATE_MATRIX_TRANSPOSE
},
1001 { "gl_TextureMatrixInverse", STATE_TEXTURE_MATRIX
, STATE_MATRIX_INVTRANS
},
1002 { "gl_TextureMatrixTranspose", STATE_TEXTURE_MATRIX
, 0 },
1003 { "gl_TextureMatrixInverseTranspose", STATE_TEXTURE_MATRIX
, STATE_MATRIX_INVERSE
},
1005 { "gl_NormalMatrix", STATE_MODELVIEW_MATRIX
, STATE_MATRIX_INVERSE
},
1009 variable_storage
*entry
;
1011 /* C++ gets angry when we try to use an int as a gl_state_index, so we use
1012 * ints for gl_state_index. Make sure they're compatible.
1014 assert(sizeof(gl_state_index
) == sizeof(int));
1016 for (i
= 0; i
< Elements(matrices
); i
++) {
1017 if (strcmp(var
->name
, matrices
[i
].name
) == 0) {
1018 int tokens
[STATE_LENGTH
];
1021 tokens
[0] = matrices
[i
].matrix
;
1022 tokens
[4] = matrices
[i
].modifier
;
1023 if (matrices
[i
].matrix
== STATE_TEXTURE_MATRIX
) {
1024 ir_constant
*index
= array_index
->constant_expression_value();
1026 tokens
[1] = index
->value
.i
[0];
1027 base_pos
= add_matrix_ref(prog
, tokens
);
1029 for (i
= 0; i
< var
->type
->length
; i
++) {
1031 int pos
= add_matrix_ref(prog
, tokens
);
1035 assert(base_pos
+ (int)i
* 4 == pos
);
1039 tokens
[1] = 0; /* unused array index */
1040 base_pos
= add_matrix_ref(prog
, tokens
);
1042 tokens
[4] = matrices
[i
].modifier
;
1044 entry
= new(mem_ctx
) variable_storage(var
,
1056 ir_to_mesa_visitor::visit(ir_dereference_variable
*ir
)
1058 ir_to_mesa_src_reg src_reg
;
1059 variable_storage
*entry
= find_variable_storage(ir
->var
);
1063 switch (ir
->var
->mode
) {
1064 case ir_var_uniform
:
1065 entry
= get_builtin_matrix_ref(this->mem_ctx
, this->prog
, ir
->var
,
1070 /* FINISHME: Fix up uniform name for arrays and things */
1071 if (ir
->var
->type
->base_type
== GLSL_TYPE_SAMPLER
) {
1072 /* FINISHME: we whack the location of the var here, which
1073 * is probably not expected. But we need to communicate
1074 * mesa's sampler number to the tex instruction.
1076 int sampler
= _mesa_add_sampler(this->prog
->Parameters
,
1078 ir
->var
->type
->gl_type
);
1079 map_sampler(ir
->var
->location
, sampler
);
1081 entry
= new(mem_ctx
) variable_storage(ir
->var
, PROGRAM_SAMPLER
,
1083 this->variables
.push_tail(entry
);
1087 assert(ir
->var
->type
->gl_type
!= 0 &&
1088 ir
->var
->type
->gl_type
!= GL_INVALID_ENUM
);
1089 loc
= _mesa_add_uniform(this->prog
->Parameters
,
1091 type_size(ir
->var
->type
) * 4,
1092 ir
->var
->type
->gl_type
,
1095 /* Always mark the uniform used at this point. If it isn't
1096 * used, dead code elimination should have nuked the decl already.
1098 this->prog
->Parameters
->Parameters
[loc
].Used
= GL_TRUE
;
1100 entry
= new(mem_ctx
) variable_storage(ir
->var
, PROGRAM_UNIFORM
, loc
);
1101 this->variables
.push_tail(entry
);
1106 /* The linker assigns locations for varyings and attributes,
1107 * including deprecated builtins (like gl_Color), user-assign
1108 * generic attributes (glBindVertexLocation), and
1109 * user-defined varyings.
1111 * FINISHME: We would hit this path for function arguments. Fix!
1113 assert(ir
->var
->location
!= -1);
1114 if (ir
->var
->mode
== ir_var_in
||
1115 ir
->var
->mode
== ir_var_inout
) {
1116 entry
= new(mem_ctx
) variable_storage(ir
->var
,
1120 if (this->prog
->Target
== GL_VERTEX_PROGRAM_ARB
&&
1121 ir
->var
->location
>= VERT_ATTRIB_GENERIC0
) {
1122 _mesa_add_attribute(prog
->Attributes
,
1124 type_size(ir
->var
->type
) * 4,
1125 ir
->var
->type
->gl_type
,
1126 ir
->var
->location
- VERT_ATTRIB_GENERIC0
);
1129 entry
= new(mem_ctx
) variable_storage(ir
->var
,
1136 case ir_var_temporary
:
1137 entry
= new(mem_ctx
) variable_storage(ir
->var
, PROGRAM_TEMPORARY
,
1139 this->variables
.push_tail(entry
);
1141 next_temp
+= type_size(ir
->var
->type
);
1146 printf("Failed to make storage for %s\n", ir
->var
->name
);
1151 src_reg
.file
= entry
->file
;
1152 src_reg
.index
= entry
->index
;
1153 /* If the type is smaller than a vec4, replicate the last channel out. */
1154 src_reg
.swizzle
= swizzle_for_size(ir
->var
->type
->vector_elements
);
1155 src_reg
.reladdr
= NULL
;
1158 this->result
= src_reg
;
1162 ir_to_mesa_visitor::visit(ir_dereference_array
*ir
)
1165 ir_to_mesa_src_reg src_reg
;
1166 ir_dereference_variable
*deref_var
= ir
->array
->as_dereference_variable();
1167 int element_size
= type_size(ir
->type
);
1169 index
= ir
->array_index
->constant_expression_value();
1171 if (deref_var
&& strncmp(deref_var
->var
->name
,
1173 strlen("gl_TextureMatrix")) == 0) {
1174 ir_to_mesa_src_reg src_reg
;
1175 struct variable_storage
*entry
;
1177 entry
= get_builtin_matrix_ref(this->mem_ctx
, this->prog
, deref_var
->var
,
1181 src_reg
.file
= entry
->file
;
1182 src_reg
.index
= entry
->index
;
1183 src_reg
.swizzle
= swizzle_for_size(ir
->type
->vector_elements
);
1187 src_reg
.reladdr
= NULL
;
1189 ir_to_mesa_src_reg index_reg
= get_temp(glsl_type::float_type
);
1191 ir
->array_index
->accept(this);
1192 ir_to_mesa_emit_op2(ir
, OPCODE_MUL
,
1193 ir_to_mesa_dst_reg_from_src(index_reg
),
1194 this->result
, src_reg_for_float(element_size
));
1196 src_reg
.reladdr
= talloc(mem_ctx
, ir_to_mesa_src_reg
);
1197 memcpy(src_reg
.reladdr
, &index_reg
, sizeof(index_reg
));
1200 this->result
= src_reg
;
1204 ir
->array
->accept(this);
1205 src_reg
= this->result
;
1208 src_reg
.index
+= index
->value
.i
[0] * element_size
;
1210 ir_to_mesa_src_reg array_base
= this->result
;
1211 /* Variable index array dereference. It eats the "vec4" of the
1212 * base of the array and an index that offsets the Mesa register
1215 ir
->array_index
->accept(this);
1217 ir_to_mesa_src_reg index_reg
;
1219 if (element_size
== 1) {
1220 index_reg
= this->result
;
1222 index_reg
= get_temp(glsl_type::float_type
);
1224 ir_to_mesa_emit_op2(ir
, OPCODE_MUL
,
1225 ir_to_mesa_dst_reg_from_src(index_reg
),
1226 this->result
, src_reg_for_float(element_size
));
1229 src_reg
.reladdr
= talloc(mem_ctx
, ir_to_mesa_src_reg
);
1230 memcpy(src_reg
.reladdr
, &index_reg
, sizeof(index_reg
));
1233 /* If the type is smaller than a vec4, replicate the last channel out. */
1234 src_reg
.swizzle
= swizzle_for_size(ir
->type
->vector_elements
);
1236 this->result
= src_reg
;
1240 ir_to_mesa_visitor::visit(ir_dereference_record
*ir
)
1243 const glsl_type
*struct_type
= ir
->record
->type
;
1246 ir
->record
->accept(this);
1248 for (i
= 0; i
< struct_type
->length
; i
++) {
1249 if (strcmp(struct_type
->fields
.structure
[i
].name
, ir
->field
) == 0)
1251 offset
+= type_size(struct_type
->fields
.structure
[i
].type
);
1253 this->result
.index
+= offset
;
1257 * We want to be careful in assignment setup to hit the actual storage
1258 * instead of potentially using a temporary like we might with the
1259 * ir_dereference handler.
1261 * Thanks to ir_swizzle_swizzle, and ir_vec_index_to_swizzle, we
1262 * should only see potentially one variable array index of a vector,
1263 * and one swizzle, before getting to actual vec4 storage. So handle
1264 * those, then go use ir_dereference to handle the rest.
1266 static struct ir_to_mesa_dst_reg
1267 get_assignment_lhs(ir_instruction
*ir
, ir_to_mesa_visitor
*v
,
1268 ir_to_mesa_src_reg
*r
)
1270 struct ir_to_mesa_dst_reg dst_reg
;
1273 ir_dereference_array
*deref_array
= ir
->as_dereference_array();
1274 /* This should have been handled by ir_vec_index_to_cond_assign */
1276 assert(!deref_array
->array
->type
->is_vector());
1279 /* Use the rvalue deref handler for the most part. We'll ignore
1280 * swizzles in it and write swizzles using writemask, though.
1283 dst_reg
= ir_to_mesa_dst_reg_from_src(v
->result
);
1285 if ((swiz
= ir
->as_swizzle())) {
1292 int new_r_swizzle
[4];
1293 int orig_r_swizzle
= r
->swizzle
;
1296 for (i
= 0; i
< 4; i
++) {
1297 new_r_swizzle
[i
] = GET_SWZ(orig_r_swizzle
, 0);
1300 dst_reg
.writemask
= 0;
1301 for (i
= 0; i
< 4; i
++) {
1302 if (i
< swiz
->mask
.num_components
) {
1303 dst_reg
.writemask
|= 1 << swizzles
[i
];
1304 new_r_swizzle
[swizzles
[i
]] = GET_SWZ(orig_r_swizzle
, i
);
1308 r
->swizzle
= MAKE_SWIZZLE4(new_r_swizzle
[0],
1318 ir_to_mesa_visitor::visit(ir_assignment
*ir
)
1320 struct ir_to_mesa_dst_reg l
;
1321 struct ir_to_mesa_src_reg r
;
1324 assert(!ir
->lhs
->type
->is_array());
1325 assert(ir
->lhs
->type
->base_type
!= GLSL_TYPE_STRUCT
);
1327 ir
->rhs
->accept(this);
1330 l
= get_assignment_lhs(ir
->lhs
, this, &r
);
1332 assert(l
.file
!= PROGRAM_UNDEFINED
);
1333 assert(r
.file
!= PROGRAM_UNDEFINED
);
1335 if (ir
->condition
) {
1336 ir_to_mesa_src_reg condition
;
1338 ir
->condition
->accept(this);
1339 condition
= this->result
;
1341 /* We use the OPCODE_CMP (a < 0 ? b : c) for conditional moves,
1342 * and the condition we produced is 0.0 or 1.0. By flipping the
1343 * sign, we can choose which value OPCODE_CMP produces without
1344 * an extra computing the condition.
1346 condition
.negate
= ~condition
.negate
;
1347 for (i
= 0; i
< type_size(ir
->lhs
->type
); i
++) {
1348 ir_to_mesa_emit_op3(ir
, OPCODE_CMP
, l
,
1349 condition
, r
, ir_to_mesa_src_reg_from_dst(l
));
1354 for (i
= 0; i
< type_size(ir
->lhs
->type
); i
++) {
1355 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, l
, r
);
1364 ir_to_mesa_visitor::visit(ir_constant
*ir
)
1366 ir_to_mesa_src_reg src_reg
;
1367 GLfloat stack_vals
[4];
1368 GLfloat
*values
= stack_vals
;
1371 if (ir
->type
->is_array()) {
1374 assert(!"FINISHME: array constants");
1377 if (ir
->type
->is_matrix()) {
1378 /* Unfortunately, 4 floats is all we can get into
1379 * _mesa_add_unnamed_constant. So, make a temp to store the
1380 * matrix and move each constant value into it. If we get
1381 * lucky, copy propagation will eliminate the extra moves.
1383 ir_to_mesa_src_reg mat
= get_temp(glsl_type::vec4_type
);
1384 ir_to_mesa_dst_reg mat_column
= ir_to_mesa_dst_reg_from_src(mat
);
1386 for (i
= 0; i
< ir
->type
->matrix_columns
; i
++) {
1387 src_reg
.file
= PROGRAM_CONSTANT
;
1389 assert(ir
->type
->base_type
== GLSL_TYPE_FLOAT
);
1390 values
= &ir
->value
.f
[i
* ir
->type
->vector_elements
];
1392 src_reg
.index
= _mesa_add_unnamed_constant(this->prog
->Parameters
,
1394 ir
->type
->vector_elements
,
1396 src_reg
.reladdr
= NULL
;
1398 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, mat_column
, src_reg
);
1406 src_reg
.file
= PROGRAM_CONSTANT
;
1407 switch (ir
->type
->base_type
) {
1408 case GLSL_TYPE_FLOAT
:
1409 values
= &ir
->value
.f
[0];
1411 case GLSL_TYPE_UINT
:
1412 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
1413 values
[i
] = ir
->value
.u
[i
];
1417 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
1418 values
[i
] = ir
->value
.i
[i
];
1421 case GLSL_TYPE_BOOL
:
1422 for (i
= 0; i
< ir
->type
->vector_elements
; i
++) {
1423 values
[i
] = ir
->value
.b
[i
];
1427 assert(!"Non-float/uint/int/bool constant");
1430 src_reg
.index
= _mesa_add_unnamed_constant(this->prog
->Parameters
,
1431 values
, ir
->type
->vector_elements
,
1433 src_reg
.reladdr
= NULL
;
1436 this->result
= src_reg
;
1440 ir_to_mesa_visitor::get_function_signature(ir_function_signature
*sig
)
1442 function_entry
*entry
;
1444 foreach_iter(exec_list_iterator
, iter
, this->function_signatures
) {
1445 entry
= (function_entry
*)iter
.get();
1447 if (entry
->sig
== sig
)
1451 entry
= talloc(mem_ctx
, function_entry
);
1453 entry
->sig_id
= this->next_signature_id
++;
1454 entry
->bgn_inst
= NULL
;
1456 /* Allocate storage for all the parameters. */
1457 foreach_iter(exec_list_iterator
, iter
, sig
->parameters
) {
1458 ir_variable
*param
= (ir_variable
*)iter
.get();
1459 variable_storage
*storage
;
1461 storage
= find_variable_storage(param
);
1464 storage
= new(mem_ctx
) variable_storage(param
, PROGRAM_TEMPORARY
,
1466 this->variables
.push_tail(storage
);
1468 this->next_temp
+= type_size(param
->type
);
1472 if (sig
->return_type
) {
1473 entry
->return_reg
= get_temp(sig
->return_type
);
1475 entry
->return_reg
= ir_to_mesa_undef
;
1478 this->function_signatures
.push_tail(entry
);
1483 ir_to_mesa_visitor::visit(ir_call
*ir
)
1485 ir_to_mesa_instruction
*call_inst
;
1486 ir_function_signature
*sig
= ir
->get_callee();
1487 function_entry
*entry
= get_function_signature(sig
);
1490 /* Process in parameters. */
1491 exec_list_iterator sig_iter
= sig
->parameters
.iterator();
1492 foreach_iter(exec_list_iterator
, iter
, *ir
) {
1493 ir_rvalue
*param_rval
= (ir_rvalue
*)iter
.get();
1494 ir_variable
*param
= (ir_variable
*)sig_iter
.get();
1496 if (param
->mode
== ir_var_in
||
1497 param
->mode
== ir_var_inout
) {
1498 variable_storage
*storage
= find_variable_storage(param
);
1501 param_rval
->accept(this);
1502 ir_to_mesa_src_reg r
= this->result
;
1504 ir_to_mesa_dst_reg l
;
1505 l
.file
= storage
->file
;
1506 l
.index
= storage
->index
;
1508 l
.writemask
= WRITEMASK_XYZW
;
1509 l
.cond_mask
= COND_TR
;
1511 for (i
= 0; i
< type_size(param
->type
); i
++) {
1512 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, l
, r
);
1520 assert(!sig_iter
.has_next());
1522 /* Emit call instruction */
1523 call_inst
= ir_to_mesa_emit_op1(ir
, OPCODE_CAL
,
1524 ir_to_mesa_undef_dst
, ir_to_mesa_undef
);
1525 call_inst
->function
= entry
;
1527 /* Process out parameters. */
1528 sig_iter
= sig
->parameters
.iterator();
1529 foreach_iter(exec_list_iterator
, iter
, *ir
) {
1530 ir_rvalue
*param_rval
= (ir_rvalue
*)iter
.get();
1531 ir_variable
*param
= (ir_variable
*)sig_iter
.get();
1533 if (param
->mode
== ir_var_out
||
1534 param
->mode
== ir_var_inout
) {
1535 variable_storage
*storage
= find_variable_storage(param
);
1538 ir_to_mesa_src_reg r
;
1539 r
.file
= storage
->file
;
1540 r
.index
= storage
->index
;
1542 r
.swizzle
= SWIZZLE_NOOP
;
1545 param_rval
->accept(this);
1546 ir_to_mesa_dst_reg l
= ir_to_mesa_dst_reg_from_src(this->result
);
1548 for (i
= 0; i
< type_size(param
->type
); i
++) {
1549 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, l
, r
);
1557 assert(!sig_iter
.has_next());
1559 /* Process return value. */
1560 this->result
= entry
->return_reg
;
1565 ir_to_mesa_visitor::visit(ir_texture
*ir
)
1567 ir_to_mesa_src_reg result_src
, coord
, lod_info
, projector
;
1568 ir_to_mesa_dst_reg result_dst
, coord_dst
;
1569 ir_to_mesa_instruction
*inst
= NULL
;
1570 prog_opcode opcode
= OPCODE_NOP
;
1572 ir
->coordinate
->accept(this);
1574 /* Put our coords in a temp. We'll need to modify them for shadow,
1575 * projection, or LOD, so the only case we'd use it as is is if
1576 * we're doing plain old texturing. Mesa IR optimization should
1577 * handle cleaning up our mess in that case.
1579 coord
= get_temp(glsl_type::vec4_type
);
1580 coord_dst
= ir_to_mesa_dst_reg_from_src(coord
);
1581 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, coord_dst
,
1584 if (ir
->projector
) {
1585 ir
->projector
->accept(this);
1586 projector
= this->result
;
1589 /* Storage for our result. Ideally for an assignment we'd be using
1590 * the actual storage for the result here, instead.
1592 result_src
= get_temp(glsl_type::vec4_type
);
1593 result_dst
= ir_to_mesa_dst_reg_from_src(result_src
);
1597 opcode
= OPCODE_TEX
;
1600 opcode
= OPCODE_TXB
;
1601 ir
->lod_info
.bias
->accept(this);
1602 lod_info
= this->result
;
1605 opcode
= OPCODE_TXL
;
1606 ir
->lod_info
.lod
->accept(this);
1607 lod_info
= this->result
;
1611 assert(!"GLSL 1.30 features unsupported");
1615 if (ir
->projector
) {
1616 if (opcode
== OPCODE_TEX
) {
1617 /* Slot the projector in as the last component of the coord. */
1618 coord_dst
.writemask
= WRITEMASK_W
;
1619 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, coord_dst
, projector
);
1620 coord_dst
.writemask
= WRITEMASK_XYZW
;
1621 opcode
= OPCODE_TXP
;
1623 ir_to_mesa_src_reg coord_w
= coord
;
1624 coord_w
.swizzle
= SWIZZLE_WWWW
;
1626 /* For the other TEX opcodes there's no projective version
1627 * since the last slot is taken up by lod info. Do the
1628 * projective divide now.
1630 coord_dst
.writemask
= WRITEMASK_W
;
1631 ir_to_mesa_emit_op1(ir
, OPCODE_RCP
, coord_dst
, projector
);
1633 coord_dst
.writemask
= WRITEMASK_XYZ
;
1634 ir_to_mesa_emit_op2(ir
, OPCODE_MUL
, coord_dst
, coord
, coord_w
);
1636 coord_dst
.writemask
= WRITEMASK_XYZW
;
1637 coord
.swizzle
= SWIZZLE_XYZW
;
1641 if (ir
->shadow_comparitor
) {
1642 /* Slot the shadow value in as the second to last component of the
1645 ir
->shadow_comparitor
->accept(this);
1646 coord_dst
.writemask
= WRITEMASK_Z
;
1647 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, coord_dst
, this->result
);
1648 coord_dst
.writemask
= WRITEMASK_XYZW
;
1651 if (opcode
== OPCODE_TXL
|| opcode
== OPCODE_TXB
) {
1652 /* Mesa IR stores lod or lod bias in the last channel of the coords. */
1653 coord_dst
.writemask
= WRITEMASK_W
;
1654 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, coord_dst
, lod_info
);
1655 coord_dst
.writemask
= WRITEMASK_XYZW
;
1658 inst
= ir_to_mesa_emit_op1(ir
, opcode
, result_dst
, coord
);
1660 if (ir
->shadow_comparitor
)
1661 inst
->tex_shadow
= GL_TRUE
;
1663 ir_dereference_variable
*sampler
= ir
->sampler
->as_dereference_variable();
1664 assert(sampler
); /* FINISHME: sampler arrays */
1665 /* generate the mapping, remove when we generate storage at
1668 sampler
->accept(this);
1670 inst
->sampler
= get_sampler_number(sampler
->var
->location
);
1672 switch (sampler
->type
->sampler_dimensionality
) {
1673 case GLSL_SAMPLER_DIM_1D
:
1674 inst
->tex_target
= TEXTURE_1D_INDEX
;
1676 case GLSL_SAMPLER_DIM_2D
:
1677 inst
->tex_target
= TEXTURE_2D_INDEX
;
1679 case GLSL_SAMPLER_DIM_3D
:
1680 inst
->tex_target
= TEXTURE_3D_INDEX
;
1682 case GLSL_SAMPLER_DIM_CUBE
:
1683 inst
->tex_target
= TEXTURE_CUBE_INDEX
;
1686 assert(!"FINISHME: other texture targets");
1689 this->result
= result_src
;
1693 ir_to_mesa_visitor::visit(ir_return
*ir
)
1695 assert(current_function
);
1697 if (ir
->get_value()) {
1698 ir_to_mesa_dst_reg l
;
1701 ir
->get_value()->accept(this);
1702 ir_to_mesa_src_reg r
= this->result
;
1704 l
= ir_to_mesa_dst_reg_from_src(current_function
->return_reg
);
1706 for (i
= 0; i
< type_size(current_function
->sig
->return_type
); i
++) {
1707 ir_to_mesa_emit_op1(ir
, OPCODE_MOV
, l
, r
);
1713 ir_to_mesa_emit_op0(ir
, OPCODE_RET
);
1717 ir_to_mesa_visitor::visit(ir_discard
*ir
)
1719 assert(ir
->condition
== NULL
); /* FINISHME */
1721 ir_to_mesa_emit_op0(ir
, OPCODE_KIL_NV
);
1725 ir_to_mesa_visitor::visit(ir_if
*ir
)
1727 ir_to_mesa_instruction
*cond_inst
, *if_inst
, *else_inst
= NULL
;
1728 ir_to_mesa_instruction
*prev_inst
;
1730 prev_inst
= (ir_to_mesa_instruction
*)this->instructions
.get_tail();
1732 ir
->condition
->accept(this);
1733 assert(this->result
.file
!= PROGRAM_UNDEFINED
);
1735 if (ctx
->Shader
.EmitCondCodes
) {
1736 cond_inst
= (ir_to_mesa_instruction
*)this->instructions
.get_tail();
1738 /* See if we actually generated any instruction for generating
1739 * the condition. If not, then cook up a move to a temp so we
1740 * have something to set cond_update on.
1742 if (cond_inst
== prev_inst
) {
1743 ir_to_mesa_src_reg temp
= get_temp(glsl_type::bool_type
);
1744 cond_inst
= ir_to_mesa_emit_op1(ir
->condition
, OPCODE_MOV
,
1745 ir_to_mesa_dst_reg_from_src(temp
),
1748 cond_inst
->cond_update
= GL_TRUE
;
1750 if_inst
= ir_to_mesa_emit_op0(ir
->condition
, OPCODE_IF
);
1751 if_inst
->dst_reg
.cond_mask
= COND_NE
;
1753 if_inst
= ir_to_mesa_emit_op1(ir
->condition
,
1754 OPCODE_IF
, ir_to_mesa_undef_dst
,
1758 this->instructions
.push_tail(if_inst
);
1760 visit_exec_list(&ir
->then_instructions
, this);
1762 if (!ir
->else_instructions
.is_empty()) {
1763 else_inst
= ir_to_mesa_emit_op0(ir
->condition
, OPCODE_ELSE
);
1764 visit_exec_list(&ir
->else_instructions
, this);
1767 if_inst
= ir_to_mesa_emit_op1(ir
->condition
, OPCODE_ENDIF
,
1768 ir_to_mesa_undef_dst
, ir_to_mesa_undef
);
1771 ir_to_mesa_visitor::ir_to_mesa_visitor()
1773 result
.file
= PROGRAM_UNDEFINED
;
1775 next_signature_id
= 1;
1777 sampler_map_size
= 0;
1778 current_function
= NULL
;
1781 static struct prog_src_register
1782 mesa_src_reg_from_ir_src_reg(ir_to_mesa_src_reg reg
)
1784 struct prog_src_register mesa_reg
;
1786 mesa_reg
.File
= reg
.file
;
1787 assert(reg
.index
< (1 << INST_INDEX_BITS
) - 1);
1788 mesa_reg
.Index
= reg
.index
;
1789 mesa_reg
.Swizzle
= reg
.swizzle
;
1790 mesa_reg
.RelAddr
= reg
.reladdr
!= NULL
;
1791 mesa_reg
.Negate
= reg
.negate
;
1798 set_branchtargets(ir_to_mesa_visitor
*v
,
1799 struct prog_instruction
*mesa_instructions
,
1800 int num_instructions
)
1802 int if_count
= 0, loop_count
= 0;
1803 int *if_stack
, *loop_stack
;
1804 int if_stack_pos
= 0, loop_stack_pos
= 0;
1807 for (i
= 0; i
< num_instructions
; i
++) {
1808 switch (mesa_instructions
[i
].Opcode
) {
1812 case OPCODE_BGNLOOP
:
1817 mesa_instructions
[i
].BranchTarget
= -1;
1824 if_stack
= (int *)calloc(if_count
, sizeof(*if_stack
));
1825 loop_stack
= (int *)calloc(loop_count
, sizeof(*loop_stack
));
1827 for (i
= 0; i
< num_instructions
; i
++) {
1828 switch (mesa_instructions
[i
].Opcode
) {
1830 if_stack
[if_stack_pos
] = i
;
1834 mesa_instructions
[if_stack
[if_stack_pos
- 1]].BranchTarget
= i
;
1835 if_stack
[if_stack_pos
- 1] = i
;
1838 mesa_instructions
[if_stack
[if_stack_pos
- 1]].BranchTarget
= i
;
1841 case OPCODE_BGNLOOP
:
1842 loop_stack
[loop_stack_pos
] = i
;
1845 case OPCODE_ENDLOOP
:
1847 /* Rewrite any breaks/conts at this nesting level (haven't
1848 * already had a BranchTarget assigned) to point to the end
1851 for (j
= loop_stack
[loop_stack_pos
]; j
< i
; j
++) {
1852 if (mesa_instructions
[j
].Opcode
== OPCODE_BRK
||
1853 mesa_instructions
[j
].Opcode
== OPCODE_CONT
) {
1854 if (mesa_instructions
[j
].BranchTarget
== -1) {
1855 mesa_instructions
[j
].BranchTarget
= i
;
1859 /* The loop ends point at each other. */
1860 mesa_instructions
[i
].BranchTarget
= loop_stack
[loop_stack_pos
];
1861 mesa_instructions
[loop_stack
[loop_stack_pos
]].BranchTarget
= i
;
1864 foreach_iter(exec_list_iterator
, iter
, v
->function_signatures
) {
1865 function_entry
*entry
= (function_entry
*)iter
.get();
1867 if (entry
->sig_id
== mesa_instructions
[i
].BranchTarget
) {
1868 mesa_instructions
[i
].BranchTarget
= entry
->inst
;
1882 print_program(struct prog_instruction
*mesa_instructions
,
1883 ir_instruction
**mesa_instruction_annotation
,
1884 int num_instructions
)
1886 ir_instruction
*last_ir
= NULL
;
1889 for (i
= 0; i
< num_instructions
; i
++) {
1890 struct prog_instruction
*mesa_inst
= mesa_instructions
+ i
;
1891 ir_instruction
*ir
= mesa_instruction_annotation
[i
];
1893 if (last_ir
!= ir
&& ir
) {
1894 ir_print_visitor print
;
1900 _mesa_print_instruction(mesa_inst
);
1905 mark_input(struct gl_program
*prog
,
1909 prog
->InputsRead
|= BITFIELD64_BIT(index
);
1913 if (index
>= FRAG_ATTRIB_TEX0
&& index
<= FRAG_ATTRIB_TEX7
) {
1914 for (i
= 0; i
< 8; i
++) {
1915 prog
->InputsRead
|= BITFIELD64_BIT(FRAG_ATTRIB_TEX0
+ i
);
1918 assert(!"FINISHME: Mark InputsRead for varying arrays");
1924 mark_output(struct gl_program
*prog
,
1928 prog
->OutputsWritten
|= BITFIELD64_BIT(index
);
1932 if (index
>= VERT_RESULT_TEX0
&& index
<= VERT_RESULT_TEX7
) {
1933 for (i
= 0; i
< 8; i
++) {
1934 prog
->OutputsWritten
|= BITFIELD64_BIT(FRAG_ATTRIB_TEX0
+ i
);
1937 assert(!"FINISHME: Mark OutputsWritten for varying arrays");
1943 count_resources(struct gl_program
*prog
)
1947 prog
->InputsRead
= 0;
1948 prog
->OutputsWritten
= 0;
1949 prog
->SamplersUsed
= 0;
1951 for (i
= 0; i
< prog
->NumInstructions
; i
++) {
1952 struct prog_instruction
*inst
= &prog
->Instructions
[i
];
1955 switch (inst
->DstReg
.File
) {
1956 case PROGRAM_OUTPUT
:
1957 mark_output(prog
, inst
->DstReg
.Index
, inst
->DstReg
.RelAddr
);
1960 mark_input(prog
, inst
->DstReg
.Index
, inst
->DstReg
.RelAddr
);
1966 for (reg
= 0; reg
< _mesa_num_inst_src_regs(inst
->Opcode
); reg
++) {
1967 switch (inst
->SrcReg
[reg
].File
) {
1968 case PROGRAM_OUTPUT
:
1969 mark_output(prog
, inst
->SrcReg
[reg
].Index
,
1970 inst
->SrcReg
[reg
].RelAddr
);
1973 mark_input(prog
, inst
->SrcReg
[reg
].Index
, inst
->SrcReg
[reg
].RelAddr
);
1980 /* Instead of just using the uniform's value to map to a
1981 * sampler, Mesa first allocates a separate number for the
1982 * sampler (_mesa_add_sampler), then we reindex it down to a
1983 * small integer (sampler_map[], SamplersUsed), then that gets
1984 * mapped to the uniform's value, and we get an actual sampler.
1986 if (_mesa_is_tex_instruction(inst
->Opcode
)) {
1987 prog
->SamplerTargets
[inst
->TexSrcUnit
] =
1988 (gl_texture_index
)inst
->TexSrcTarget
;
1989 prog
->SamplersUsed
|= 1 << inst
->TexSrcUnit
;
1990 if (inst
->TexShadow
) {
1991 prog
->ShadowSamplers
|= 1 << inst
->TexSrcUnit
;
1996 _mesa_update_shader_textures_used(prog
);
1999 /* Each stage has some uniforms in its Parameters list. The Uniforms
2000 * list for the linked shader program has a pointer to these uniforms
2001 * in each of the stage's Parameters list, so that their values can be
2002 * updated when a uniform is set.
2005 link_uniforms_to_shared_uniform_list(struct gl_uniform_list
*uniforms
,
2006 struct gl_program
*prog
)
2010 for (i
= 0; i
< prog
->Parameters
->NumParameters
; i
++) {
2011 const struct gl_program_parameter
*p
= prog
->Parameters
->Parameters
+ i
;
2013 if (p
->Type
== PROGRAM_UNIFORM
|| p
->Type
== PROGRAM_SAMPLER
) {
2014 struct gl_uniform
*uniform
=
2015 _mesa_append_uniform(uniforms
, p
->Name
, prog
->Target
, i
);
2017 uniform
->Initialized
= p
->Initialized
;
2023 get_mesa_program(GLcontext
*ctx
, struct gl_shader_program
*shader_program
,
2024 struct gl_shader
*shader
)
2026 void *mem_ctx
= shader_program
;
2027 ir_to_mesa_visitor v
;
2028 struct prog_instruction
*mesa_instructions
, *mesa_inst
;
2029 ir_instruction
**mesa_instruction_annotation
;
2031 struct gl_program
*prog
;
2035 switch (shader
->Type
) {
2036 case GL_VERTEX_SHADER
: target
= GL_VERTEX_PROGRAM_ARB
; break;
2037 case GL_FRAGMENT_SHADER
: target
= GL_FRAGMENT_PROGRAM_ARB
; break;
2038 default: assert(!"should not be reached"); break;
2041 validate_ir_tree(shader
->ir
);
2043 prog
= ctx
->Driver
.NewProgram(ctx
, target
, 1);
2046 prog
->Parameters
= _mesa_new_parameter_list();
2047 prog
->Varying
= _mesa_new_parameter_list();
2048 prog
->Attributes
= _mesa_new_parameter_list();
2052 v
.mem_ctx
= talloc_new(NULL
);
2054 /* Emit Mesa IR for main(). */
2055 visit_exec_list(shader
->ir
, &v
);
2056 v
.ir_to_mesa_emit_op0(NULL
, OPCODE_END
);
2058 /* Now emit bodies for any functions that were used. */
2060 progress
= GL_FALSE
;
2062 foreach_iter(exec_list_iterator
, iter
, v
.function_signatures
) {
2063 function_entry
*entry
= (function_entry
*)iter
.get();
2065 if (!entry
->bgn_inst
) {
2066 v
.current_function
= entry
;
2068 entry
->bgn_inst
= v
.ir_to_mesa_emit_op0(NULL
, OPCODE_BGNSUB
);
2069 entry
->bgn_inst
->function
= entry
;
2071 visit_exec_list(&entry
->sig
->body
, &v
);
2073 entry
->bgn_inst
= v
.ir_to_mesa_emit_op0(NULL
, OPCODE_RET
);
2074 entry
->bgn_inst
= v
.ir_to_mesa_emit_op0(NULL
, OPCODE_ENDSUB
);
2080 prog
->NumTemporaries
= v
.next_temp
;
2082 int num_instructions
= 0;
2083 foreach_iter(exec_list_iterator
, iter
, v
.instructions
) {
2088 (struct prog_instruction
*)calloc(num_instructions
,
2089 sizeof(*mesa_instructions
));
2090 mesa_instruction_annotation
= talloc_array(mem_ctx
, ir_instruction
*,
2093 mesa_inst
= mesa_instructions
;
2095 foreach_iter(exec_list_iterator
, iter
, v
.instructions
) {
2096 ir_to_mesa_instruction
*inst
= (ir_to_mesa_instruction
*)iter
.get();
2098 mesa_inst
->Opcode
= inst
->op
;
2099 mesa_inst
->CondUpdate
= inst
->cond_update
;
2100 mesa_inst
->DstReg
.File
= inst
->dst_reg
.file
;
2101 mesa_inst
->DstReg
.Index
= inst
->dst_reg
.index
;
2102 mesa_inst
->DstReg
.CondMask
= inst
->dst_reg
.cond_mask
;
2103 mesa_inst
->DstReg
.WriteMask
= inst
->dst_reg
.writemask
;
2104 mesa_inst
->DstReg
.RelAddr
= inst
->dst_reg
.reladdr
!= NULL
;
2105 mesa_inst
->SrcReg
[0] = mesa_src_reg_from_ir_src_reg(inst
->src_reg
[0]);
2106 mesa_inst
->SrcReg
[1] = mesa_src_reg_from_ir_src_reg(inst
->src_reg
[1]);
2107 mesa_inst
->SrcReg
[2] = mesa_src_reg_from_ir_src_reg(inst
->src_reg
[2]);
2108 mesa_inst
->TexSrcUnit
= inst
->sampler
;
2109 mesa_inst
->TexSrcTarget
= inst
->tex_target
;
2110 mesa_inst
->TexShadow
= inst
->tex_shadow
;
2111 mesa_instruction_annotation
[i
] = inst
->ir
;
2113 if (ctx
->Shader
.EmitNoIfs
&& mesa_inst
->Opcode
== OPCODE_IF
) {
2114 shader_program
->InfoLog
=
2115 talloc_asprintf_append(shader_program
->InfoLog
,
2116 "Couldn't flatten if statement\n");
2117 shader_program
->LinkStatus
= false;
2120 if (mesa_inst
->Opcode
== OPCODE_BGNSUB
)
2121 inst
->function
->inst
= i
;
2122 else if (mesa_inst
->Opcode
== OPCODE_CAL
)
2123 mesa_inst
->BranchTarget
= inst
->function
->sig_id
; /* rewritten later */
2129 set_branchtargets(&v
, mesa_instructions
, num_instructions
);
2131 print_program(mesa_instructions
, mesa_instruction_annotation
,
2135 prog
->Instructions
= mesa_instructions
;
2136 prog
->NumInstructions
= num_instructions
;
2138 _mesa_reference_program(ctx
, &shader
->Program
, prog
);
2140 if ((ctx
->Shader
.Flags
& GLSL_NO_OPT
) == 0) {
2141 _mesa_optimize_program(ctx
, prog
);
2150 steal_memory(ir_instruction
*ir
, void *new_ctx
)
2152 talloc_steal(new_ctx
, ir
);
2156 _mesa_glsl_compile_shader(GLcontext
*ctx
, struct gl_shader
*shader
)
2158 struct _mesa_glsl_parse_state
*state
;
2160 state
= talloc_zero(shader
, struct _mesa_glsl_parse_state
);
2161 switch (shader
->Type
) {
2162 case GL_VERTEX_SHADER
: state
->target
= vertex_shader
; break;
2163 case GL_FRAGMENT_SHADER
: state
->target
= fragment_shader
; break;
2164 case GL_GEOMETRY_SHADER
: state
->target
= geometry_shader
; break;
2167 state
->scanner
= NULL
;
2168 state
->translation_unit
.make_empty();
2169 state
->symbols
= new(shader
) glsl_symbol_table
;
2170 state
->info_log
= talloc_strdup(shader
, "");
2171 state
->error
= false;
2172 state
->loop_or_switch_nesting
= NULL
;
2173 state
->ARB_texture_rectangle_enable
= true;
2175 state
->extensions
= &ctx
->Extensions
;
2177 state
->Const
.MaxLights
= ctx
->Const
.MaxLights
;
2178 state
->Const
.MaxClipPlanes
= ctx
->Const
.MaxClipPlanes
;
2179 state
->Const
.MaxTextureUnits
= ctx
->Const
.MaxTextureUnits
;
2180 state
->Const
.MaxTextureCoords
= ctx
->Const
.MaxTextureCoordUnits
;
2181 state
->Const
.MaxVertexAttribs
= ctx
->Const
.VertexProgram
.MaxAttribs
;
2182 state
->Const
.MaxVertexUniformComponents
= ctx
->Const
.VertexProgram
.MaxUniformComponents
;
2183 state
->Const
.MaxVaryingFloats
= ctx
->Const
.MaxVarying
* 4;
2184 state
->Const
.MaxVertexTextureImageUnits
= ctx
->Const
.MaxVertexTextureImageUnits
;
2185 state
->Const
.MaxCombinedTextureImageUnits
= ctx
->Const
.MaxCombinedTextureImageUnits
;
2186 state
->Const
.MaxTextureImageUnits
= ctx
->Const
.MaxTextureImageUnits
;
2187 state
->Const
.MaxFragmentUniformComponents
= ctx
->Const
.FragmentProgram
.MaxUniformComponents
;
2189 state
->Const
.MaxDrawBuffers
= ctx
->Const
.MaxDrawBuffers
;
2191 const char *source
= shader
->Source
;
2192 state
->error
= preprocess(state
, &source
, &state
->info_log
,
2195 if (!state
->error
) {
2196 _mesa_glsl_lexer_ctor(state
, source
);
2197 _mesa_glsl_parse(state
);
2198 _mesa_glsl_lexer_dtor(state
);
2201 shader
->ir
= new(shader
) exec_list
;
2202 if (!state
->error
&& !state
->translation_unit
.is_empty())
2203 _mesa_ast_to_hir(shader
->ir
, state
);
2205 if (!state
->error
&& !shader
->ir
->is_empty()) {
2206 validate_ir_tree(shader
->ir
);
2209 do_mat_op_to_vec(shader
->ir
);
2210 do_mod_to_fract(shader
->ir
);
2211 do_div_to_mul_rcp(shader
->ir
);
2213 /* Optimization passes */
2218 progress
= do_function_inlining(shader
->ir
) || progress
;
2219 progress
= do_if_simplification(shader
->ir
) || progress
;
2220 progress
= do_copy_propagation(shader
->ir
) || progress
;
2221 progress
= do_dead_code_local(shader
->ir
) || progress
;
2222 progress
= do_dead_code_unlinked(state
, shader
->ir
) || progress
;
2223 progress
= do_constant_variable_unlinked(shader
->ir
) || progress
;
2224 progress
= do_constant_folding(shader
->ir
) || progress
;
2225 progress
= do_if_return(shader
->ir
) || progress
;
2226 if (ctx
->Shader
.EmitNoIfs
)
2227 progress
= do_if_to_cond_assign(shader
->ir
) || progress
;
2229 progress
= do_vec_index_to_swizzle(shader
->ir
) || progress
;
2230 /* Do this one after the previous to let the easier pass handle
2231 * constant vector indexing.
2233 progress
= do_vec_index_to_cond_assign(shader
->ir
) || progress
;
2235 progress
= do_swizzle_swizzle(shader
->ir
) || progress
;
2238 validate_ir_tree(shader
->ir
);
2241 shader
->symbols
= state
->symbols
;
2243 shader
->CompileStatus
= !state
->error
;
2244 shader
->InfoLog
= state
->info_log
;
2245 shader
->Version
= state
->language_version
;
2247 /* Retain any live IR, but trash the rest. */
2248 foreach_list(node
, shader
->ir
) {
2249 visit_tree((ir_instruction
*) node
, steal_memory
, shader
);
2256 _mesa_glsl_link_shader(GLcontext
*ctx
, struct gl_shader_program
*prog
)
2260 _mesa_clear_shader_program_data(ctx
, prog
);
2262 prog
->LinkStatus
= GL_TRUE
;
2264 for (i
= 0; i
< prog
->NumShaders
; i
++) {
2265 if (!prog
->Shaders
[i
]->CompileStatus
) {
2267 talloc_asprintf_append(prog
->InfoLog
,
2268 "linking with uncompiled shader");
2269 prog
->LinkStatus
= GL_FALSE
;
2273 prog
->Varying
= _mesa_new_parameter_list();
2274 _mesa_reference_vertprog(ctx
, &prog
->VertexProgram
, NULL
);
2275 _mesa_reference_fragprog(ctx
, &prog
->FragmentProgram
, NULL
);
2277 if (prog
->LinkStatus
) {
2280 /* We don't use the linker's uniforms list, and cook up our own at
2283 free(prog
->Uniforms
);
2284 prog
->Uniforms
= _mesa_new_uniform_list();
2287 if (prog
->LinkStatus
) {
2288 for (i
= 0; i
< prog
->_NumLinkedShaders
; i
++) {
2289 struct gl_program
*linked_prog
;
2291 linked_prog
= get_mesa_program(ctx
, prog
,
2292 prog
->_LinkedShaders
[i
]);
2293 count_resources(linked_prog
);
2295 link_uniforms_to_shared_uniform_list(prog
->Uniforms
, linked_prog
);
2297 switch (prog
->_LinkedShaders
[i
]->Type
) {
2298 case GL_VERTEX_SHADER
:
2299 _mesa_reference_vertprog(ctx
, &prog
->VertexProgram
,
2300 (struct gl_vertex_program
*)linked_prog
);
2301 ctx
->Driver
.ProgramStringNotify(ctx
, GL_VERTEX_PROGRAM_ARB
,
2304 case GL_FRAGMENT_SHADER
:
2305 _mesa_reference_fragprog(ctx
, &prog
->FragmentProgram
,
2306 (struct gl_fragment_program
*)linked_prog
);
2307 ctx
->Driver
.ProgramStringNotify(ctx
, GL_FRAGMENT_PROGRAM_ARB
,