glsl2: Add and use new variable mode ir_var_temporary
[mesa.git] / src / mesa / shader / ir_to_mesa.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 /**
27 * \file ir_to_mesa.cpp
28 *
29 * Translates the GLSL IR to Mesa IR (struct prog_instruction) if possible,
30 * printing the result for debugging
31 */
32
33 #include <stdio.h>
34 #include "ir.h"
35 #include "ir_visitor.h"
36 #include "ir_print_visitor.h"
37 #include "ir_expression_flattening.h"
38 #include "glsl_types.h"
39 #include "glsl_parser_extras.h"
40 #include "../glsl/program.h"
41 #include "ir_optimization.h"
42 #include "ast.h"
43
44 extern "C" {
45 #include "main/mtypes.h"
46 #include "shader/prog_instruction.h"
47 #include "shader/prog_optimize.h"
48 #include "shader/prog_print.h"
49 #include "shader/program.h"
50 #include "shader/prog_uniform.h"
51 #include "shader/prog_parameter.h"
52 #include "shader/shader_api.h"
53 }
54
55 /**
56 * This struct corresponds to Mesa's prog_src_register, but with
57 * wider fields.
58 */
59 typedef struct ir_to_mesa_src_reg {
60 int file; /**< PROGRAM_* from Mesa */
61 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
62 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
63 int negate; /**< NEGATE_XYZW mask from mesa */
64 /** Register index should be offset by the integer in this reg. */
65 ir_to_mesa_src_reg *reladdr;
66 } ir_to_mesa_src_reg;
67
68 typedef struct ir_to_mesa_dst_reg {
69 int file; /**< PROGRAM_* from Mesa */
70 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
71 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
72 GLuint cond_mask:4;
73 /** Register index should be offset by the integer in this reg. */
74 ir_to_mesa_src_reg *reladdr;
75 } ir_to_mesa_dst_reg;
76
77 extern ir_to_mesa_src_reg ir_to_mesa_undef;
78
79 class ir_to_mesa_instruction : public exec_node {
80 public:
81 enum prog_opcode op;
82 ir_to_mesa_dst_reg dst_reg;
83 ir_to_mesa_src_reg src_reg[3];
84 /** Pointer to the ir source this tree came from for debugging */
85 ir_instruction *ir;
86 GLboolean cond_update;
87 int sampler; /**< sampler index */
88 int tex_target; /**< One of TEXTURE_*_INDEX */
89 GLboolean tex_shadow;
90
91 class function_entry *function; /* Set on OPCODE_CAL or OPCODE_BGNSUB */
92 };
93
94 class variable_storage : public exec_node {
95 public:
96 variable_storage(ir_variable *var, int file, int index)
97 : file(file), index(index), var(var)
98 {
99 /* empty */
100 }
101
102 int file;
103 int index;
104 ir_variable *var; /* variable that maps to this, if any */
105 };
106
107 class function_entry : public exec_node {
108 public:
109 ir_function_signature *sig;
110
111 /**
112 * identifier of this function signature used by the program.
113 *
114 * At the point that Mesa instructions for function calls are
115 * generated, we don't know the address of the first instruction of
116 * the function body. So we initially store this small integer (sig_id) in
117 * the call's BranchTarget and rewrite it during set_branchtargets().
118 */
119 int sig_id;
120
121 /**
122 * Pointer to first instruction of the function body.
123 *
124 * Set during function body emits after main() is processed.
125 */
126 ir_to_mesa_instruction *bgn_inst;
127
128 /**
129 * Index of the first instruction of the function body in actual
130 * Mesa IR.
131 *
132 * Set after conversion from ir_to_mesa_instruction to prog_instruction.
133 */
134 int inst;
135
136 /** Storage for the return value. */
137 ir_to_mesa_src_reg return_reg;
138 };
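/* Illustration of the two-step fixup described for sig_id above: a call is
 * first emitted roughly as "CAL BranchTarget = <sig_id>", and
 * set_branchtargets() later rewrites that BranchTarget to the 'inst' index of
 * the first Mesa instruction of the function body.
 */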
139
140 class ir_to_mesa_visitor : public ir_visitor {
141 public:
142 ir_to_mesa_visitor();
143
144 function_entry *current_function;
145
146 GLcontext *ctx;
147 struct gl_program *prog;
148
149 int next_temp;
150
151 variable_storage *find_variable_storage(ir_variable *var);
152
153 function_entry *get_function_signature(ir_function_signature *sig);
154
155 ir_to_mesa_src_reg get_temp(const glsl_type *type);
156 void reladdr_to_temp(ir_instruction *ir,
157 ir_to_mesa_src_reg *reg, int *num_reladdr);
158
159 struct ir_to_mesa_src_reg src_reg_for_float(float val);
160
161 /**
162 * \name Visit methods
163 *
164 * As typical for the visitor pattern, there must be one \c visit method for
165 * each concrete subclass of \c ir_instruction. Virtual base classes within
166 * the hierarchy should not have \c visit methods.
167 */
168 /*@{*/
169 virtual void visit(ir_variable *);
170 virtual void visit(ir_loop *);
171 virtual void visit(ir_loop_jump *);
172 virtual void visit(ir_function_signature *);
173 virtual void visit(ir_function *);
174 virtual void visit(ir_expression *);
175 virtual void visit(ir_swizzle *);
176 virtual void visit(ir_dereference_variable *);
177 virtual void visit(ir_dereference_array *);
178 virtual void visit(ir_dereference_record *);
179 virtual void visit(ir_assignment *);
180 virtual void visit(ir_constant *);
181 virtual void visit(ir_call *);
182 virtual void visit(ir_return *);
183 virtual void visit(ir_discard *);
184 virtual void visit(ir_texture *);
185 virtual void visit(ir_if *);
186 /*@}*/
187
188 struct ir_to_mesa_src_reg result;
189
190 /** List of variable_storage */
191 exec_list variables;
192
193 /** List of function_entry */
194 exec_list function_signatures;
195 int next_signature_id;
196
197 /** List of ir_to_mesa_instruction */
198 exec_list instructions;
199
200 ir_to_mesa_instruction *ir_to_mesa_emit_op0(ir_instruction *ir,
201 enum prog_opcode op);
202
203 ir_to_mesa_instruction *ir_to_mesa_emit_op1(ir_instruction *ir,
204 enum prog_opcode op,
205 ir_to_mesa_dst_reg dst,
206 ir_to_mesa_src_reg src0);
207
208 ir_to_mesa_instruction *ir_to_mesa_emit_op2(ir_instruction *ir,
209 enum prog_opcode op,
210 ir_to_mesa_dst_reg dst,
211 ir_to_mesa_src_reg src0,
212 ir_to_mesa_src_reg src1);
213
214 ir_to_mesa_instruction *ir_to_mesa_emit_op3(ir_instruction *ir,
215 enum prog_opcode op,
216 ir_to_mesa_dst_reg dst,
217 ir_to_mesa_src_reg src0,
218 ir_to_mesa_src_reg src1,
219 ir_to_mesa_src_reg src2);
220
221 void ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
222 enum prog_opcode op,
223 ir_to_mesa_dst_reg dst,
224 ir_to_mesa_src_reg src0);
225
226 void ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
227 enum prog_opcode op,
228 ir_to_mesa_dst_reg dst,
229 ir_to_mesa_src_reg src0,
230 ir_to_mesa_src_reg src1);
231
232 GLboolean try_emit_mad(ir_expression *ir,
233 int mul_operand);
234
235 int *sampler_map;
236 int sampler_map_size;
237
238 void map_sampler(int location, int sampler);
239 int get_sampler_number(int location);
240
241 void *mem_ctx;
242 };
243
244 ir_to_mesa_src_reg ir_to_mesa_undef = {
245 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, NEGATE_NONE, NULL,
246 };
247
248 ir_to_mesa_dst_reg ir_to_mesa_undef_dst = {
249 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, COND_TR, NULL,
250 };
251
252 ir_to_mesa_dst_reg ir_to_mesa_address_reg = {
253 PROGRAM_ADDRESS, 0, WRITEMASK_X, COND_TR, NULL
254 };
255
256 static int swizzle_for_size(int size)
257 {
258 int size_swizzles[4] = {
259 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
260 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
261 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
262 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
263 };
264
265 return size_swizzles[size - 1];
266 }
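/* For example, swizzle_for_size(3) yields MAKE_SWIZZLE4(X, Y, Z, Z): reads of
 * a vec3 replicate the last channel into the otherwise-unused W slot.
 */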
267
268 ir_to_mesa_instruction *
269 ir_to_mesa_visitor::ir_to_mesa_emit_op3(ir_instruction *ir,
270 enum prog_opcode op,
271 ir_to_mesa_dst_reg dst,
272 ir_to_mesa_src_reg src0,
273 ir_to_mesa_src_reg src1,
274 ir_to_mesa_src_reg src2)
275 {
276 ir_to_mesa_instruction *inst = new(mem_ctx) ir_to_mesa_instruction();
277 int num_reladdr = 0;
278
279 /* If we have to do relative addressing, we want to load the ARL
280 * reg directly for one of the regs, and preload the other reladdr
281 * sources into temps.
282 */
283 num_reladdr += dst.reladdr != NULL;
284 num_reladdr += src0.reladdr != NULL;
285 num_reladdr += src1.reladdr != NULL;
286 num_reladdr += src2.reladdr != NULL;
287
288 reladdr_to_temp(ir, &src2, &num_reladdr);
289 reladdr_to_temp(ir, &src1, &num_reladdr);
290 reladdr_to_temp(ir, &src0, &num_reladdr);
291
292 if (dst.reladdr) {
293 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg,
294 *dst.reladdr);
295
296 num_reladdr--;
297 }
298 assert(num_reladdr == 0);
299
300 inst->op = op;
301 inst->dst_reg = dst;
302 inst->src_reg[0] = src0;
303 inst->src_reg[1] = src1;
304 inst->src_reg[2] = src2;
305 inst->ir = ir;
306
307 inst->function = NULL;
308
309 this->instructions.push_tail(inst);
310
311 return inst;
312 }
313
314
315 ir_to_mesa_instruction *
316 ir_to_mesa_visitor::ir_to_mesa_emit_op2(ir_instruction *ir,
317 enum prog_opcode op,
318 ir_to_mesa_dst_reg dst,
319 ir_to_mesa_src_reg src0,
320 ir_to_mesa_src_reg src1)
321 {
322 return ir_to_mesa_emit_op3(ir, op, dst, src0, src1, ir_to_mesa_undef);
323 }
324
325 ir_to_mesa_instruction *
326 ir_to_mesa_visitor::ir_to_mesa_emit_op1(ir_instruction *ir,
327 enum prog_opcode op,
328 ir_to_mesa_dst_reg dst,
329 ir_to_mesa_src_reg src0)
330 {
331 return ir_to_mesa_emit_op3(ir, op, dst,
332 src0, ir_to_mesa_undef, ir_to_mesa_undef);
333 }
334
335 ir_to_mesa_instruction *
336 ir_to_mesa_visitor::ir_to_mesa_emit_op0(ir_instruction *ir,
337 enum prog_opcode op)
338 {
339 return ir_to_mesa_emit_op3(ir, op, ir_to_mesa_undef_dst,
340 ir_to_mesa_undef,
341 ir_to_mesa_undef,
342 ir_to_mesa_undef);
343 }
344
345 void
346 ir_to_mesa_visitor::map_sampler(int location, int sampler)
347 {
348 if (this->sampler_map_size <= location) {
349 this->sampler_map = talloc_realloc(this->mem_ctx, this->sampler_map,
350 int, location + 1);
351 this->sampler_map_size = location + 1;
352 }
353
354 this->sampler_map[location] = sampler;
355 }
356
357 int
358 ir_to_mesa_visitor::get_sampler_number(int location)
359 {
360 assert(location < this->sampler_map_size);
361 return this->sampler_map[location];
362 }
363
364 inline ir_to_mesa_dst_reg
365 ir_to_mesa_dst_reg_from_src(ir_to_mesa_src_reg reg)
366 {
367 ir_to_mesa_dst_reg dst_reg;
368
369 dst_reg.file = reg.file;
370 dst_reg.index = reg.index;
371 dst_reg.writemask = WRITEMASK_XYZW;
372 dst_reg.cond_mask = COND_TR;
373 dst_reg.reladdr = reg.reladdr;
374
375 return dst_reg;
376 }
377
378 inline ir_to_mesa_src_reg
379 ir_to_mesa_src_reg_from_dst(ir_to_mesa_dst_reg reg)
380 {
381 ir_to_mesa_src_reg src_reg;
382
383 src_reg.file = reg.file;
384 src_reg.index = reg.index;
385 src_reg.swizzle = SWIZZLE_XYZW;
386 src_reg.negate = 0;
387 src_reg.reladdr = reg.reladdr;
388
389 return src_reg;
390 }
391
392 /**
393 * Emits Mesa scalar opcodes to produce unique answers across channels.
394 *
395 * Some Mesa opcodes are scalar-only, as in ARB_fp/vp: the source's X
396 * channel alone determines the result written to every destination channel.
397 * So to do a vec4 of such an operation, we emit one scalar instruction per
398 * unique source channel needed to produce the destination channels.
399 */
400 void
401 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
402 enum prog_opcode op,
403 ir_to_mesa_dst_reg dst,
404 ir_to_mesa_src_reg orig_src0,
405 ir_to_mesa_src_reg orig_src1)
406 {
407 int i, j;
408 int done_mask = ~dst.writemask;
409
410 /* Mesa RCP is a scalar operation splatting results to all channels,
411 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
412 * dst channels.
413 */
414 for (i = 0; i < 4; i++) {
415 GLuint this_mask = (1 << i);
416 ir_to_mesa_instruction *inst;
417 ir_to_mesa_src_reg src0 = orig_src0;
418 ir_to_mesa_src_reg src1 = orig_src1;
419
420 if (done_mask & this_mask)
421 continue;
422
423 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
424 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
425 for (j = i + 1; j < 4; j++) {
426 if (!(done_mask & (1 << j)) &&
427 GET_SWZ(src0.swizzle, j) == src0_swiz &&
428 GET_SWZ(src1.swizzle, j) == src1_swiz) {
429 this_mask |= (1 << j);
430 }
431 }
432 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
433 src0_swiz, src0_swiz);
434 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
435 src1_swiz, src1_swiz);
436
437 inst = ir_to_mesa_emit_op2(ir, op,
438 dst,
439 src0,
440 src1);
441 inst->dst_reg.writemask = this_mask;
442 done_mask |= this_mask;
443 }
444 }
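/* For example, an RCP whose source swizzle is .xxyy needs only two scalar
 * instructions: one with writemask XY reading .xxxx and one with writemask ZW
 * reading .yyyy, since destination channels that share a source component are
 * grouped together above.
 */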
445
446 void
447 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
448 enum prog_opcode op,
449 ir_to_mesa_dst_reg dst,
450 ir_to_mesa_src_reg src0)
451 {
452 ir_to_mesa_src_reg undef = ir_to_mesa_undef;
453
454 undef.swizzle = SWIZZLE_XXXX;
455
456 ir_to_mesa_emit_scalar_op2(ir, op, dst, src0, undef);
457 }
458
459 struct ir_to_mesa_src_reg
460 ir_to_mesa_visitor::src_reg_for_float(float val)
461 {
462 ir_to_mesa_src_reg src_reg;
463
464 src_reg.file = PROGRAM_CONSTANT;
465 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
466 &val, 1, &src_reg.swizzle);
467 src_reg.reladdr = NULL;
468 src_reg.negate = 0;
469
470 return src_reg;
471 }
472
473 static int
474 type_size(const struct glsl_type *type)
475 {
476 unsigned int i;
477 int size;
478
479 switch (type->base_type) {
480 case GLSL_TYPE_UINT:
481 case GLSL_TYPE_INT:
482 case GLSL_TYPE_FLOAT:
483 case GLSL_TYPE_BOOL:
484 if (type->is_matrix()) {
485 return type->matrix_columns;
486 } else {
487 /* Regardless of size of vector, it gets a vec4. This is bad
488 * packing for things like floats, but otherwise arrays become a
489 * mess. Hopefully a later pass over the code can pack scalars
490 * down if appropriate.
491 */
492 return 1;
493 }
494 case GLSL_TYPE_ARRAY:
495 return type_size(type->fields.array) * type->length;
496 case GLSL_TYPE_STRUCT:
497 size = 0;
498 for (i = 0; i < type->length; i++) {
499 size += type_size(type->fields.structure[i].type);
500 }
501 return size;
502 default:
503 assert(0); return 0; /* unreachable; silences missing-return warnings */
504 }
505 }
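/* Examples of the slot counts above: a float or a vec3 each take one vec4
 * slot, a mat4 takes matrix_columns = 4 slots, and float[10] takes 10 slots.
 */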
506
507 /**
508 * In the initial pass of codegen, we assign temporary numbers to
509 * intermediate results. (not SSA -- variable assignments will reuse
510 * storage). Actual register allocation for the Mesa VM occurs in a
511 * pass over the Mesa IR later.
512 */
513 ir_to_mesa_src_reg
514 ir_to_mesa_visitor::get_temp(const glsl_type *type)
515 {
516 ir_to_mesa_src_reg src_reg;
517 int swizzle[4];
518 int i;
519
520 assert(!type->is_array());
521
522 src_reg.file = PROGRAM_TEMPORARY;
523 src_reg.index = next_temp;
524 src_reg.reladdr = NULL;
525 next_temp += type_size(type);
526
527 for (i = 0; i < type->vector_elements; i++)
528 swizzle[i] = i;
529 for (; i < 4; i++)
530 swizzle[i] = type->vector_elements - 1;
531 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
532 swizzle[2], swizzle[3]);
533 src_reg.negate = 0;
534
535 return src_reg;
536 }
537
538 variable_storage *
539 ir_to_mesa_visitor::find_variable_storage(ir_variable *var)
540 {
541
542 variable_storage *entry;
543
544 foreach_iter(exec_list_iterator, iter, this->variables) {
545 entry = (variable_storage *)iter.get();
546
547 if (entry->var == var)
548 return entry;
549 }
550
551 return NULL;
552 }
553
554 void
555 ir_to_mesa_visitor::visit(ir_variable *ir)
556 {
557 (void)ir;
558 }
559
560 void
561 ir_to_mesa_visitor::visit(ir_loop *ir)
562 {
563 assert(!ir->from);
564 assert(!ir->to);
565 assert(!ir->increment);
566 assert(!ir->counter);
567
568 ir_to_mesa_emit_op0(NULL, OPCODE_BGNLOOP);
569 visit_exec_list(&ir->body_instructions, this);
570 ir_to_mesa_emit_op0(NULL, OPCODE_ENDLOOP);
571 }
572
573 void
574 ir_to_mesa_visitor::visit(ir_loop_jump *ir)
575 {
576 switch (ir->mode) {
577 case ir_loop_jump::jump_break:
578 ir_to_mesa_emit_op0(NULL, OPCODE_BRK);
579 break;
580 case ir_loop_jump::jump_continue:
581 ir_to_mesa_emit_op0(NULL, OPCODE_CONT);
582 break;
583 }
584 }
585
586
587 void
588 ir_to_mesa_visitor::visit(ir_function_signature *ir)
589 {
590 assert(0);
591 (void)ir;
592 }
593
594 void
595 ir_to_mesa_visitor::visit(ir_function *ir)
596 {
597 /* Only emit main() inline here. Bodies of any other functions that are
598 * actually called get emitted later (see get_mesa_program()).
599 */
600 if (strcmp(ir->name, "main") == 0) {
601 const ir_function_signature *sig;
602 exec_list empty;
603
604 sig = ir->matching_signature(&empty);
605
606 assert(sig);
607
608 foreach_iter(exec_list_iterator, iter, sig->body) {
609 ir_instruction *ir = (ir_instruction *)iter.get();
610
611 ir->accept(this);
612 }
613 }
614 }
615
616 GLboolean
617 ir_to_mesa_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
618 {
619 int nonmul_operand = 1 - mul_operand;
620 ir_to_mesa_src_reg a, b, c;
621
622 ir_expression *expr = ir->operands[mul_operand]->as_expression();
623 if (!expr || expr->operation != ir_binop_mul)
624 return false;
625
626 expr->operands[0]->accept(this);
627 a = this->result;
628 expr->operands[1]->accept(this);
629 b = this->result;
630 ir->operands[nonmul_operand]->accept(this);
631 c = this->result;
632
633 this->result = get_temp(ir->type);
634 ir_to_mesa_emit_op3(ir, OPCODE_MAD,
635 ir_to_mesa_dst_reg_from_src(this->result), a, b, c);
636
637 return true;
638 }
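/* So an expression such as "a * b + c" (or "c + a * b") collapses into a
 * single MAD dst, a, b, c instead of a MUL into a temporary followed by an ADD.
 */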
639
640 void
641 ir_to_mesa_visitor::reladdr_to_temp(ir_instruction *ir,
642 ir_to_mesa_src_reg *reg, int *num_reladdr)
643 {
644 if (!reg->reladdr)
645 return;
646
647 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg, *reg->reladdr);
648
649 if (*num_reladdr != 1) {
650 ir_to_mesa_src_reg temp = get_temp(glsl_type::vec4_type);
651
652 ir_to_mesa_emit_op1(ir, OPCODE_MOV,
653 ir_to_mesa_dst_reg_from_src(temp), *reg);
654 *reg = temp;
655 }
656
657 (*num_reladdr)--;
658 }
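/* A note on the above: this code uses a single address register
 * (ir_to_mesa_address_reg), so only the last remaining relative-addressed
 * source keeps its reladdr for a direct ARL load; any earlier ones are
 * resolved into plain temporaries first.
 */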
659
660 void
661 ir_to_mesa_visitor::visit(ir_expression *ir)
662 {
663 unsigned int operand;
664 struct ir_to_mesa_src_reg op[2];
665 struct ir_to_mesa_src_reg result_src;
666 struct ir_to_mesa_dst_reg result_dst;
667 const glsl_type *vec4_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 4, 1);
668 const glsl_type *vec3_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 3, 1);
669 const glsl_type *vec2_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 2, 1);
670
671 /* Quick peephole: Emit OPCODE_MAD(a, b, c) instead of ADD(MUL(a, b), c)
672 */
673 if (ir->operation == ir_binop_add) {
674 if (try_emit_mad(ir, 1))
675 return;
676 if (try_emit_mad(ir, 0))
677 return;
678 }
679
680 for (operand = 0; operand < ir->get_num_operands(); operand++) {
681 this->result.file = PROGRAM_UNDEFINED;
682 ir->operands[operand]->accept(this);
683 if (this->result.file == PROGRAM_UNDEFINED) {
684 ir_print_visitor v;
685 printf("Failed to get tree for expression operand:\n");
686 ir->operands[operand]->accept(&v);
687 exit(1);
688 }
689 op[operand] = this->result;
690
691 /* Matrix expression operands should have been broken down to vector
692 * operations already.
693 */
694 assert(!ir->operands[operand]->type->is_matrix());
695 }
696
697 this->result.file = PROGRAM_UNDEFINED;
698
699 /* Storage for our result. Ideally for an assignment we'd be using
700 * the actual storage for the result here, instead.
701 */
702 result_src = get_temp(ir->type);
703 /* convenience for the emit functions below. */
704 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
705 /* Limit writes to the channels that will be used by result_src later.
706 * This does limit this temp's use as a temporary for multi-instruction
707 * sequences.
708 */
709 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
710
711 switch (ir->operation) {
712 case ir_unop_logic_not:
713 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst,
714 op[0], src_reg_for_float(0.0));
715 break;
716 case ir_unop_neg:
717 op[0].negate = ~op[0].negate;
718 result_src = op[0];
719 break;
720 case ir_unop_abs:
721 ir_to_mesa_emit_op1(ir, OPCODE_ABS, result_dst, op[0]);
722 break;
723 case ir_unop_sign:
724 ir_to_mesa_emit_op1(ir, OPCODE_SSG, result_dst, op[0]);
725 break;
726 case ir_unop_rcp:
727 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, op[0]);
728 break;
729
730 case ir_unop_exp:
731 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst,
732 src_reg_for_float(M_E), op[0]);
733 break;
734 case ir_unop_exp2:
735 ir_to_mesa_emit_scalar_op1(ir, OPCODE_EX2, result_dst, op[0]);
736 break;
737 case ir_unop_log:
738 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LOG, result_dst, op[0]);
739 break;
740 case ir_unop_log2:
741 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LG2, result_dst, op[0]);
742 break;
743 case ir_unop_sin:
744 ir_to_mesa_emit_scalar_op1(ir, OPCODE_SIN, result_dst, op[0]);
745 break;
746 case ir_unop_cos:
747 ir_to_mesa_emit_scalar_op1(ir, OPCODE_COS, result_dst, op[0]);
748 break;
749
750 case ir_unop_dFdx:
751 ir_to_mesa_emit_op1(ir, OPCODE_DDX, result_dst, op[0]);
752 break;
753 case ir_unop_dFdy:
754 ir_to_mesa_emit_op1(ir, OPCODE_DDY, result_dst, op[0]);
755 break;
756
757 case ir_binop_add:
758 ir_to_mesa_emit_op2(ir, OPCODE_ADD, result_dst, op[0], op[1]);
759 break;
760 case ir_binop_sub:
761 ir_to_mesa_emit_op2(ir, OPCODE_SUB, result_dst, op[0], op[1]);
762 break;
763
764 case ir_binop_mul:
765 ir_to_mesa_emit_op2(ir, OPCODE_MUL, result_dst, op[0], op[1]);
766 break;
767 case ir_binop_div:
768 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
769 case ir_binop_mod:
770 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
771 break;
772
773 case ir_binop_less:
774 ir_to_mesa_emit_op2(ir, OPCODE_SLT, result_dst, op[0], op[1]);
775 break;
776 case ir_binop_greater:
777 ir_to_mesa_emit_op2(ir, OPCODE_SGT, result_dst, op[0], op[1]);
778 break;
779 case ir_binop_lequal:
780 ir_to_mesa_emit_op2(ir, OPCODE_SLE, result_dst, op[0], op[1]);
781 break;
782 case ir_binop_gequal:
783 ir_to_mesa_emit_op2(ir, OPCODE_SGE, result_dst, op[0], op[1]);
784 break;
785 case ir_binop_equal:
786 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst, op[0], op[1]);
787 break;
788 case ir_binop_logic_xor:
789 case ir_binop_nequal:
790 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst, op[0], op[1]);
791 break;
792
793 case ir_binop_logic_or:
794 /* This could be a saturated add and skip the SNE. */
795 ir_to_mesa_emit_op2(ir, OPCODE_ADD,
796 result_dst,
797 op[0], op[1]);
798
799 ir_to_mesa_emit_op2(ir, OPCODE_SNE,
800 result_dst,
801 result_src, src_reg_for_float(0.0));
802 break;
803
804 case ir_binop_logic_and:
805 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
806 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
807 result_dst,
808 op[0], op[1]);
809 break;
810
811 case ir_binop_dot:
812 if (ir->operands[0]->type == vec4_type) {
813 assert(ir->operands[1]->type == vec4_type);
814 ir_to_mesa_emit_op2(ir, OPCODE_DP4,
815 result_dst,
816 op[0], op[1]);
817 } else if (ir->operands[0]->type == vec3_type) {
818 assert(ir->operands[1]->type == vec3_type);
819 ir_to_mesa_emit_op2(ir, OPCODE_DP3,
820 result_dst,
821 op[0], op[1]);
822 } else if (ir->operands[0]->type == vec2_type) {
823 assert(ir->operands[1]->type == vec2_type);
824 ir_to_mesa_emit_op2(ir, OPCODE_DP2,
825 result_dst,
826 op[0], op[1]);
827 }
828 break;
829
830 case ir_binop_cross:
831 ir_to_mesa_emit_op2(ir, OPCODE_XPD, result_dst, op[0], op[1]);
832 break;
833
834 case ir_unop_sqrt:
835 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
836 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, result_src);
837 /* For incoming channels < 0, set the result to 0. */
838 ir_to_mesa_emit_op3(ir, OPCODE_CMP, result_dst,
839 op[0], src_reg_for_float(0.0), result_src);
840 break;
841 case ir_unop_rsq:
842 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
843 break;
844 case ir_unop_i2f:
845 case ir_unop_b2f:
846 case ir_unop_b2i:
847 /* Mesa IR lacks types, ints are stored as truncated floats. */
848 result_src = op[0];
849 break;
850 case ir_unop_f2i:
851 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
852 break;
853 case ir_unop_f2b:
854 case ir_unop_i2b:
855 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst,
856 op[0], src_reg_for_float(0.0));
857 break;
858 case ir_unop_trunc:
859 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
860 break;
861 case ir_unop_ceil:
862 op[0].negate = ~op[0].negate;
863 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
864 result_src.negate = ~result_src.negate;
865 break;
866 case ir_unop_floor:
867 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
868 break;
869 case ir_unop_fract:
870 ir_to_mesa_emit_op1(ir, OPCODE_FRC, result_dst, op[0]);
871 break;
872
873 case ir_binop_min:
874 ir_to_mesa_emit_op2(ir, OPCODE_MIN, result_dst, op[0], op[1]);
875 break;
876 case ir_binop_max:
877 ir_to_mesa_emit_op2(ir, OPCODE_MAX, result_dst, op[0], op[1]);
878 break;
879 case ir_binop_pow:
880 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst, op[0], op[1]);
881 break;
882
883 case ir_unop_bit_not:
884 case ir_unop_u2f:
885 case ir_binop_lshift:
886 case ir_binop_rshift:
887 case ir_binop_bit_and:
888 case ir_binop_bit_xor:
889 case ir_binop_bit_or:
890 assert(!"GLSL 1.30 features unsupported");
891 break;
892 }
893
894 this->result = result_src;
895 }
896
897
898 void
899 ir_to_mesa_visitor::visit(ir_swizzle *ir)
900 {
901 ir_to_mesa_src_reg src_reg;
902 int i;
903 int swizzle[4];
904
905 /* Note that this is only swizzles in expressions, not those on the left
906 * hand side of an assignment, which do write masking. See ir_assignment
907 * for that.
908 */
909
910 ir->val->accept(this);
911 src_reg = this->result;
912 assert(src_reg.file != PROGRAM_UNDEFINED);
913
914 for (i = 0; i < 4; i++) {
915 if (i < ir->type->vector_elements) {
916 switch (i) {
917 case 0:
918 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.x);
919 break;
920 case 1:
921 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.y);
922 break;
923 case 2:
924 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.z);
925 break;
926 case 3:
927 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.w);
928 break;
929 }
930 } else {
931 /* If the type is smaller than a vec4, replicate the last
932 * channel out.
933 */
934 swizzle[i] = swizzle[ir->type->vector_elements - 1];
935 }
936 }
937
938 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0],
939 swizzle[1],
940 swizzle[2],
941 swizzle[3]);
942
943 this->result = src_reg;
944 }
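/* Swizzles compose with whatever the source already carries: if the value
 * being swizzled already reads .yzww and the IR swizzle is .xy (a vec2 result),
 * the emitted swizzle becomes .yzzz, with the last channel replicated to fill
 * the vec4.
 */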
945
946 static int
947 add_matrix_ref(struct gl_program *prog, int *tokens)
948 {
949 int base_pos = -1;
950 int i;
951
952 /* Add a ref for each column. It looks like the reason we do
953 * it this way is that _mesa_add_state_reference doesn't work
954 * for things that aren't vec4s, so the tokens[2]/tokens[3]
955 * range has to be equal.
956 */
957 for (i = 0; i < 4; i++) {
958 tokens[2] = i;
959 tokens[3] = i;
960 int pos = _mesa_add_state_reference(prog->Parameters,
961 (gl_state_index *)tokens);
962 if (base_pos == -1)
963 base_pos = pos;
964 else
965 assert(base_pos + i == pos);
966 }
967
968 return base_pos;
969 }
970
971 static variable_storage *
972 get_builtin_matrix_ref(void *mem_ctx, struct gl_program *prog, ir_variable *var,
973 ir_rvalue *array_index)
974 {
975 /*
976 * NOTE: The ARB_vertex_program extension specified that matrices get
977 * loaded in registers in row-major order. With GLSL, we want column-
978 * major order. So, we need to transpose all matrices here...
979 */
980 static const struct {
981 const char *name;
982 int matrix;
983 int modifier;
984 } matrices[] = {
985 { "gl_ModelViewMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_TRANSPOSE },
986 { "gl_ModelViewMatrixInverse", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVTRANS },
987 { "gl_ModelViewMatrixTranspose", STATE_MODELVIEW_MATRIX, 0 },
988 { "gl_ModelViewMatrixInverseTranspose", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
989
990 { "gl_ProjectionMatrix", STATE_PROJECTION_MATRIX, STATE_MATRIX_TRANSPOSE },
991 { "gl_ProjectionMatrixInverse", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVTRANS },
992 { "gl_ProjectionMatrixTranspose", STATE_PROJECTION_MATRIX, 0 },
993 { "gl_ProjectionMatrixInverseTranspose", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVERSE },
994
995 { "gl_ModelViewProjectionMatrix", STATE_MVP_MATRIX, STATE_MATRIX_TRANSPOSE },
996 { "gl_ModelViewProjectionMatrixInverse", STATE_MVP_MATRIX, STATE_MATRIX_INVTRANS },
997 { "gl_ModelViewProjectionMatrixTranspose", STATE_MVP_MATRIX, 0 },
998 { "gl_ModelViewProjectionMatrixInverseTranspose", STATE_MVP_MATRIX, STATE_MATRIX_INVERSE },
999
1000 { "gl_TextureMatrix", STATE_TEXTURE_MATRIX, STATE_MATRIX_TRANSPOSE },
1001 { "gl_TextureMatrixInverse", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVTRANS },
1002 { "gl_TextureMatrixTranspose", STATE_TEXTURE_MATRIX, 0 },
1003 { "gl_TextureMatrixInverseTranspose", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVERSE },
1004
1005 { "gl_NormalMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
1006
1007 };
1008 unsigned int i;
1009 variable_storage *entry;
1010
1011 /* C++ complains when a plain int is used where gl_state_index is expected,
1012 * so the token array is built as ints and cast. Make sure the two types
1013 * are the same size before doing so.
1014 assert(sizeof(gl_state_index) == sizeof(int));
1015
1016 for (i = 0; i < Elements(matrices); i++) {
1017 if (strcmp(var->name, matrices[i].name) == 0) {
1018 int tokens[STATE_LENGTH];
1019 int base_pos = -1;
1020
1021 tokens[0] = matrices[i].matrix;
1022 tokens[4] = matrices[i].modifier;
1023 if (matrices[i].matrix == STATE_TEXTURE_MATRIX) {
1024 ir_constant *index = array_index->constant_expression_value();
1025 if (index) {
1026 tokens[1] = index->value.i[0];
1027 base_pos = add_matrix_ref(prog, tokens);
1028 } else {
1029 for (i = 0; i < var->type->length; i++) {
1030 tokens[1] = i;
1031 int pos = add_matrix_ref(prog, tokens);
1032 if (base_pos == -1)
1033 base_pos = pos;
1034 else
1035 assert(base_pos + (int)i * 4 == pos);
1036 }
1037 }
1038 } else {
1039 tokens[1] = 0; /* unused array index */
1040 base_pos = add_matrix_ref(prog, tokens);
1041 }
1042 tokens[4] = matrices[i].modifier;
1043
1044 entry = new(mem_ctx) variable_storage(var,
1045 PROGRAM_STATE_VAR,
1046 base_pos);
1047
1048 return entry;
1049 }
1050 }
1051
1052 return NULL;
1053 }
1054
1055 void
1056 ir_to_mesa_visitor::visit(ir_dereference_variable *ir)
1057 {
1058 ir_to_mesa_src_reg src_reg;
1059 variable_storage *entry = find_variable_storage(ir->var);
1060 unsigned int loc;
1061
1062 if (!entry) {
1063 switch (ir->var->mode) {
1064 case ir_var_uniform:
1065 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, ir->var,
1066 NULL);
1067 if (entry)
1068 break;
1069
1070 /* FINISHME: Fix up uniform name for arrays and things */
1071 if (ir->var->type->base_type == GLSL_TYPE_SAMPLER) {
1072 /* FINISHME: we whack the location of the var here, which
1073 * is probably not expected. But we need to communicate
1074 * mesa's sampler number to the tex instruction.
1075 */
1076 int sampler = _mesa_add_sampler(this->prog->Parameters,
1077 ir->var->name,
1078 ir->var->type->gl_type);
1079 map_sampler(ir->var->location, sampler);
1080
1081 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_SAMPLER,
1082 sampler);
1083 this->variables.push_tail(entry);
1084 break;
1085 }
1086
1087 assert(ir->var->type->gl_type != 0 &&
1088 ir->var->type->gl_type != GL_INVALID_ENUM);
1089 loc = _mesa_add_uniform(this->prog->Parameters,
1090 ir->var->name,
1091 type_size(ir->var->type) * 4,
1092 ir->var->type->gl_type,
1093 NULL);
1094
1095 /* Always mark the uniform used at this point. If it isn't
1096 * used, dead code elimination should have nuked the decl already.
1097 */
1098 this->prog->Parameters->Parameters[loc].Used = GL_TRUE;
1099
1100 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_UNIFORM, loc);
1101 this->variables.push_tail(entry);
1102 break;
1103 case ir_var_in:
1104 case ir_var_out:
1105 case ir_var_inout:
1106 /* The linker assigns locations for varyings and attributes,
1107 * including deprecated builtins (like gl_Color), user-assigned
1108 * generic attributes (glBindAttribLocation), and
1109 * user-defined varyings.
1110 *
1111 * FINISHME: We would hit this path for function arguments. Fix!
1112 */
1113 assert(ir->var->location != -1);
1114 if (ir->var->mode == ir_var_in ||
1115 ir->var->mode == ir_var_inout) {
1116 entry = new(mem_ctx) variable_storage(ir->var,
1117 PROGRAM_INPUT,
1118 ir->var->location);
1119
1120 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1121 ir->var->location >= VERT_ATTRIB_GENERIC0) {
1122 _mesa_add_attribute(prog->Attributes,
1123 ir->var->name,
1124 type_size(ir->var->type) * 4,
1125 ir->var->type->gl_type,
1126 ir->var->location - VERT_ATTRIB_GENERIC0);
1127 }
1128 } else {
1129 entry = new(mem_ctx) variable_storage(ir->var,
1130 PROGRAM_OUTPUT,
1131 ir->var->location);
1132 }
1133
1134 break;
1135 case ir_var_auto:
1136 case ir_var_temporary:
1137 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_TEMPORARY,
1138 this->next_temp);
1139 this->variables.push_tail(entry);
1140
1141 next_temp += type_size(ir->var->type);
1142 break;
1143 }
1144
1145 if (!entry) {
1146 printf("Failed to make storage for %s\n", ir->var->name);
1147 exit(1);
1148 }
1149 }
1150
1151 src_reg.file = entry->file;
1152 src_reg.index = entry->index;
1153 /* If the type is smaller than a vec4, replicate the last channel out. */
1154 src_reg.swizzle = swizzle_for_size(ir->var->type->vector_elements);
1155 src_reg.reladdr = NULL;
1156 src_reg.negate = 0;
1157
1158 this->result = src_reg;
1159 }
1160
1161 void
1162 ir_to_mesa_visitor::visit(ir_dereference_array *ir)
1163 {
1164 ir_constant *index;
1165 ir_to_mesa_src_reg src_reg;
1166 ir_dereference_variable *deref_var = ir->array->as_dereference_variable();
1167 int element_size = type_size(ir->type);
1168
1169 index = ir->array_index->constant_expression_value();
1170
1171 if (deref_var && strncmp(deref_var->var->name,
1172 "gl_TextureMatrix",
1173 strlen("gl_TextureMatrix")) == 0) {
1174 ir_to_mesa_src_reg src_reg;
1175 struct variable_storage *entry;
1176
1177 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, deref_var->var,
1178 ir->array_index);
1179 assert(entry);
1180
1181 src_reg.file = entry->file;
1182 src_reg.index = entry->index;
1183 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1184 src_reg.negate = 0;
1185
1186 if (index) {
1187 src_reg.reladdr = NULL;
1188 } else {
1189 ir_to_mesa_src_reg index_reg = get_temp(glsl_type::float_type);
1190
1191 ir->array_index->accept(this);
1192 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1193 ir_to_mesa_dst_reg_from_src(index_reg),
1194 this->result, src_reg_for_float(element_size));
1195
1196 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1197 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1198 }
1199
1200 this->result = src_reg;
1201 return;
1202 }
1203
1204 ir->array->accept(this);
1205 src_reg = this->result;
1206
1207 if (index) {
1208 src_reg.index += index->value.i[0] * element_size;
1209 } else {
1210 ir_to_mesa_src_reg array_base = this->result;
1211 /* Variable index array dereference. It eats the "vec4" of the
1212 * base of the array and an index that offsets the Mesa register
1213 * index.
1214 */
1215 ir->array_index->accept(this);
1216
1217 ir_to_mesa_src_reg index_reg;
1218
1219 if (element_size == 1) {
1220 index_reg = this->result;
1221 } else {
1222 index_reg = get_temp(glsl_type::float_type);
1223
1224 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1225 ir_to_mesa_dst_reg_from_src(index_reg),
1226 this->result, src_reg_for_float(element_size));
1227 }
1228
1229 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1230 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1231 }
1232
1233 /* If the type is smaller than a vec4, replicate the last channel out. */
1234 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1235
1236 this->result = src_reg;
1237 }
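/* For a variable index such as "mat4 m[8]; m[i]", element_size is 4, so the
 * index is scaled by 4 into a temporary that becomes src_reg.reladdr; the ARL
 * load itself happens later, when the instruction consuming this register is
 * emitted (see reladdr_to_temp / ir_to_mesa_emit_op3).
 */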
1238
1239 void
1240 ir_to_mesa_visitor::visit(ir_dereference_record *ir)
1241 {
1242 unsigned int i;
1243 const glsl_type *struct_type = ir->record->type;
1244 int offset = 0;
1245
1246 ir->record->accept(this);
1247
1248 for (i = 0; i < struct_type->length; i++) {
1249 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1250 break;
1251 offset += type_size(struct_type->fields.structure[i].type);
1252 }
1253 this->result.index += offset;
1254 }
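/* Struct members are laid out sequentially in register slots, so for
 *    struct { vec4 a; mat3 b; float c; } s;
 * a reference to s.c lands at the record's base index plus 1 (a) + 3 (b) = 4.
 */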
1255
1256 /**
1257 * We want to be careful in assignment setup to hit the actual storage
1258 * instead of potentially using a temporary like we might with the
1259 * ir_dereference handler.
1260 *
1261 * Thanks to ir_swizzle_swizzle, and ir_vec_index_to_swizzle, we
1262 * should only see potentially one variable array index of a vector,
1263 * and one swizzle, before getting to actual vec4 storage. So handle
1264 * those, then go use ir_dereference to handle the rest.
1265 */
1266 static struct ir_to_mesa_dst_reg
1267 get_assignment_lhs(ir_instruction *ir, ir_to_mesa_visitor *v,
1268 ir_to_mesa_src_reg *r)
1269 {
1270 struct ir_to_mesa_dst_reg dst_reg;
1271 ir_swizzle *swiz;
1272
1273 ir_dereference_array *deref_array = ir->as_dereference_array();
1274 /* This should have been handled by ir_vec_index_to_cond_assign */
1275 if (deref_array) {
1276 assert(!deref_array->array->type->is_vector());
1277 }
1278
1279 /* Use the rvalue deref handler for the most part. We'll ignore
1280 * swizzles in it and write swizzles using writemask, though.
1281 */
1282 ir->accept(v);
1283 dst_reg = ir_to_mesa_dst_reg_from_src(v->result);
1284
1285 if ((swiz = ir->as_swizzle())) {
1286 int swizzles[4] = {
1287 swiz->mask.x,
1288 swiz->mask.y,
1289 swiz->mask.z,
1290 swiz->mask.w
1291 };
1292 int new_r_swizzle[4];
1293 int orig_r_swizzle = r->swizzle;
1294 int i;
1295
1296 for (i = 0; i < 4; i++) {
1297 new_r_swizzle[i] = GET_SWZ(orig_r_swizzle, 0);
1298 }
1299
1300 dst_reg.writemask = 0;
1301 for (i = 0; i < 4; i++) {
1302 if (i < swiz->mask.num_components) {
1303 dst_reg.writemask |= 1 << swizzles[i];
1304 new_r_swizzle[swizzles[i]] = GET_SWZ(orig_r_swizzle, i);
1305 }
1306 }
1307
1308 r->swizzle = MAKE_SWIZZLE4(new_r_swizzle[0],
1309 new_r_swizzle[1],
1310 new_r_swizzle[2],
1311 new_r_swizzle[3]);
1312 }
1313
1314 return dst_reg;
1315 }
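/* For example, "v.zx = u.xy" becomes a write to v with writemask XZ, with u's
 * swizzle remapped so that the X channel of the write reads u.y and the Z
 * channel reads u.x.
 */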
1316
1317 void
1318 ir_to_mesa_visitor::visit(ir_assignment *ir)
1319 {
1320 struct ir_to_mesa_dst_reg l;
1321 struct ir_to_mesa_src_reg r;
1322 int i;
1323
1324 assert(!ir->lhs->type->is_array());
1325 assert(ir->lhs->type->base_type != GLSL_TYPE_STRUCT);
1326
1327 ir->rhs->accept(this);
1328 r = this->result;
1329
1330 l = get_assignment_lhs(ir->lhs, this, &r);
1331
1332 assert(l.file != PROGRAM_UNDEFINED);
1333 assert(r.file != PROGRAM_UNDEFINED);
1334
1335 if (ir->condition) {
1336 ir_to_mesa_src_reg condition;
1337
1338 ir->condition->accept(this);
1339 condition = this->result;
1340
1341 /* We use the OPCODE_CMP (a < 0 ? b : c) for conditional moves,
1342 * and the condition we produced is 0.0 or 1.0. By flipping the
1343 * sign, we can choose which value OPCODE_CMP produces without an
1344 * extra instruction computing the condition.
1345 */
1346 condition.negate = ~condition.negate;
1347 for (i = 0; i < type_size(ir->lhs->type); i++) {
1348 ir_to_mesa_emit_op3(ir, OPCODE_CMP, l,
1349 condition, r, ir_to_mesa_src_reg_from_dst(l));
1350 l.index++;
1351 r.index++;
1352 }
1353 } else {
1354 for (i = 0; i < type_size(ir->lhs->type); i++) {
1355 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1356 l.index++;
1357 r.index++;
1358 }
1359 }
1360 }
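/* So a conditional assignment ends up, per vec4 of storage, as
 *    CMP l, -condition, r, l
 * where the condition is 1.0 or 0.0: negated it is -1.0 (select r) or -0.0
 * (keep the old contents of l).
 */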
1361
1362
1363 void
1364 ir_to_mesa_visitor::visit(ir_constant *ir)
1365 {
1366 ir_to_mesa_src_reg src_reg;
1367 GLfloat stack_vals[4];
1368 GLfloat *values = stack_vals;
1369 unsigned int i;
1370
1371 if (ir->type->is_array()) {
1372 ir->print();
1373 printf("\n");
1374 assert(!"FINISHME: array constants");
1375 }
1376
1377 if (ir->type->is_matrix()) {
1378 /* Unfortunately, 4 floats is all we can get into
1379 * _mesa_add_unnamed_constant. So, make a temp to store the
1380 * matrix and move each constant value into it. If we get
1381 * lucky, copy propagation will eliminate the extra moves.
1382 */
1383 ir_to_mesa_src_reg mat = get_temp(glsl_type::vec4_type);
1384 ir_to_mesa_dst_reg mat_column = ir_to_mesa_dst_reg_from_src(mat);
1385
1386 for (i = 0; i < ir->type->matrix_columns; i++) {
1387 src_reg.file = PROGRAM_CONSTANT;
1388
1389 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1390 values = &ir->value.f[i * ir->type->vector_elements];
1391
1392 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1393 values,
1394 ir->type->vector_elements,
1395 &src_reg.swizzle);
1396 src_reg.reladdr = NULL;
1397 src_reg.negate = 0;
1398 ir_to_mesa_emit_op1(ir, OPCODE_MOV, mat_column, src_reg);
1399
1400 mat_column.index++;
1401 }
1402
1403 this->result = mat; return; /* don't fall through and clobber the matrix result below */
1404 }
1405
1406 src_reg.file = PROGRAM_CONSTANT;
1407 switch (ir->type->base_type) {
1408 case GLSL_TYPE_FLOAT:
1409 values = &ir->value.f[0];
1410 break;
1411 case GLSL_TYPE_UINT:
1412 for (i = 0; i < ir->type->vector_elements; i++) {
1413 values[i] = ir->value.u[i];
1414 }
1415 break;
1416 case GLSL_TYPE_INT:
1417 for (i = 0; i < ir->type->vector_elements; i++) {
1418 values[i] = ir->value.i[i];
1419 }
1420 break;
1421 case GLSL_TYPE_BOOL:
1422 for (i = 0; i < ir->type->vector_elements; i++) {
1423 values[i] = ir->value.b[i];
1424 }
1425 break;
1426 default:
1427 assert(!"Non-float/uint/int/bool constant");
1428 }
1429
1430 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1431 values, ir->type->vector_elements,
1432 &src_reg.swizzle);
1433 src_reg.reladdr = NULL;
1434 src_reg.negate = 0;
1435
1436 this->result = src_reg;
1437 }
1438
1439 function_entry *
1440 ir_to_mesa_visitor::get_function_signature(ir_function_signature *sig)
1441 {
1442 function_entry *entry;
1443
1444 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
1445 entry = (function_entry *)iter.get();
1446
1447 if (entry->sig == sig)
1448 return entry;
1449 }
1450
1451 entry = talloc(mem_ctx, function_entry);
1452 entry->sig = sig;
1453 entry->sig_id = this->next_signature_id++;
1454 entry->bgn_inst = NULL;
1455
1456 /* Allocate storage for all the parameters. */
1457 foreach_iter(exec_list_iterator, iter, sig->parameters) {
1458 ir_variable *param = (ir_variable *)iter.get();
1459 variable_storage *storage;
1460
1461 storage = find_variable_storage(param);
1462 assert(!storage);
1463
1464 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
1465 this->next_temp);
1466 this->variables.push_tail(storage);
1467
1468 this->next_temp += type_size(param->type);
1469
1470 }
1471
1472 if (sig->return_type) {
1473 entry->return_reg = get_temp(sig->return_type);
1474 } else {
1475 entry->return_reg = ir_to_mesa_undef;
1476 }
1477
1478 this->function_signatures.push_tail(entry);
1479 return entry;
1480 }
1481
1482 void
1483 ir_to_mesa_visitor::visit(ir_call *ir)
1484 {
1485 ir_to_mesa_instruction *call_inst;
1486 ir_function_signature *sig = ir->get_callee();
1487 function_entry *entry = get_function_signature(sig);
1488 int i;
1489
1490 /* Process in parameters. */
1491 exec_list_iterator sig_iter = sig->parameters.iterator();
1492 foreach_iter(exec_list_iterator, iter, *ir) {
1493 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1494 ir_variable *param = (ir_variable *)sig_iter.get();
1495
1496 if (param->mode == ir_var_in ||
1497 param->mode == ir_var_inout) {
1498 variable_storage *storage = find_variable_storage(param);
1499 assert(storage);
1500
1501 param_rval->accept(this);
1502 ir_to_mesa_src_reg r = this->result;
1503
1504 ir_to_mesa_dst_reg l;
1505 l.file = storage->file;
1506 l.index = storage->index;
1507 l.reladdr = NULL;
1508 l.writemask = WRITEMASK_XYZW;
1509 l.cond_mask = COND_TR;
1510
1511 for (i = 0; i < type_size(param->type); i++) {
1512 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1513 l.index++;
1514 r.index++;
1515 }
1516 }
1517
1518 sig_iter.next();
1519 }
1520 assert(!sig_iter.has_next());
1521
1522 /* Emit call instruction */
1523 call_inst = ir_to_mesa_emit_op1(ir, OPCODE_CAL,
1524 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1525 call_inst->function = entry;
1526
1527 /* Process out parameters. */
1528 sig_iter = sig->parameters.iterator();
1529 foreach_iter(exec_list_iterator, iter, *ir) {
1530 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1531 ir_variable *param = (ir_variable *)sig_iter.get();
1532
1533 if (param->mode == ir_var_out ||
1534 param->mode == ir_var_inout) {
1535 variable_storage *storage = find_variable_storage(param);
1536 assert(storage);
1537
1538 ir_to_mesa_src_reg r;
1539 r.file = storage->file;
1540 r.index = storage->index;
1541 r.reladdr = NULL;
1542 r.swizzle = SWIZZLE_NOOP;
1543 r.negate = 0;
1544
1545 param_rval->accept(this);
1546 ir_to_mesa_dst_reg l = ir_to_mesa_dst_reg_from_src(this->result);
1547
1548 for (i = 0; i < type_size(param->type); i++) {
1549 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1550 l.index++;
1551 r.index++;
1552 }
1553 }
1554
1555 sig_iter.next();
1556 }
1557 assert(!sig_iter.has_next());
1558
1559 /* Process return value. */
1560 this->result = entry->return_reg;
1561 }
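/* The calling convention implemented above, in short: copy in/inout actuals
 * into the callee's pre-allocated parameter temporaries, emit CAL, copy
 * out/inout parameter temporaries back into the actuals, and leave the return
 * value in entry->return_reg.
 */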
1562
1563
1564 void
1565 ir_to_mesa_visitor::visit(ir_texture *ir)
1566 {
1567 ir_to_mesa_src_reg result_src, coord, lod_info, projector;
1568 ir_to_mesa_dst_reg result_dst, coord_dst;
1569 ir_to_mesa_instruction *inst = NULL;
1570 prog_opcode opcode = OPCODE_NOP;
1571
1572 ir->coordinate->accept(this);
1573
1574 /* Put our coords in a temp. We'll need to modify them for shadow,
1575 * projection, or LOD, so the only case where we'd use it as-is is
1576 * plain old texturing. Mesa IR optimization should
1577 * handle cleaning up our mess in that case.
1578 */
1579 coord = get_temp(glsl_type::vec4_type);
1580 coord_dst = ir_to_mesa_dst_reg_from_src(coord);
1581 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst,
1582 this->result);
1583
1584 if (ir->projector) {
1585 ir->projector->accept(this);
1586 projector = this->result;
1587 }
1588
1589 /* Storage for our result. Ideally for an assignment we'd be using
1590 * the actual storage for the result here, instead.
1591 */
1592 result_src = get_temp(glsl_type::vec4_type);
1593 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
1594
1595 switch (ir->op) {
1596 case ir_tex:
1597 opcode = OPCODE_TEX;
1598 break;
1599 case ir_txb:
1600 opcode = OPCODE_TXB;
1601 ir->lod_info.bias->accept(this);
1602 lod_info = this->result;
1603 break;
1604 case ir_txl:
1605 opcode = OPCODE_TXL;
1606 ir->lod_info.lod->accept(this);
1607 lod_info = this->result;
1608 break;
1609 case ir_txd:
1610 case ir_txf:
1611 assert(!"GLSL 1.30 features unsupported");
1612 break;
1613 }
1614
1615 if (ir->projector) {
1616 if (opcode == OPCODE_TEX) {
1617 /* Slot the projector in as the last component of the coord. */
1618 coord_dst.writemask = WRITEMASK_W;
1619 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, projector);
1620 coord_dst.writemask = WRITEMASK_XYZW;
1621 opcode = OPCODE_TXP;
1622 } else {
1623 ir_to_mesa_src_reg coord_w = coord;
1624 coord_w.swizzle = SWIZZLE_WWWW;
1625
1626 /* For the other TEX opcodes there's no projective version
1627 * since the last slot is taken up by lod info. Do the
1628 * projective divide now.
1629 */
1630 coord_dst.writemask = WRITEMASK_W;
1631 ir_to_mesa_emit_op1(ir, OPCODE_RCP, coord_dst, projector);
1632
1633 coord_dst.writemask = WRITEMASK_XYZ;
1634 ir_to_mesa_emit_op2(ir, OPCODE_MUL, coord_dst, coord, coord_w);
1635
1636 coord_dst.writemask = WRITEMASK_XYZW;
1637 coord.swizzle = SWIZZLE_XYZW;
1638 }
1639 }
1640
1641 if (ir->shadow_comparitor) {
1642 /* Slot the shadow value in as the second to last component of the
1643 * coord.
1644 */
1645 ir->shadow_comparitor->accept(this);
1646 coord_dst.writemask = WRITEMASK_Z;
1647 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, this->result);
1648 coord_dst.writemask = WRITEMASK_XYZW;
1649 }
1650
1651 if (opcode == OPCODE_TXL || opcode == OPCODE_TXB) {
1652 /* Mesa IR stores lod or lod bias in the last channel of the coords. */
1653 coord_dst.writemask = WRITEMASK_W;
1654 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, lod_info);
1655 coord_dst.writemask = WRITEMASK_XYZW;
1656 }
1657
1658 inst = ir_to_mesa_emit_op1(ir, opcode, result_dst, coord);
1659
1660 if (ir->shadow_comparitor)
1661 inst->tex_shadow = GL_TRUE;
1662
1663 ir_dereference_variable *sampler = ir->sampler->as_dereference_variable();
1664 assert(sampler); /* FINISHME: sampler arrays */
1665 /* generate the mapping, remove when we generate storage at
1666 * declaration time
1667 */
1668 sampler->accept(this);
1669
1670 inst->sampler = get_sampler_number(sampler->var->location);
1671
1672 switch (sampler->type->sampler_dimensionality) {
1673 case GLSL_SAMPLER_DIM_1D:
1674 inst->tex_target = TEXTURE_1D_INDEX;
1675 break;
1676 case GLSL_SAMPLER_DIM_2D:
1677 inst->tex_target = TEXTURE_2D_INDEX;
1678 break;
1679 case GLSL_SAMPLER_DIM_3D:
1680 inst->tex_target = TEXTURE_3D_INDEX;
1681 break;
1682 case GLSL_SAMPLER_DIM_CUBE:
1683 inst->tex_target = TEXTURE_CUBE_INDEX;
1684 break;
1685 default:
1686 assert(!"FINISHME: other texture targets");
1687 }
1688
1689 this->result = result_src;
1690 }
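/* For example, a projective 2D lookup becomes TXP with the projector moved
 * into coord.w, while bias/LOD lookups become TXB/TXL with the bias or LOD in
 * coord.w (after dividing the projector through, if there is one).
 */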
1691
1692 void
1693 ir_to_mesa_visitor::visit(ir_return *ir)
1694 {
1695 assert(current_function);
1696
1697 if (ir->get_value()) {
1698 ir_to_mesa_dst_reg l;
1699 int i;
1700
1701 ir->get_value()->accept(this);
1702 ir_to_mesa_src_reg r = this->result;
1703
1704 l = ir_to_mesa_dst_reg_from_src(current_function->return_reg);
1705
1706 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
1707 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1708 l.index++;
1709 r.index++;
1710 }
1711 }
1712
1713 ir_to_mesa_emit_op0(ir, OPCODE_RET);
1714 }
1715
1716 void
1717 ir_to_mesa_visitor::visit(ir_discard *ir)
1718 {
1719 assert(ir->condition == NULL); /* FINISHME */
1720
1721 ir_to_mesa_emit_op0(ir, OPCODE_KIL_NV);
1722 }
1723
1724 void
1725 ir_to_mesa_visitor::visit(ir_if *ir)
1726 {
1727 ir_to_mesa_instruction *cond_inst, *if_inst, *else_inst = NULL;
1728 ir_to_mesa_instruction *prev_inst;
1729
1730 prev_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1731
1732 ir->condition->accept(this);
1733 assert(this->result.file != PROGRAM_UNDEFINED);
1734
1735 if (ctx->Shader.EmitCondCodes) {
1736 cond_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1737
1738 /* See if we actually generated any instructions while computing
1739 * the condition. If not, then cook up a move to a temp so we
1740 * have something to set cond_update on.
1741 */
1742 if (cond_inst == prev_inst) {
1743 ir_to_mesa_src_reg temp = get_temp(glsl_type::bool_type);
1744 cond_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_MOV,
1745 ir_to_mesa_dst_reg_from_src(temp),
1746 result);
1747 }
1748 cond_inst->cond_update = GL_TRUE;
1749
1750 if_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_IF);
1751 if_inst->dst_reg.cond_mask = COND_NE;
1752 } else {
1753 if_inst = ir_to_mesa_emit_op1(ir->condition,
1754 OPCODE_IF, ir_to_mesa_undef_dst,
1755 this->result);
1756 }
1757
1758 this->instructions.push_tail(if_inst);
1759
1760 visit_exec_list(&ir->then_instructions, this);
1761
1762 if (!ir->else_instructions.is_empty()) {
1763 else_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_ELSE);
1764 visit_exec_list(&ir->else_instructions, this);
1765 }
1766
1767 if_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_ENDIF,
1768 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1769 }
1770
1771 ir_to_mesa_visitor::ir_to_mesa_visitor()
1772 {
1773 result.file = PROGRAM_UNDEFINED;
1774 next_temp = 1;
1775 next_signature_id = 1;
1776 sampler_map = NULL;
1777 sampler_map_size = 0;
1778 current_function = NULL;
1779 }
1780
1781 static struct prog_src_register
1782 mesa_src_reg_from_ir_src_reg(ir_to_mesa_src_reg reg)
1783 {
1784 struct prog_src_register mesa_reg;
1785
1786 mesa_reg.File = reg.file;
1787 assert(reg.index < (1 << INST_INDEX_BITS) - 1);
1788 mesa_reg.Index = reg.index;
1789 mesa_reg.Swizzle = reg.swizzle;
1790 mesa_reg.RelAddr = reg.reladdr != NULL;
1791 mesa_reg.Negate = reg.negate;
1792 mesa_reg.Abs = 0;
1793
1794 return mesa_reg;
1795 }
1796
1797 static void
1798 set_branchtargets(ir_to_mesa_visitor *v,
1799 struct prog_instruction *mesa_instructions,
1800 int num_instructions)
1801 {
1802 int if_count = 0, loop_count = 0;
1803 int *if_stack, *loop_stack;
1804 int if_stack_pos = 0, loop_stack_pos = 0;
1805 int i, j;
1806
1807 for (i = 0; i < num_instructions; i++) {
1808 switch (mesa_instructions[i].Opcode) {
1809 case OPCODE_IF:
1810 if_count++;
1811 break;
1812 case OPCODE_BGNLOOP:
1813 loop_count++;
1814 break;
1815 case OPCODE_BRK:
1816 case OPCODE_CONT:
1817 mesa_instructions[i].BranchTarget = -1;
1818 break;
1819 default:
1820 break;
1821 }
1822 }
1823
1824 if_stack = (int *)calloc(if_count, sizeof(*if_stack));
1825 loop_stack = (int *)calloc(loop_count, sizeof(*loop_stack));
1826
1827 for (i = 0; i < num_instructions; i++) {
1828 switch (mesa_instructions[i].Opcode) {
1829 case OPCODE_IF:
1830 if_stack[if_stack_pos] = i;
1831 if_stack_pos++;
1832 break;
1833 case OPCODE_ELSE:
1834 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1835 if_stack[if_stack_pos - 1] = i;
1836 break;
1837 case OPCODE_ENDIF:
1838 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1839 if_stack_pos--;
1840 break;
1841 case OPCODE_BGNLOOP:
1842 loop_stack[loop_stack_pos] = i;
1843 loop_stack_pos++;
1844 break;
1845 case OPCODE_ENDLOOP:
1846 loop_stack_pos--;
1847 /* Rewrite any breaks/conts at this nesting level (that haven't
1848 * already had a BranchTarget assigned) to point to the end
1849 * of the loop.
1850 */
1851 for (j = loop_stack[loop_stack_pos]; j < i; j++) {
1852 if (mesa_instructions[j].Opcode == OPCODE_BRK ||
1853 mesa_instructions[j].Opcode == OPCODE_CONT) {
1854 if (mesa_instructions[j].BranchTarget == -1) {
1855 mesa_instructions[j].BranchTarget = i;
1856 }
1857 }
1858 }
1859 /* The loop ends point at each other. */
1860 mesa_instructions[i].BranchTarget = loop_stack[loop_stack_pos];
1861 mesa_instructions[loop_stack[loop_stack_pos]].BranchTarget = i;
1862 break;
1863 case OPCODE_CAL:
1864 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
1865 function_entry *entry = (function_entry *)iter.get();
1866
1867 if (entry->sig_id == mesa_instructions[i].BranchTarget) {
1868 mesa_instructions[i].BranchTarget = entry->inst;
1869 break;
1870 }
1871 }
1872 break;
1873 default:
1874 break;
1875 }
1876 }
1877
1878 free(if_stack); free(loop_stack);
1879 }
1880
1881 static void
1882 print_program(struct prog_instruction *mesa_instructions,
1883 ir_instruction **mesa_instruction_annotation,
1884 int num_instructions)
1885 {
1886 ir_instruction *last_ir = NULL;
1887 int i;
1888
1889 for (i = 0; i < num_instructions; i++) {
1890 struct prog_instruction *mesa_inst = mesa_instructions + i;
1891 ir_instruction *ir = mesa_instruction_annotation[i];
1892
1893 if (last_ir != ir && ir) {
1894 ir_print_visitor print;
1895 ir->accept(&print);
1896 printf("\n");
1897 last_ir = ir;
1898 }
1899
1900 _mesa_print_instruction(mesa_inst);
1901 }
1902 }
1903
1904 static void
1905 mark_input(struct gl_program *prog,
1906 int index,
1907 GLboolean reladdr)
1908 {
1909 prog->InputsRead |= BITFIELD64_BIT(index);
1910 int i;
1911
1912 if (reladdr) {
1913 if (index >= FRAG_ATTRIB_TEX0 && index <= FRAG_ATTRIB_TEX7) {
1914 for (i = 0; i < 8; i++) {
1915 prog->InputsRead |= BITFIELD64_BIT(FRAG_ATTRIB_TEX0 + i);
1916 }
1917 } else {
1918 assert(!"FINISHME: Mark InputsRead for varying arrays");
1919 }
1920 }
1921 }
1922
1923 static void
1924 mark_output(struct gl_program *prog,
1925 int index,
1926 GLboolean reladdr)
1927 {
1928 prog->OutputsWritten |= BITFIELD64_BIT(index);
1929 int i;
1930
1931 if (reladdr) {
1932 if (index >= VERT_RESULT_TEX0 && index <= VERT_RESULT_TEX7) {
1933 for (i = 0; i < 8; i++) {
1934 prog->OutputsWritten |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
1935 }
1936 } else {
1937 assert(!"FINISHME: Mark OutputsWritten for varying arrays");
1938 }
1939 }
1940 }
1941
1942 static void
1943 count_resources(struct gl_program *prog)
1944 {
1945 unsigned int i;
1946
1947 prog->InputsRead = 0;
1948 prog->OutputsWritten = 0;
1949 prog->SamplersUsed = 0;
1950
1951 for (i = 0; i < prog->NumInstructions; i++) {
1952 struct prog_instruction *inst = &prog->Instructions[i];
1953 unsigned int reg;
1954
1955 switch (inst->DstReg.File) {
1956 case PROGRAM_OUTPUT:
1957 mark_output(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1958 break;
1959 case PROGRAM_INPUT:
1960 mark_input(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1961 break;
1962 default:
1963 break;
1964 }
1965
1966 for (reg = 0; reg < _mesa_num_inst_src_regs(inst->Opcode); reg++) {
1967 switch (inst->SrcReg[reg].File) {
1968 case PROGRAM_OUTPUT:
1969 mark_output(prog, inst->SrcReg[reg].Index,
1970 inst->SrcReg[reg].RelAddr);
1971 break;
1972 case PROGRAM_INPUT:
1973 mark_input(prog, inst->SrcReg[reg].Index, inst->SrcReg[reg].RelAddr);
1974 break;
1975 default:
1976 break;
1977 }
1978 }
1979
1980 /* Instead of just using the uniform's value to map to a
1981 * sampler, Mesa first allocates a separate number for the
1982 * sampler (_mesa_add_sampler), then we reindex it down to a
1983 * small integer (sampler_map[], SamplersUsed), then that gets
1984 * mapped to the uniform's value, and we get an actual sampler.
1985 */
1986 if (_mesa_is_tex_instruction(inst->Opcode)) {
1987 prog->SamplerTargets[inst->TexSrcUnit] =
1988 (gl_texture_index)inst->TexSrcTarget;
1989 prog->SamplersUsed |= 1 << inst->TexSrcUnit;
1990 if (inst->TexShadow) {
1991 prog->ShadowSamplers |= 1 << inst->TexSrcUnit;
1992 }
1993 }
1994 }
1995
1996 _mesa_update_shader_textures_used(prog);
1997 }
1998
1999 /* Each stage has some uniforms in its Parameters list. The Uniforms
2000 * list for the linked shader program holds pointers to those entries in
2001 * each stage's Parameters list, so that setting a uniform updates the
2002 * value in every stage that uses it.
2003 */
2004 static void
2005 link_uniforms_to_shared_uniform_list(struct gl_uniform_list *uniforms,
2006 struct gl_program *prog)
2007 {
2008 unsigned int i;
2009
2010 for (i = 0; i < prog->Parameters->NumParameters; i++) {
2011 const struct gl_program_parameter *p = prog->Parameters->Parameters + i;
2012
2013 if (p->Type == PROGRAM_UNIFORM || p->Type == PROGRAM_SAMPLER) {
2014 struct gl_uniform *uniform =
2015 _mesa_append_uniform(uniforms, p->Name, prog->Target, i);
2016 if (uniform)
2017 uniform->Initialized = p->Initialized;
2018 }
2019 }
2020 }
2021
2022 struct gl_program *
2023 get_mesa_program(GLcontext *ctx, struct gl_shader_program *shader_program,
2024 struct gl_shader *shader)
2025 {
2026 void *mem_ctx = shader_program;
2027 ir_to_mesa_visitor v;
2028 struct prog_instruction *mesa_instructions, *mesa_inst;
2029 ir_instruction **mesa_instruction_annotation;
2030 int i;
2031 struct gl_program *prog;
2032 GLenum target;
2033 GLboolean progress;
2034
2035 switch (shader->Type) {
2036 case GL_VERTEX_SHADER: target = GL_VERTEX_PROGRAM_ARB; break;
2037 case GL_FRAGMENT_SHADER: target = GL_FRAGMENT_PROGRAM_ARB; break;
2038 default: assert(!"should not be reached"); break;
2039 }
2040
2041 validate_ir_tree(shader->ir);
2042
2043 prog = ctx->Driver.NewProgram(ctx, target, 1);
2044 if (!prog)
2045 return NULL;
2046 prog->Parameters = _mesa_new_parameter_list();
2047 prog->Varying = _mesa_new_parameter_list();
2048 prog->Attributes = _mesa_new_parameter_list();
2049 v.ctx = ctx;
2050 v.prog = prog;
2051
2052 v.mem_ctx = talloc_new(NULL);
2053
2054 /* Emit Mesa IR for main(). */
2055 visit_exec_list(shader->ir, &v);
2056 v.ir_to_mesa_emit_op0(NULL, OPCODE_END);
2057
2058 /* Now emit bodies for any functions that were used. */
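/* Emitting one body may reference further functions, which get added to
 * v.function_signatures as they are seen, so keep iterating until a pass
 * finds nothing left to emit.
 */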
2059 do {
2060 progress = GL_FALSE;
2061
2062 foreach_iter(exec_list_iterator, iter, v.function_signatures) {
2063 function_entry *entry = (function_entry *)iter.get();
2064
2065 if (!entry->bgn_inst) {
2066 v.current_function = entry;
2067
2068 entry->bgn_inst = v.ir_to_mesa_emit_op0(NULL, OPCODE_BGNSUB);
2069 entry->bgn_inst->function = entry;
2070
2071 visit_exec_list(&entry->sig->body, &v);
2072
2073 v.ir_to_mesa_emit_op0(NULL, OPCODE_RET);
2074 v.ir_to_mesa_emit_op0(NULL, OPCODE_ENDSUB);
2075 progress = GL_TRUE;
2076 }
2077 }
2078 } while (progress);
2079
2080 prog->NumTemporaries = v.next_temp;
2081
2082 int num_instructions = 0;
2083 foreach_iter(exec_list_iterator, iter, v.instructions) {
2084 num_instructions++;
2085 }
2086
2087 mesa_instructions =
2088 (struct prog_instruction *)calloc(num_instructions,
2089 sizeof(*mesa_instructions));
2090 mesa_instruction_annotation = talloc_array(mem_ctx, ir_instruction *,
2091 num_instructions);
2092
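/* Convert each ir_to_mesa_instruction into the corresponding Mesa
 * prog_instruction, carrying the IR annotation along for debug output.
 */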
2093 mesa_inst = mesa_instructions;
2094 i = 0;
2095 foreach_iter(exec_list_iterator, iter, v.instructions) {
2096 ir_to_mesa_instruction *inst = (ir_to_mesa_instruction *)iter.get();
2097
2098 mesa_inst->Opcode = inst->op;
2099 mesa_inst->CondUpdate = inst->cond_update;
2100 mesa_inst->DstReg.File = inst->dst_reg.file;
2101 mesa_inst->DstReg.Index = inst->dst_reg.index;
2102 mesa_inst->DstReg.CondMask = inst->dst_reg.cond_mask;
2103 mesa_inst->DstReg.WriteMask = inst->dst_reg.writemask;
2104 mesa_inst->DstReg.RelAddr = inst->dst_reg.reladdr != NULL;
2105 mesa_inst->SrcReg[0] = mesa_src_reg_from_ir_src_reg(inst->src_reg[0]);
2106 mesa_inst->SrcReg[1] = mesa_src_reg_from_ir_src_reg(inst->src_reg[1]);
2107 mesa_inst->SrcReg[2] = mesa_src_reg_from_ir_src_reg(inst->src_reg[2]);
2108 mesa_inst->TexSrcUnit = inst->sampler;
2109 mesa_inst->TexSrcTarget = inst->tex_target;
2110 mesa_inst->TexShadow = inst->tex_shadow;
2111 mesa_instruction_annotation[i] = inst->ir;
2112
2113 if (ctx->Shader.EmitNoIfs && mesa_inst->Opcode == OPCODE_IF) {
2114 shader_program->InfoLog =
2115 talloc_asprintf_append(shader_program->InfoLog,
2116 "Couldn't flatten if statement\n");
2117 shader_program->LinkStatus = false;
2118 }
2119
2120 if (mesa_inst->Opcode == OPCODE_BGNSUB)
2121 inst->function->inst = i;
2122 else if (mesa_inst->Opcode == OPCODE_CAL)
2123 mesa_inst->BranchTarget = inst->function->sig_id; /* rewritten later */
2124
2125 mesa_inst++;
2126 i++;
2127 }
2128
2129 set_branchtargets(&v, mesa_instructions, num_instructions);
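/* Flip this to 1 to dump the generated Mesa program interleaved with the
 * GLSL IR it came from.
 */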
2130 if (0) {
2131 print_program(mesa_instructions, mesa_instruction_annotation,
2132 num_instructions);
2133 }
2134
2135 prog->Instructions = mesa_instructions;
2136 prog->NumInstructions = num_instructions;
2137
2138 _mesa_reference_program(ctx, &shader->Program, prog);
2139
2140 if ((ctx->Shader.Flags & GLSL_NO_OPT) == 0) {
2141 _mesa_optimize_program(ctx, prog);
2142 }
2143
2144 return prog;
2145 }
2146
2147 extern "C" {
2148
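/* visit_tree callback: reparent a piece of IR onto the gl_shader so it
 * survives talloc_free(state) at the end of compilation.
 */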
2149 static void
2150 steal_memory(ir_instruction *ir, void *new_ctx)
2151 {
2152 talloc_steal(new_ctx, ir);
2153 }
2154
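/* Compile a single GLSL shader: preprocess, parse, convert the AST to IR,
 * run lowering and optimization passes, and record the results on the
 * gl_shader.
 */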
2155 void
2156 _mesa_glsl_compile_shader(GLcontext *ctx, struct gl_shader *shader)
2157 {
2158 struct _mesa_glsl_parse_state *state;
2159
2160 state = talloc_zero(shader, struct _mesa_glsl_parse_state);
2161 switch (shader->Type) {
2162 case GL_VERTEX_SHADER: state->target = vertex_shader; break;
2163 case GL_FRAGMENT_SHADER: state->target = fragment_shader; break;
2164 case GL_GEOMETRY_SHADER: state->target = geometry_shader; break;
2165 }
2166
2167 state->scanner = NULL;
2168 state->translation_unit.make_empty();
2169 state->symbols = new(shader) glsl_symbol_table;
2170 state->info_log = talloc_strdup(shader, "");
2171 state->error = false;
2172 state->loop_or_switch_nesting = NULL;
2173 state->ARB_texture_rectangle_enable = true;
2174
2175 state->extensions = &ctx->Extensions;
2176
2177 state->Const.MaxLights = ctx->Const.MaxLights;
2178 state->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
2179 state->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
2180 state->Const.MaxTextureCoords = ctx->Const.MaxTextureCoordUnits;
2181 state->Const.MaxVertexAttribs = ctx->Const.VertexProgram.MaxAttribs;
2182 state->Const.MaxVertexUniformComponents = ctx->Const.VertexProgram.MaxUniformComponents;
2183 state->Const.MaxVaryingFloats = ctx->Const.MaxVarying * 4;
2184 state->Const.MaxVertexTextureImageUnits = ctx->Const.MaxVertexTextureImageUnits;
2185 state->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxCombinedTextureImageUnits;
2186 state->Const.MaxTextureImageUnits = ctx->Const.MaxTextureImageUnits;
2187 state->Const.MaxFragmentUniformComponents = ctx->Const.FragmentProgram.MaxUniformComponents;
2188
2189 state->Const.MaxDrawBuffers = ctx->Const.MaxDrawBuffers;
2190
2191 const char *source = shader->Source;
2192 state->error = preprocess(state, &source, &state->info_log,
2193 &ctx->Extensions);
2194
2195 if (!state->error) {
2196 _mesa_glsl_lexer_ctor(state, source);
2197 _mesa_glsl_parse(state);
2198 _mesa_glsl_lexer_dtor(state);
2199 }
2200
2201 shader->ir = new(shader) exec_list;
2202 if (!state->error && !state->translation_unit.is_empty())
2203 _mesa_ast_to_hir(shader->ir, state);
2204
2205 if (!state->error && !shader->ir->is_empty()) {
2206 validate_ir_tree(shader->ir);
2207
2208 /* Lowering */
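/* These rewrite IR constructs (matrix ops, mod, division) into
 * operations the Mesa IR backend can emit directly.
 */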
2209 do_mat_op_to_vec(shader->ir);
2210 do_mod_to_fract(shader->ir);
2211 do_div_to_mul_rcp(shader->ir);
2212
2213 /* Optimization passes */
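/* Repeat until a full pass makes no progress; one pass can expose
 * opportunities for another (e.g. inlining enabling dead-code removal).
 */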
2214 bool progress;
2215 do {
2216 progress = false;
2217
2218 progress = do_function_inlining(shader->ir) || progress;
2219 progress = do_if_simplification(shader->ir) || progress;
2220 progress = do_copy_propagation(shader->ir) || progress;
2221 progress = do_dead_code_local(shader->ir) || progress;
2222 progress = do_dead_code_unlinked(state, shader->ir) || progress;
2223 progress = do_constant_variable_unlinked(shader->ir) || progress;
2224 progress = do_constant_folding(shader->ir) || progress;
2225 progress = do_if_return(shader->ir) || progress;
2226 if (ctx->Shader.EmitNoIfs)
2227 progress = do_if_to_cond_assign(shader->ir) || progress;
2228
2229 progress = do_vec_index_to_swizzle(shader->ir) || progress;
2230 /* Do this one after the previous pass so the simpler pass gets to
2231 * handle constant vector indexing first.
2232 */
2233 progress = do_vec_index_to_cond_assign(shader->ir) || progress;
2234
2235 progress = do_swizzle_swizzle(shader->ir) || progress;
2236 } while (progress);
2237
2238 validate_ir_tree(shader->ir);
2239 }
2240
2241 shader->symbols = state->symbols;
2242
2243 shader->CompileStatus = !state->error;
2244 shader->InfoLog = state->info_log;
2245 shader->Version = state->language_version;
2246
2247 /* Retain any live IR, but trash the rest. */
2248 foreach_list(node, shader->ir) {
2249 visit_tree((ir_instruction *) node, steal_memory, shader);
2250 }
2251
2252 talloc_free(state);
2253 }
2254
2255 void
2256 _mesa_glsl_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
2257 {
2258 unsigned int i;
2259
2260 _mesa_clear_shader_program_data(ctx, prog);
2261
2262 prog->LinkStatus = GL_TRUE;
2263
2264 for (i = 0; i < prog->NumShaders; i++) {
2265 if (!prog->Shaders[i]->CompileStatus) {
2266 prog->InfoLog =
2267 talloc_asprintf_append(prog->InfoLog,
2268 "linking with uncompiled shader");
2269 prog->LinkStatus = GL_FALSE;
2270 }
2271 }
2272
2273 prog->Varying = _mesa_new_parameter_list();
2274 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
2275 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
2276
2277 if (prog->LinkStatus) {
2278 link_shaders(prog);
2279
2280 /* We don't use the linker's uniforms list, and cook up our own at
2281 * generate time.
2282 */
2283 free(prog->Uniforms);
2284 prog->Uniforms = _mesa_new_uniform_list();
2285 }
2286
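/* Translate each linked shader's GLSL IR into a Mesa gl_program, gather
 * its resource usage and uniforms, and attach it as the program's vertex
 * or fragment stage.
 */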
2287 if (prog->LinkStatus) {
2288 for (i = 0; i < prog->_NumLinkedShaders; i++) {
2289 struct gl_program *linked_prog;
2290
2291 linked_prog = get_mesa_program(ctx, prog,
2292 prog->_LinkedShaders[i]);
2293 count_resources(linked_prog);
2294
2295 link_uniforms_to_shared_uniform_list(prog->Uniforms, linked_prog);
2296
2297 switch (prog->_LinkedShaders[i]->Type) {
2298 case GL_VERTEX_SHADER:
2299 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
2300 (struct gl_vertex_program *)linked_prog);
2301 ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
2302 linked_prog);
2303 break;
2304 case GL_FRAGMENT_SHADER:
2305 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
2306 (struct gl_fragment_program *)linked_prog);
2307 ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
2308 linked_prog);
2309 break;
2310 }
2311 }
2312 }
2313 }
2314
2315 } /* extern "C" */