ir_to_mesa: Fix the swizzles on record and array dereferences.
src/mesa/shader/ir_to_mesa.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 /**
27 * \file ir_to_mesa.cpp
28 *
29 * Translates the GLSL IR into Mesa's gl_program representation
30 * (Mesa IR prog_instructions).
31 */
32
33 #include <stdio.h>
34 #include "ir.h"
35 #include "ir_visitor.h"
36 #include "ir_print_visitor.h"
37 #include "ir_expression_flattening.h"
38 #include "glsl_types.h"
39 #include "glsl_parser_extras.h"
40 #include "../glsl/program.h"
41 #include "ir_optimization.h"
42 #include "ast.h"
43
44 extern "C" {
45 #include "main/mtypes.h"
46 #include "shader/prog_instruction.h"
47 #include "shader/prog_optimize.h"
48 #include "shader/prog_print.h"
49 #include "shader/program.h"
50 #include "shader/prog_uniform.h"
51 #include "shader/prog_parameter.h"
52 #include "shader/shader_api.h"
53 }
54
55 /**
56 * This struct mirrors Mesa's prog_src_register, but with wider fields.
58 */
59 typedef struct ir_to_mesa_src_reg {
60 int file; /**< PROGRAM_* from Mesa */
61 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
62 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
63 int negate; /**< NEGATE_XYZW mask from mesa */
64 /** Register index should be offset by the integer in this reg. */
65 ir_to_mesa_src_reg *reladdr;
66 } ir_to_mesa_src_reg;
67
68 typedef struct ir_to_mesa_dst_reg {
69 int file; /**< PROGRAM_* from Mesa */
70 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
71 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
72 GLuint cond_mask:4;
73 /** Register index should be offset by the integer in this reg. */
74 ir_to_mesa_src_reg *reladdr;
75 } ir_to_mesa_dst_reg;
76
77 extern ir_to_mesa_src_reg ir_to_mesa_undef;
78
79 class ir_to_mesa_instruction : public exec_node {
80 public:
81 enum prog_opcode op;
82 ir_to_mesa_dst_reg dst_reg;
83 ir_to_mesa_src_reg src_reg[3];
84 /** Pointer to the ir source this tree came from for debugging */
85 ir_instruction *ir;
86 GLboolean cond_update;
87 int sampler; /**< sampler index */
88 int tex_target; /**< One of TEXTURE_*_INDEX */
89 GLboolean tex_shadow;
90
91 class function_entry *function; /* Set on OPCODE_CAL or OPCODE_BGNSUB */
92 };
93
94 class variable_storage : public exec_node {
95 public:
96 variable_storage(ir_variable *var, int file, int index)
97 : file(file), index(index), var(var)
98 {
99 /* empty */
100 }
101
102 int file;
103 int index;
104 ir_variable *var; /* variable that maps to this, if any */
105 };
106
107 class function_entry : public exec_node {
108 public:
109 ir_function_signature *sig;
110
111 /**
112 * identifier of this function signature used by the program.
113 *
114 * At the point that Mesa instructions for function calls are
115 * generated, we don't know the address of the first instruction of
116 * the function body. So we use this small integer as the CAL
117 * instruction's BranchTarget and rewrite it during set_branchtargets().
118 */
119 int sig_id;
120
121 /**
122 * Pointer to first instruction of the function body.
123 *
124 * Set during function body emits after main() is processed.
125 */
126 ir_to_mesa_instruction *bgn_inst;
127
128 /**
129 * Index of the first instruction of the function body in actual
130 * Mesa IR.
131 *
132 * Set after conversion from ir_to_mesa_instruction to prog_instruction.
133 */
134 int inst;
135
136 /** Storage for the return value. */
137 ir_to_mesa_src_reg return_reg;
138 };
139
140 class ir_to_mesa_visitor : public ir_visitor {
141 public:
142 ir_to_mesa_visitor();
143
144 function_entry *current_function;
145
146 GLcontext *ctx;
147 struct gl_program *prog;
148
149 int next_temp;
150
151 variable_storage *find_variable_storage(ir_variable *var);
152
153 function_entry *get_function_signature(ir_function_signature *sig);
154
155 ir_to_mesa_src_reg get_temp(const glsl_type *type);
156 void reladdr_to_temp(ir_instruction *ir,
157 ir_to_mesa_src_reg *reg, int *num_reladdr);
158
159 struct ir_to_mesa_src_reg src_reg_for_float(float val);
160
161 /**
162 * \name Visit methods
163 *
164 * As typical for the visitor pattern, there must be one \c visit method for
165 * each concrete subclass of \c ir_instruction. Virtual base classes within
166 * the hierarchy should not have \c visit methods.
167 */
168 /*@{*/
169 virtual void visit(ir_variable *);
170 virtual void visit(ir_loop *);
171 virtual void visit(ir_loop_jump *);
172 virtual void visit(ir_function_signature *);
173 virtual void visit(ir_function *);
174 virtual void visit(ir_expression *);
175 virtual void visit(ir_swizzle *);
176 virtual void visit(ir_dereference_variable *);
177 virtual void visit(ir_dereference_array *);
178 virtual void visit(ir_dereference_record *);
179 virtual void visit(ir_assignment *);
180 virtual void visit(ir_constant *);
181 virtual void visit(ir_call *);
182 virtual void visit(ir_return *);
183 virtual void visit(ir_discard *);
184 virtual void visit(ir_texture *);
185 virtual void visit(ir_if *);
186 /*@}*/
187
188 struct ir_to_mesa_src_reg result;
189
190 /** List of variable_storage */
191 exec_list variables;
192
193 /** List of function_entry */
194 exec_list function_signatures;
195 int next_signature_id;
196
197 /** List of ir_to_mesa_instruction */
198 exec_list instructions;
199
200 ir_to_mesa_instruction *ir_to_mesa_emit_op0(ir_instruction *ir,
201 enum prog_opcode op);
202
203 ir_to_mesa_instruction *ir_to_mesa_emit_op1(ir_instruction *ir,
204 enum prog_opcode op,
205 ir_to_mesa_dst_reg dst,
206 ir_to_mesa_src_reg src0);
207
208 ir_to_mesa_instruction *ir_to_mesa_emit_op2(ir_instruction *ir,
209 enum prog_opcode op,
210 ir_to_mesa_dst_reg dst,
211 ir_to_mesa_src_reg src0,
212 ir_to_mesa_src_reg src1);
213
214 ir_to_mesa_instruction *ir_to_mesa_emit_op3(ir_instruction *ir,
215 enum prog_opcode op,
216 ir_to_mesa_dst_reg dst,
217 ir_to_mesa_src_reg src0,
218 ir_to_mesa_src_reg src1,
219 ir_to_mesa_src_reg src2);
220
221 void ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
222 enum prog_opcode op,
223 ir_to_mesa_dst_reg dst,
224 ir_to_mesa_src_reg src0);
225
226 void ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
227 enum prog_opcode op,
228 ir_to_mesa_dst_reg dst,
229 ir_to_mesa_src_reg src0,
230 ir_to_mesa_src_reg src1);
231
232 GLboolean try_emit_mad(ir_expression *ir,
233 int mul_operand);
234
235 int *sampler_map;
236 int sampler_map_size;
237
238 void map_sampler(int location, int sampler);
239 int get_sampler_number(int location);
240
241 void *mem_ctx;
242 };
243
244 ir_to_mesa_src_reg ir_to_mesa_undef = {
245 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, NEGATE_NONE, NULL,
246 };
247
248 ir_to_mesa_dst_reg ir_to_mesa_undef_dst = {
249 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, COND_TR, NULL,
250 };
251
252 ir_to_mesa_dst_reg ir_to_mesa_address_reg = {
253 PROGRAM_ADDRESS, 0, WRITEMASK_X, COND_TR, NULL
254 };
255
256 static int swizzle_for_size(int size)
257 {
258 int size_swizzles[4] = {
259 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
260 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
261 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
262 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
263 };
264
265 return size_swizzles[size - 1];
266 }
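
/* For illustration: a Mesa swizzle packs four 3-bit channel selectors, so
 * GET_SWZ(swz, i) extracts the selector for result channel i and
 * MAKE_SWIZZLE4() packs four selectors back together. swizzle_for_size(2),
 * for example, yields the equivalent of ".xyyy": a vec2's last real channel
 * is replicated into z and w so reads of the unused channels stay
 * well-defined.
 */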
267
268 ir_to_mesa_instruction *
269 ir_to_mesa_visitor::ir_to_mesa_emit_op3(ir_instruction *ir,
270 enum prog_opcode op,
271 ir_to_mesa_dst_reg dst,
272 ir_to_mesa_src_reg src0,
273 ir_to_mesa_src_reg src1,
274 ir_to_mesa_src_reg src2)
275 {
276 ir_to_mesa_instruction *inst = new(mem_ctx) ir_to_mesa_instruction();
277 int num_reladdr = 0;
278
279 /* If we have to do relative addressing, we want to load the ARL
280 * reg directly for one of the regs, and preload the other reladdr
281 * sources into temps.
282 */
283 num_reladdr += dst.reladdr != NULL;
284 num_reladdr += src0.reladdr != NULL;
285 num_reladdr += src1.reladdr != NULL;
286 num_reladdr += src2.reladdr != NULL;
287
288 reladdr_to_temp(ir, &src2, &num_reladdr);
289 reladdr_to_temp(ir, &src1, &num_reladdr);
290 reladdr_to_temp(ir, &src0, &num_reladdr);
291
292 if (dst.reladdr) {
293 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg,
294 *dst.reladdr);
295
296 num_reladdr--;
297 }
298 assert(num_reladdr == 0);
299
300 inst->op = op;
301 inst->dst_reg = dst;
302 inst->src_reg[0] = src0;
303 inst->src_reg[1] = src1;
304 inst->src_reg[2] = src2;
305 inst->ir = ir;
306
307 inst->function = NULL;
308
309 this->instructions.push_tail(inst);
310
311 return inst;
312 }
313
314
315 ir_to_mesa_instruction *
316 ir_to_mesa_visitor::ir_to_mesa_emit_op2(ir_instruction *ir,
317 enum prog_opcode op,
318 ir_to_mesa_dst_reg dst,
319 ir_to_mesa_src_reg src0,
320 ir_to_mesa_src_reg src1)
321 {
322 return ir_to_mesa_emit_op3(ir, op, dst, src0, src1, ir_to_mesa_undef);
323 }
324
325 ir_to_mesa_instruction *
326 ir_to_mesa_visitor::ir_to_mesa_emit_op1(ir_instruction *ir,
327 enum prog_opcode op,
328 ir_to_mesa_dst_reg dst,
329 ir_to_mesa_src_reg src0)
330 {
331 return ir_to_mesa_emit_op3(ir, op, dst,
332 src0, ir_to_mesa_undef, ir_to_mesa_undef);
333 }
334
335 ir_to_mesa_instruction *
336 ir_to_mesa_visitor::ir_to_mesa_emit_op0(ir_instruction *ir,
337 enum prog_opcode op)
338 {
339 return ir_to_mesa_emit_op3(ir, op, ir_to_mesa_undef_dst,
340 ir_to_mesa_undef,
341 ir_to_mesa_undef,
342 ir_to_mesa_undef);
343 }
344
345 void
346 ir_to_mesa_visitor::map_sampler(int location, int sampler)
347 {
348 if (this->sampler_map_size <= location) {
349 this->sampler_map = talloc_realloc(this->mem_ctx, this->sampler_map,
350 int, location + 1);
351 this->sampler_map_size = location + 1;
352 }
353
354 this->sampler_map[location] = sampler;
355 }
356
357 int
358 ir_to_mesa_visitor::get_sampler_number(int location)
359 {
360 assert(location < this->sampler_map_size);
361 return this->sampler_map[location];
362 }
363
364 inline ir_to_mesa_dst_reg
365 ir_to_mesa_dst_reg_from_src(ir_to_mesa_src_reg reg)
366 {
367 ir_to_mesa_dst_reg dst_reg;
368
369 dst_reg.file = reg.file;
370 dst_reg.index = reg.index;
371 dst_reg.writemask = WRITEMASK_XYZW;
372 dst_reg.cond_mask = COND_TR;
373 dst_reg.reladdr = reg.reladdr;
374
375 return dst_reg;
376 }
377
378 inline ir_to_mesa_src_reg
379 ir_to_mesa_src_reg_from_dst(ir_to_mesa_dst_reg reg)
380 {
381 ir_to_mesa_src_reg src_reg;
382
383 src_reg.file = reg.file;
384 src_reg.index = reg.index;
385 src_reg.swizzle = SWIZZLE_XYZW;
386 src_reg.negate = 0;
387 src_reg.reladdr = reg.reladdr;
388
389 return src_reg;
390 }
391
392 /**
393 * Emits Mesa scalar opcodes to produce unique answers across channels.
394 *
395 * Some Mesa opcodes are scalar-only, as in ARB_fp/vp: the source's X
396 * channel alone determines the result, which is then splatted to every
397 * channel. To compute a vec4 of such an operation, we emit one scalar
398 * instruction per unique source channel used to produce the dest channels.
399 */
400 void
401 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
402 enum prog_opcode op,
403 ir_to_mesa_dst_reg dst,
404 ir_to_mesa_src_reg orig_src0,
405 ir_to_mesa_src_reg orig_src1)
406 {
407 int i, j;
408 int done_mask = ~dst.writemask;
409
410 /* Mesa RCP is a scalar operation splatting results to all channels,
411 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
412 * dst channels.
413 */
414 for (i = 0; i < 4; i++) {
415 GLuint this_mask = (1 << i);
416 ir_to_mesa_instruction *inst;
417 ir_to_mesa_src_reg src0 = orig_src0;
418 ir_to_mesa_src_reg src1 = orig_src1;
419
420 if (done_mask & this_mask)
421 continue;
422
423 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
424 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
425 for (j = i + 1; j < 4; j++) {
426 if (!(done_mask & (1 << j)) &&
427 GET_SWZ(src0.swizzle, j) == src0_swiz &&
428 GET_SWZ(src1.swizzle, j) == src1_swiz) {
429 this_mask |= (1 << j);
430 }
431 }
432 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
433 src0_swiz, src0_swiz);
434 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
435 src1_swiz, src1_swiz);
436
437 inst = ir_to_mesa_emit_op2(ir, op,
438 dst,
439 src0,
440 src1);
441 inst->dst_reg.writemask = this_mask;
442 done_mask |= this_mask;
443 }
444 }
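
/* For illustration: with OPCODE_RCP, dst.writemask == WRITEMASK_XYZW and
 * src0 swizzled as .xxyy, the loop above folds channels that share a source
 * selector into one writemask and emits just two instructions, roughly:
 *
 *    RCP dst.xy, src0.xxxx;
 *    RCP dst.zw, src0.yyyy;
 */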
445
446 void
447 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
448 enum prog_opcode op,
449 ir_to_mesa_dst_reg dst,
450 ir_to_mesa_src_reg src0)
451 {
452 ir_to_mesa_src_reg undef = ir_to_mesa_undef;
453
454 undef.swizzle = SWIZZLE_XXXX;
455
456 ir_to_mesa_emit_scalar_op2(ir, op, dst, src0, undef);
457 }
458
459 struct ir_to_mesa_src_reg
460 ir_to_mesa_visitor::src_reg_for_float(float val)
461 {
462 ir_to_mesa_src_reg src_reg;
463
464 src_reg.file = PROGRAM_CONSTANT;
465 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
466 &val, 1, &src_reg.swizzle);
467 src_reg.reladdr = NULL;
468 src_reg.negate = 0;
469
470 return src_reg;
471 }
472
473 static int
474 type_size(const struct glsl_type *type)
475 {
476 unsigned int i;
477 int size;
478
479 switch (type->base_type) {
480 case GLSL_TYPE_UINT:
481 case GLSL_TYPE_INT:
482 case GLSL_TYPE_FLOAT:
483 case GLSL_TYPE_BOOL:
484 if (type->is_matrix()) {
485 return type->matrix_columns;
486 } else {
487 /* Regardless of size of vector, it gets a vec4. This is bad
488 * packing for things like floats, but otherwise arrays become a
489 * mess. Hopefully a later pass over the code can pack scalars
490 * down if appropriate.
491 */
492 return 1;
493 }
494 case GLSL_TYPE_ARRAY:
495 return type_size(type->fields.array) * type->length;
496 case GLSL_TYPE_STRUCT:
497 size = 0;
498 for (i = 0; i < type->length; i++) {
499 size += type_size(type->fields.structure[i].type);
500 }
501 return size;
502 default:
503 assert(0);
return 0;
504 }
505 }
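
/* Worked example of the register counts above: a float or any vecN takes one
 * vec4 register, a mat4 takes 4 (one per column), "vec2 a[10]" takes 10, and
 * "struct { vec4 a; mat3 b; float c; }" takes 1 + 3 + 1 = 5 consecutive
 * registers.
 */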
506
507 /**
508 * In the initial pass of codegen, we assign temporary numbers to
509 * intermediate results. (not SSA -- variable assignments will reuse
510 * storage). Actual register allocation for the Mesa VM occurs in a
511 * pass over the Mesa IR later.
512 */
513 ir_to_mesa_src_reg
514 ir_to_mesa_visitor::get_temp(const glsl_type *type)
515 {
516 ir_to_mesa_src_reg src_reg;
517 int swizzle[4];
518 int i;
519
520 assert(!type->is_array());
521
522 src_reg.file = PROGRAM_TEMPORARY;
523 src_reg.index = next_temp;
524 src_reg.reladdr = NULL;
525 next_temp += type_size(type);
526
527 for (i = 0; i < type->vector_elements; i++)
528 swizzle[i] = i;
529 for (; i < 4; i++)
530 swizzle[i] = type->vector_elements - 1;
531 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
532 swizzle[2], swizzle[3]);
533 src_reg.negate = 0;
534
535 return src_reg;
536 }
537
538 variable_storage *
539 ir_to_mesa_visitor::find_variable_storage(ir_variable *var)
540 {
541
542 variable_storage *entry;
543
544 foreach_iter(exec_list_iterator, iter, this->variables) {
545 entry = (variable_storage *)iter.get();
546
547 if (entry->var == var)
548 return entry;
549 }
550
551 return NULL;
552 }
553
554 void
555 ir_to_mesa_visitor::visit(ir_variable *ir)
556 {
557 (void)ir;
558 }
559
560 void
561 ir_to_mesa_visitor::visit(ir_loop *ir)
562 {
563 assert(!ir->from);
564 assert(!ir->to);
565 assert(!ir->increment);
566 assert(!ir->counter);
567
568 ir_to_mesa_emit_op0(NULL, OPCODE_BGNLOOP);
569 visit_exec_list(&ir->body_instructions, this);
570 ir_to_mesa_emit_op0(NULL, OPCODE_ENDLOOP);
571 }
572
573 void
574 ir_to_mesa_visitor::visit(ir_loop_jump *ir)
575 {
576 switch (ir->mode) {
577 case ir_loop_jump::jump_break:
578 ir_to_mesa_emit_op0(NULL, OPCODE_BRK);
579 break;
580 case ir_loop_jump::jump_continue:
581 ir_to_mesa_emit_op0(NULL, OPCODE_CONT);
582 break;
583 }
584 }
585
586
587 void
588 ir_to_mesa_visitor::visit(ir_function_signature *ir)
589 {
590 assert(0);
591 (void)ir;
592 }
593
594 void
595 ir_to_mesa_visitor::visit(ir_function *ir)
596 {
597 /* Ignore function bodies other than main() -- we shouldn't see calls to
598 * them since they should all be inlined before we get to ir_to_mesa.
599 */
600 if (strcmp(ir->name, "main") == 0) {
601 const ir_function_signature *sig;
602 exec_list empty;
603
604 sig = ir->matching_signature(&empty);
605
606 assert(sig);
607
608 foreach_iter(exec_list_iterator, iter, sig->body) {
609 ir_instruction *ir = (ir_instruction *)iter.get();
610
611 ir->accept(this);
612 }
613 }
614 }
615
616 GLboolean
617 ir_to_mesa_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
618 {
619 int nonmul_operand = 1 - mul_operand;
620 ir_to_mesa_src_reg a, b, c;
621
622 ir_expression *expr = ir->operands[mul_operand]->as_expression();
623 if (!expr || expr->operation != ir_binop_mul)
624 return false;
625
626 expr->operands[0]->accept(this);
627 a = this->result;
628 expr->operands[1]->accept(this);
629 b = this->result;
630 ir->operands[nonmul_operand]->accept(this);
631 c = this->result;
632
633 this->result = get_temp(ir->type);
634 ir_to_mesa_emit_op3(ir, OPCODE_MAD,
635 ir_to_mesa_dst_reg_from_src(this->result), a, b, c);
636
637 return true;
638 }
639
640 void
641 ir_to_mesa_visitor::reladdr_to_temp(ir_instruction *ir,
642 ir_to_mesa_src_reg *reg, int *num_reladdr)
643 {
644 if (!reg->reladdr)
645 return;
646
647 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg, *reg->reladdr);
648
649 if (*num_reladdr != 1) {
650 ir_to_mesa_src_reg temp = get_temp(glsl_type::vec4_type);
651
652 ir_to_mesa_emit_op1(ir, OPCODE_MOV,
653 ir_to_mesa_dst_reg_from_src(temp), *reg);
654 *reg = temp;
655 }
656
657 (*num_reladdr)--;
658 }
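
/* Rough illustration of the constraint handled above: there is a single
 * address register to load with ARL, so one relative-addressed operand per
 * instruction can be read in place (e.g. "MOV dst, temps[A0.x]", with its ARL
 * load emitted just before the instruction), while any additional reladdr
 * sources are first resolved into plain temporaries, each preceded by its
 * own ARL.
 */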
659
660 void
661 ir_to_mesa_visitor::visit(ir_expression *ir)
662 {
663 unsigned int operand;
664 struct ir_to_mesa_src_reg op[2];
665 struct ir_to_mesa_src_reg result_src;
666 struct ir_to_mesa_dst_reg result_dst;
667 const glsl_type *vec4_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 4, 1);
668 const glsl_type *vec3_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 3, 1);
669 const glsl_type *vec2_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 2, 1);
670
671 /* Quick peephole: Emit OPCODE_MAD(a, b, c) instead of ADD(MUL(a, b), c)
672 */
673 if (ir->operation == ir_binop_add) {
674 if (try_emit_mad(ir, 1))
675 return;
676 if (try_emit_mad(ir, 0))
677 return;
678 }
679
680 for (operand = 0; operand < ir->get_num_operands(); operand++) {
681 this->result.file = PROGRAM_UNDEFINED;
682 ir->operands[operand]->accept(this);
683 if (this->result.file == PROGRAM_UNDEFINED) {
684 ir_print_visitor v;
685 printf("Failed to get tree for expression operand:\n");
686 ir->operands[operand]->accept(&v);
687 exit(1);
688 }
689 op[operand] = this->result;
690
691 /* Matrix expression operands should have been broken down to vector
692 * operations already.
693 */
694 assert(!ir->operands[operand]->type->is_matrix());
695 }
696
697 this->result.file = PROGRAM_UNDEFINED;
698
699 /* Storage for our result. Ideally for an assignment we'd be using
700 * the actual storage for the result here, instead.
701 */
702 result_src = get_temp(ir->type);
703 /* convenience for the emit functions below. */
704 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
705 /* Limit writes to the channels that will be used by result_src later.
706 * This does limit this temp's use as a temporary for multi-instruction
707 * sequences.
708 */
709 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
710
711 switch (ir->operation) {
712 case ir_unop_logic_not:
713 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst,
714 op[0], src_reg_for_float(0.0));
715 break;
716 case ir_unop_neg:
717 op[0].negate = ~op[0].negate;
718 result_src = op[0];
719 break;
720 case ir_unop_abs:
721 ir_to_mesa_emit_op1(ir, OPCODE_ABS, result_dst, op[0]);
722 break;
723 case ir_unop_sign:
724 ir_to_mesa_emit_op1(ir, OPCODE_SSG, result_dst, op[0]);
725 break;
726 case ir_unop_rcp:
727 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, op[0]);
728 break;
729
730 case ir_unop_exp:
731 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst,
732 src_reg_for_float(M_E), op[0]);
733 break;
734 case ir_unop_exp2:
735 ir_to_mesa_emit_scalar_op1(ir, OPCODE_EX2, result_dst, op[0]);
736 break;
737 case ir_unop_log:
738 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LOG, result_dst, op[0]);
739 break;
740 case ir_unop_log2:
741 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LG2, result_dst, op[0]);
742 break;
743 case ir_unop_sin:
744 ir_to_mesa_emit_scalar_op1(ir, OPCODE_SIN, result_dst, op[0]);
745 break;
746 case ir_unop_cos:
747 ir_to_mesa_emit_scalar_op1(ir, OPCODE_COS, result_dst, op[0]);
748 break;
749
750 case ir_unop_dFdx:
751 ir_to_mesa_emit_op1(ir, OPCODE_DDX, result_dst, op[0]);
752 break;
753 case ir_unop_dFdy:
754 ir_to_mesa_emit_op1(ir, OPCODE_DDY, result_dst, op[0]);
755 break;
756
757 case ir_binop_add:
758 ir_to_mesa_emit_op2(ir, OPCODE_ADD, result_dst, op[0], op[1]);
759 break;
760 case ir_binop_sub:
761 ir_to_mesa_emit_op2(ir, OPCODE_SUB, result_dst, op[0], op[1]);
762 break;
763
764 case ir_binop_mul:
765 ir_to_mesa_emit_op2(ir, OPCODE_MUL, result_dst, op[0], op[1]);
766 break;
767 case ir_binop_div:
768 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
769 case ir_binop_mod:
770 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
771 break;
772
773 case ir_binop_less:
774 ir_to_mesa_emit_op2(ir, OPCODE_SLT, result_dst, op[0], op[1]);
775 break;
776 case ir_binop_greater:
777 ir_to_mesa_emit_op2(ir, OPCODE_SGT, result_dst, op[0], op[1]);
778 break;
779 case ir_binop_lequal:
780 ir_to_mesa_emit_op2(ir, OPCODE_SLE, result_dst, op[0], op[1]);
781 break;
782 case ir_binop_gequal:
783 ir_to_mesa_emit_op2(ir, OPCODE_SGE, result_dst, op[0], op[1]);
784 break;
785 case ir_binop_equal:
786 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst, op[0], op[1]);
787 break;
788 case ir_binop_logic_xor:
789 case ir_binop_nequal:
790 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst, op[0], op[1]);
791 break;
792
793 case ir_binop_logic_or:
794 /* This could be a saturated add and skip the SNE. */
795 ir_to_mesa_emit_op2(ir, OPCODE_ADD,
796 result_dst,
797 op[0], op[1]);
798
799 ir_to_mesa_emit_op2(ir, OPCODE_SNE,
800 result_dst,
801 result_src, src_reg_for_float(0.0));
802 break;
803
804 case ir_binop_logic_and:
805 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
806 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
807 result_dst,
808 op[0], op[1]);
809 break;
810
811 case ir_binop_dot:
812 if (ir->operands[0]->type == vec4_type) {
813 assert(ir->operands[1]->type == vec4_type);
814 ir_to_mesa_emit_op2(ir, OPCODE_DP4,
815 result_dst,
816 op[0], op[1]);
817 } else if (ir->operands[0]->type == vec3_type) {
818 assert(ir->operands[1]->type == vec3_type);
819 ir_to_mesa_emit_op2(ir, OPCODE_DP3,
820 result_dst,
821 op[0], op[1]);
822 } else if (ir->operands[0]->type == vec2_type) {
823 assert(ir->operands[1]->type == vec2_type);
824 ir_to_mesa_emit_op2(ir, OPCODE_DP2,
825 result_dst,
826 op[0], op[1]);
827 }
828 break;
829
830 case ir_binop_cross:
831 ir_to_mesa_emit_op2(ir, OPCODE_XPD, result_dst, op[0], op[1]);
832 break;
833
834 case ir_unop_sqrt:
835 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
836 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, result_src);
837 /* For incoming channels < 0, set the result to 0. */
838 ir_to_mesa_emit_op3(ir, OPCODE_CMP, result_dst,
839 op[0], src_reg_for_float(0.0), result_src);
840 break;
841 case ir_unop_rsq:
842 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
843 break;
844 case ir_unop_i2f:
845 case ir_unop_b2f:
846 case ir_unop_b2i:
847 /* Mesa IR lacks types, ints are stored as truncated floats. */
848 result_src = op[0];
849 break;
850 case ir_unop_f2i:
851 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
852 break;
853 case ir_unop_f2b:
854 case ir_unop_i2b:
855 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst,
856 result_src, src_reg_for_float(0.0));
857 break;
858 case ir_unop_trunc:
859 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
860 break;
861 case ir_unop_ceil:
862 op[0].negate = ~op[0].negate;
863 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
864 result_src.negate = ~result_src.negate;
865 break;
866 case ir_unop_floor:
867 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
868 break;
869 case ir_unop_fract:
870 ir_to_mesa_emit_op1(ir, OPCODE_FRC, result_dst, op[0]);
871 break;
872
873 case ir_binop_min:
874 ir_to_mesa_emit_op2(ir, OPCODE_MIN, result_dst, op[0], op[1]);
875 break;
876 case ir_binop_max:
877 ir_to_mesa_emit_op2(ir, OPCODE_MAX, result_dst, op[0], op[1]);
878 break;
879 case ir_binop_pow:
880 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst, op[0], op[1]);
881 break;
882
883 case ir_unop_bit_not:
884 case ir_unop_u2f:
885 case ir_binop_lshift:
886 case ir_binop_rshift:
887 case ir_binop_bit_and:
888 case ir_binop_bit_xor:
889 case ir_binop_bit_or:
890 assert(!"GLSL 1.30 features unsupported");
891 break;
892 }
893
894 this->result = result_src;
895 }
896
897
898 void
899 ir_to_mesa_visitor::visit(ir_swizzle *ir)
900 {
901 ir_to_mesa_src_reg src_reg;
902 int i;
903 int swizzle[4];
904
905 /* Note that this is only swizzles in expressions, not those on the left
906 * hand side of an assignment, which do write masking. See ir_assignment
907 * for that.
908 */
909
910 ir->val->accept(this);
911 src_reg = this->result;
912 assert(src_reg.file != PROGRAM_UNDEFINED);
913
914 for (i = 0; i < 4; i++) {
915 if (i < ir->type->vector_elements) {
916 switch (i) {
917 case 0:
918 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.x);
919 break;
920 case 1:
921 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.y);
922 break;
923 case 2:
924 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.z);
925 break;
926 case 3:
927 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.w);
928 break;
929 }
930 } else {
931 /* If the type is smaller than a vec4, replicate the last
932 * channel out.
933 */
934 swizzle[i] = swizzle[ir->type->vector_elements - 1];
935 }
936 }
937
938 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0],
939 swizzle[1],
940 swizzle[2],
941 swizzle[3]);
942
943 this->result = src_reg;
944 }
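
/* Worked example of the swizzle composition above: if the value already
 * carries the swizzle .yzww (say, from an earlier dereference) and this
 * ir_swizzle is ".zx", channel 0 of the result reads selector 2 of the
 * existing swizzle (w) and channel 1 reads selector 0 (y); the last used
 * channel is then replicated, giving .wyyy on the same register.
 */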
945
946 static int
947 add_matrix_ref(struct gl_program *prog, int *tokens)
948 {
949 int base_pos = -1;
950 int i;
951
952 /* Add a ref for each column. It looks like the reason we do
953 * it this way is that _mesa_add_state_reference doesn't work
954 * for things that aren't vec4s, so the tokens[2]/tokens[3]
955 * range has to be equal.
956 */
957 for (i = 0; i < 4; i++) {
958 tokens[2] = i;
959 tokens[3] = i;
960 int pos = _mesa_add_state_reference(prog->Parameters,
961 (gl_state_index *)tokens);
962 if (base_pos == -1)
963 base_pos = pos;
964 else
965 assert(base_pos + i == pos);
966 }
967
968 return base_pos;
969 }
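
/* For reference -- inferred from how the tokens are used here rather than
 * stated anywhere in this file: the token layout is { matrix name, matrix
 * index, first row, last row, modifier }, so setting tokens[2] == tokens[3]
 * == i requests a single row of the matrix per parameter slot, and the four
 * slots land contiguously starting at base_pos.
 */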
970
971 static variable_storage *
972 get_builtin_matrix_ref(void *mem_ctx, struct gl_program *prog, ir_variable *var,
973 ir_rvalue *array_index)
974 {
975 /*
976 * NOTE: The ARB_vertex_program extension specified that matrices get
977 * loaded in registers in row-major order. With GLSL, we want column-
978 * major order. So, we need to transpose all matrices here...
979 */
980 static const struct {
981 const char *name;
982 int matrix;
983 int modifier;
984 } matrices[] = {
985 { "gl_ModelViewMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_TRANSPOSE },
986 { "gl_ModelViewMatrixInverse", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVTRANS },
987 { "gl_ModelViewMatrixTranspose", STATE_MODELVIEW_MATRIX, 0 },
988 { "gl_ModelViewMatrixInverseTranspose", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
989
990 { "gl_ProjectionMatrix", STATE_PROJECTION_MATRIX, STATE_MATRIX_TRANSPOSE },
991 { "gl_ProjectionMatrixInverse", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVTRANS },
992 { "gl_ProjectionMatrixTranspose", STATE_PROJECTION_MATRIX, 0 },
993 { "gl_ProjectionMatrixInverseTranspose", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVERSE },
994
995 { "gl_ModelViewProjectionMatrix", STATE_MVP_MATRIX, STATE_MATRIX_TRANSPOSE },
996 { "gl_ModelViewProjectionMatrixInverse", STATE_MVP_MATRIX, STATE_MATRIX_INVTRANS },
997 { "gl_ModelViewProjectionMatrixTranspose", STATE_MVP_MATRIX, 0 },
998 { "gl_ModelViewProjectionMatrixInverseTranspose", STATE_MVP_MATRIX, STATE_MATRIX_INVERSE },
999
1000 { "gl_TextureMatrix", STATE_TEXTURE_MATRIX, STATE_MATRIX_TRANSPOSE },
1001 { "gl_TextureMatrixInverse", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVTRANS },
1002 { "gl_TextureMatrixTranspose", STATE_TEXTURE_MATRIX, 0 },
1003 { "gl_TextureMatrixInverseTranspose", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVERSE },
1004
1005 { "gl_NormalMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
1006
1007 };
1008 unsigned int i;
1009 variable_storage *entry;
1010
1011 /* The state tokens are built as an int array and cast to gl_state_index
1012 * when the state reference is added, so make sure the two types match in size.
1013 */
1014 assert(sizeof(gl_state_index) == sizeof(int));
1015
1016 for (i = 0; i < Elements(matrices); i++) {
1017 if (strcmp(var->name, matrices[i].name) == 0) {
1018 int tokens[STATE_LENGTH];
1019 int base_pos = -1;
1020
1021 tokens[0] = matrices[i].matrix;
1022 tokens[4] = matrices[i].modifier;
1023 if (matrices[i].matrix == STATE_TEXTURE_MATRIX) {
1024 ir_constant *index = array_index->constant_expression_value();
1025 if (index) {
1026 tokens[1] = index->value.i[0];
1027 base_pos = add_matrix_ref(prog, tokens);
1028 } else {
1029 for (i = 0; i < var->type->length; i++) {
1030 tokens[1] = i;
1031 int pos = add_matrix_ref(prog, tokens);
1032 if (base_pos == -1)
1033 base_pos = pos;
1034 else
1035 assert(base_pos + (int)i * 4 == pos);
1036 }
1037 }
1038 } else {
1039 tokens[1] = 0; /* unused array index */
1040 base_pos = add_matrix_ref(prog, tokens);
1041 }
1043
1044 entry = new(mem_ctx) variable_storage(var,
1045 PROGRAM_STATE_VAR,
1046 base_pos);
1047
1048 return entry;
1049 }
1050 }
1051
1052 return NULL;
1053 }
1054
1055 void
1056 ir_to_mesa_visitor::visit(ir_dereference_variable *ir)
1057 {
1058 ir_to_mesa_src_reg src_reg;
1059 variable_storage *entry = find_variable_storage(ir->var);
1060 unsigned int loc;
1061
1062 if (!entry) {
1063 switch (ir->var->mode) {
1064 case ir_var_uniform:
1065 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, ir->var,
1066 NULL);
1067 if (entry)
1068 break;
1069
1070 /* FINISHME: Fix up uniform name for arrays and things */
1071 if (ir->var->type->base_type == GLSL_TYPE_SAMPLER) {
1072 /* FINISHME: we whack the location of the var here, which
1073 * is probably not expected. But we need to communicate
1074 * mesa's sampler number to the tex instruction.
1075 */
1076 int sampler = _mesa_add_sampler(this->prog->Parameters,
1077 ir->var->name,
1078 ir->var->type->gl_type);
1079 map_sampler(ir->var->location, sampler);
1080
1081 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_SAMPLER,
1082 sampler);
1083 this->variables.push_tail(entry);
1084 break;
1085 }
1086
1087 assert(ir->var->type->gl_type != 0 &&
1088 ir->var->type->gl_type != GL_INVALID_ENUM);
1089 loc = _mesa_add_uniform(this->prog->Parameters,
1090 ir->var->name,
1091 type_size(ir->var->type) * 4,
1092 ir->var->type->gl_type,
1093 NULL);
1094
1095 /* Always mark the uniform used at this point. If it isn't
1096 * used, dead code elimination should have nuked the decl already.
1097 */
1098 this->prog->Parameters->Parameters[loc].Used = GL_TRUE;
1099
1100 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_UNIFORM, loc);
1101 this->variables.push_tail(entry);
1102 break;
1103 case ir_var_in:
1104 case ir_var_out:
1105 case ir_var_inout:
1106 /* The linker assigns locations for varyings and attributes,
1107 * including deprecated builtins (like gl_Color), user-assign
1108 * generic attributes (glBindVertexLocation), and
1109 * user-defined varyings.
1110 *
1111 * FINISHME: We would hit this path for function arguments. Fix!
1112 */
1113 assert(ir->var->location != -1);
1114 if (ir->var->mode == ir_var_in ||
1115 ir->var->mode == ir_var_inout) {
1116 entry = new(mem_ctx) variable_storage(ir->var,
1117 PROGRAM_INPUT,
1118 ir->var->location);
1119
1120 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1121 ir->var->location >= VERT_ATTRIB_GENERIC0) {
1122 _mesa_add_attribute(prog->Attributes,
1123 ir->var->name,
1124 type_size(ir->var->type) * 4,
1125 ir->var->type->gl_type,
1126 ir->var->location - VERT_ATTRIB_GENERIC0);
1127 }
1128 } else {
1129 entry = new(mem_ctx) variable_storage(ir->var,
1130 PROGRAM_OUTPUT,
1131 ir->var->location);
1132 }
1133
1134 break;
1135 case ir_var_auto:
1136 case ir_var_temporary:
1137 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_TEMPORARY,
1138 this->next_temp);
1139 this->variables.push_tail(entry);
1140
1141 next_temp += type_size(ir->var->type);
1142 break;
1143 }
1144
1145 if (!entry) {
1146 printf("Failed to make storage for %s\n", ir->var->name);
1147 exit(1);
1148 }
1149 }
1150
1151 src_reg.file = entry->file;
1152 src_reg.index = entry->index;
1153 /* If the type is smaller than a vec4, replicate the last channel out. */
1154 if (ir->type->is_scalar() || ir->type->is_vector())
1155 src_reg.swizzle = swizzle_for_size(ir->var->type->vector_elements);
1156 else
1157 src_reg.swizzle = SWIZZLE_NOOP;
1158 src_reg.reladdr = NULL;
1159 src_reg.negate = 0;
1160
1161 this->result = src_reg;
1162 }
1163
1164 void
1165 ir_to_mesa_visitor::visit(ir_dereference_array *ir)
1166 {
1167 ir_constant *index;
1168 ir_to_mesa_src_reg src_reg;
1169 ir_dereference_variable *deref_var = ir->array->as_dereference_variable();
1170 int element_size = type_size(ir->type);
1171
1172 index = ir->array_index->constant_expression_value();
1173
1174 if (deref_var && strncmp(deref_var->var->name,
1175 "gl_TextureMatrix",
1176 strlen("gl_TextureMatrix")) == 0) {
1177 ir_to_mesa_src_reg src_reg;
1178 struct variable_storage *entry;
1179
1180 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, deref_var->var,
1181 ir->array_index);
1182 assert(entry);
1183
1184 src_reg.file = entry->file;
1185 src_reg.index = entry->index;
1186 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1187 src_reg.negate = 0;
1188
1189 if (index) {
1190 src_reg.reladdr = NULL;
1191 } else {
1192 ir_to_mesa_src_reg index_reg = get_temp(glsl_type::float_type);
1193
1194 ir->array_index->accept(this);
1195 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1196 ir_to_mesa_dst_reg_from_src(index_reg),
1197 this->result, src_reg_for_float(element_size));
1198
1199 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1200 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1201 }
1202
1203 this->result = src_reg;
1204 return;
1205 }
1206
1207 ir->array->accept(this);
1208 src_reg = this->result;
1209
1210 if (index) {
1211 src_reg.index += index->value.i[0] * element_size;
1212 } else {
1213 ir_to_mesa_src_reg array_base = this->result;
1214 /* Variable index array dereference. It eats the "vec4" of the
1215 * base of the array and an index that offsets the Mesa register
1216 * index.
1217 */
1218 ir->array_index->accept(this);
1219
1220 ir_to_mesa_src_reg index_reg;
1221
1222 if (element_size == 1) {
1223 index_reg = this->result;
1224 } else {
1225 index_reg = get_temp(glsl_type::float_type);
1226
1227 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1228 ir_to_mesa_dst_reg_from_src(index_reg),
1229 this->result, src_reg_for_float(element_size));
1230 }
1231
1232 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1233 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1234 }
1235
1236 /* If the type is smaller than a vec4, replicate the last channel out. */
1237 if (ir->type->is_scalar() || ir->type->is_vector())
1238 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1239 else
1240 src_reg.swizzle = SWIZZLE_NOOP;
1241
1242 this->result = src_reg;
1243 }
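
/* Illustrative example of the two paths above: for "vec4 a[8]" accessed as
 * "a[i]", element_size is 1, so the value of i becomes the relative offset
 * (reladdr) applied to a's base register; for "mat4 m[8]" accessed as
 * "m[i]", the index is first multiplied by 4 because each element spans four
 * registers. A constant index instead folds straight into src_reg.index.
 */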
1244
1245 void
1246 ir_to_mesa_visitor::visit(ir_dereference_record *ir)
1247 {
1248 unsigned int i;
1249 const glsl_type *struct_type = ir->record->type;
1250 int offset = 0;
1251
1252 ir->record->accept(this);
1253
1254 for (i = 0; i < struct_type->length; i++) {
1255 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1256 break;
1257 offset += type_size(struct_type->fields.structure[i].type);
1258 }
1259 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1260 this->result.index += offset;
1261 }
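
/* Illustrative example of the field lookup above: for a variable of type
 * "struct { vec3 a; float b; mat2 c; }", dereferencing ".b" skips
 * type_size(vec3) == 1 register, so result.index advances by 1 and the
 * swizzle becomes .xxxx (a scalar read from the x channel); ".c" would skip
 * 1 + 1 == 2 registers instead.
 */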
1262
1263 /**
1264 * We want to be careful in assignment setup to hit the actual storage
1265 * instead of potentially using a temporary like we might with the
1266 * ir_dereference handler.
1267 *
1268 * Thanks to ir_swizzle_swizzle, and ir_vec_index_to_swizzle, we
1269 * should only see potentially one variable array index of a vector,
1270 * and one swizzle, before getting to actual vec4 storage. So handle
1271 * those, then go use ir_dereference to handle the rest.
1272 */
1273 static struct ir_to_mesa_dst_reg
1274 get_assignment_lhs(ir_instruction *ir, ir_to_mesa_visitor *v,
1275 ir_to_mesa_src_reg *r)
1276 {
1277 struct ir_to_mesa_dst_reg dst_reg;
1278 ir_swizzle *swiz;
1279
1280 ir_dereference_array *deref_array = ir->as_dereference_array();
1281 /* This should have been handled by ir_vec_index_to_cond_assign */
1282 if (deref_array) {
1283 assert(!deref_array->array->type->is_vector());
1284 }
1285
1286 /* Use the rvalue deref handler for the most part. We'll ignore
1287 * swizzles in it and write swizzles using writemask, though.
1288 */
1289 ir->accept(v);
1290 dst_reg = ir_to_mesa_dst_reg_from_src(v->result);
1291
1292 if ((swiz = ir->as_swizzle())) {
1293 int swizzles[4] = {
1294 swiz->mask.x,
1295 swiz->mask.y,
1296 swiz->mask.z,
1297 swiz->mask.w
1298 };
1299 int new_r_swizzle[4];
1300 int orig_r_swizzle = r->swizzle;
1301 int i;
1302
1303 for (i = 0; i < 4; i++) {
1304 new_r_swizzle[i] = GET_SWZ(orig_r_swizzle, 0);
1305 }
1306
1307 dst_reg.writemask = 0;
1308 for (i = 0; i < 4; i++) {
1309 if (i < swiz->mask.num_components) {
1310 dst_reg.writemask |= 1 << swizzles[i];
1311 new_r_swizzle[swizzles[i]] = GET_SWZ(orig_r_swizzle, i);
1312 }
1313 }
1314
1315 r->swizzle = MAKE_SWIZZLE4(new_r_swizzle[0],
1316 new_r_swizzle[1],
1317 new_r_swizzle[2],
1318 new_r_swizzle[3]);
1319 }
1320
1321 return dst_reg;
1322 }
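
/* Rough example of the LHS handling above: for "v.zx = foo.xy" (assuming the
 * rhs arrived with its natural .xyyy swizzle), the LHS swizzle ".zx" becomes
 * writemask XZ, and the RHS swizzle is rearranged so the value destined for
 * z is read from the source's first component and the value destined for x
 * from its second -- roughly "MOV v.xz, foo.yxxx", where the unwritten
 * channels just replicate the source's first selector.
 */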
1323
1324 void
1325 ir_to_mesa_visitor::visit(ir_assignment *ir)
1326 {
1327 struct ir_to_mesa_dst_reg l;
1328 struct ir_to_mesa_src_reg r;
1329 int i;
1330
1331 assert(!ir->lhs->type->is_array());
1332
1333 ir->rhs->accept(this);
1334 r = this->result;
1335
1336 l = get_assignment_lhs(ir->lhs, this, &r);
1337
1338 assert(l.file != PROGRAM_UNDEFINED);
1339 assert(r.file != PROGRAM_UNDEFINED);
1340
1341 if (ir->condition) {
1342 ir_to_mesa_src_reg condition;
1343
1344 ir->condition->accept(this);
1345 condition = this->result;
1346
1347 /* We use the OPCODE_CMP (a < 0 ? b : c) for conditional moves,
1348 * and the condition we produced is 0.0 or 1.0. By flipping the
1349 * sign, we can choose which value OPCODE_CMP produces without
1350 * an extra instruction to compute the condition.
1351 */
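/* Concretely: a true condition is 1.0, so after the sign flip OPCODE_CMP
 * sees -1.0 < 0 and selects the new value r; a false condition stays 0.0,
 * which is not < 0, so CMP keeps the existing contents of l.
 */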
1352 condition.negate = ~condition.negate;
1353 for (i = 0; i < type_size(ir->lhs->type); i++) {
1354 ir_to_mesa_emit_op3(ir, OPCODE_CMP, l,
1355 condition, r, ir_to_mesa_src_reg_from_dst(l));
1356 l.index++;
1357 r.index++;
1358 }
1359 } else {
1360 for (i = 0; i < type_size(ir->lhs->type); i++) {
1361 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1362 l.index++;
1363 r.index++;
1364 }
1365 }
1366 }
1367
1368
1369 void
1370 ir_to_mesa_visitor::visit(ir_constant *ir)
1371 {
1372 ir_to_mesa_src_reg src_reg;
1373 GLfloat stack_vals[4];
1374 GLfloat *values = stack_vals;
1375 unsigned int i;
1376
1377 if (ir->type->is_array()) {
1378 ir->print();
1379 printf("\n");
1380 assert(!"FINISHME: array constants");
1381 }
1382
1383 if (ir->type->is_matrix()) {
1384 /* Unfortunately, 4 floats is all we can get into
1385 * _mesa_add_unnamed_constant. So, make a temp to store the
1386 * matrix and move each constant value into it. If we get
1387 * lucky, copy propagation will eliminate the extra moves.
1388 */
1389 ir_to_mesa_src_reg mat = get_temp(glsl_type::vec4_type);
1390 ir_to_mesa_dst_reg mat_column = ir_to_mesa_dst_reg_from_src(mat);
1391
1392 for (i = 0; i < ir->type->matrix_columns; i++) {
1393 src_reg.file = PROGRAM_CONSTANT;
1394
1395 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1396 values = &ir->value.f[i * ir->type->vector_elements];
1397
1398 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1399 values,
1400 ir->type->vector_elements,
1401 &src_reg.swizzle);
1402 src_reg.reladdr = NULL;
1403 src_reg.negate = 0;
1404 ir_to_mesa_emit_op1(ir, OPCODE_MOV, mat_column, src_reg);
1405
1406 mat_column.index++;
1407 }
1408
1409 this->result = mat;
return;
1410 }
1411
1412 src_reg.file = PROGRAM_CONSTANT;
1413 switch (ir->type->base_type) {
1414 case GLSL_TYPE_FLOAT:
1415 values = &ir->value.f[0];
1416 break;
1417 case GLSL_TYPE_UINT:
1418 for (i = 0; i < ir->type->vector_elements; i++) {
1419 values[i] = ir->value.u[i];
1420 }
1421 break;
1422 case GLSL_TYPE_INT:
1423 for (i = 0; i < ir->type->vector_elements; i++) {
1424 values[i] = ir->value.i[i];
1425 }
1426 break;
1427 case GLSL_TYPE_BOOL:
1428 for (i = 0; i < ir->type->vector_elements; i++) {
1429 values[i] = ir->value.b[i];
1430 }
1431 break;
1432 default:
1433 assert(!"Non-float/uint/int/bool constant");
1434 }
1435
1436 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1437 values, ir->type->vector_elements,
1438 &src_reg.swizzle);
1439 src_reg.reladdr = NULL;
1440 src_reg.negate = 0;
1441
1442 this->result = src_reg;
1443 }
1444
1445 function_entry *
1446 ir_to_mesa_visitor::get_function_signature(ir_function_signature *sig)
1447 {
1448 function_entry *entry;
1449
1450 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
1451 entry = (function_entry *)iter.get();
1452
1453 if (entry->sig == sig)
1454 return entry;
1455 }
1456
1457 entry = talloc(mem_ctx, function_entry);
1458 entry->sig = sig;
1459 entry->sig_id = this->next_signature_id++;
1460 entry->bgn_inst = NULL;
1461
1462 /* Allocate storage for all the parameters. */
1463 foreach_iter(exec_list_iterator, iter, sig->parameters) {
1464 ir_variable *param = (ir_variable *)iter.get();
1465 variable_storage *storage;
1466
1467 storage = find_variable_storage(param);
1468 assert(!storage);
1469
1470 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
1471 this->next_temp);
1472 this->variables.push_tail(storage);
1473
1474 this->next_temp += type_size(param->type);
1476 }
1477
1478 if (sig->return_type) {
1479 entry->return_reg = get_temp(sig->return_type);
1480 } else {
1481 entry->return_reg = ir_to_mesa_undef;
1482 }
1483
1484 this->function_signatures.push_tail(entry);
1485 return entry;
1486 }
1487
1488 void
1489 ir_to_mesa_visitor::visit(ir_call *ir)
1490 {
1491 ir_to_mesa_instruction *call_inst;
1492 ir_function_signature *sig = ir->get_callee();
1493 function_entry *entry = get_function_signature(sig);
1494 int i;
1495
1496 /* Process in parameters. */
1497 exec_list_iterator sig_iter = sig->parameters.iterator();
1498 foreach_iter(exec_list_iterator, iter, *ir) {
1499 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1500 ir_variable *param = (ir_variable *)sig_iter.get();
1501
1502 if (param->mode == ir_var_in ||
1503 param->mode == ir_var_inout) {
1504 variable_storage *storage = find_variable_storage(param);
1505 assert(storage);
1506
1507 param_rval->accept(this);
1508 ir_to_mesa_src_reg r = this->result;
1509
1510 ir_to_mesa_dst_reg l;
1511 l.file = storage->file;
1512 l.index = storage->index;
1513 l.reladdr = NULL;
1514 l.writemask = WRITEMASK_XYZW;
1515 l.cond_mask = COND_TR;
1516
1517 for (i = 0; i < type_size(param->type); i++) {
1518 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1519 l.index++;
1520 r.index++;
1521 }
1522 }
1523
1524 sig_iter.next();
1525 }
1526 assert(!sig_iter.has_next());
1527
1528 /* Emit call instruction */
1529 call_inst = ir_to_mesa_emit_op1(ir, OPCODE_CAL,
1530 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1531 call_inst->function = entry;
1532
1533 /* Process out parameters. */
1534 sig_iter = sig->parameters.iterator();
1535 foreach_iter(exec_list_iterator, iter, *ir) {
1536 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1537 ir_variable *param = (ir_variable *)sig_iter.get();
1538
1539 if (param->mode == ir_var_out ||
1540 param->mode == ir_var_inout) {
1541 variable_storage *storage = find_variable_storage(param);
1542 assert(storage);
1543
1544 ir_to_mesa_src_reg r;
1545 r.file = storage->file;
1546 r.index = storage->index;
1547 r.reladdr = NULL;
1548 r.swizzle = SWIZZLE_NOOP;
1549 r.negate = 0;
1550
1551 param_rval->accept(this);
1552 ir_to_mesa_dst_reg l = ir_to_mesa_dst_reg_from_src(this->result);
1553
1554 for (i = 0; i < type_size(param->type); i++) {
1555 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1556 l.index++;
1557 r.index++;
1558 }
1559 }
1560
1561 sig_iter.next();
1562 }
1563 assert(!sig_iter.has_next());
1564
1565 /* Process return value. */
1566 this->result = entry->return_reg;
1567 }
1568
1569
1570 void
1571 ir_to_mesa_visitor::visit(ir_texture *ir)
1572 {
1573 ir_to_mesa_src_reg result_src, coord, lod_info = { 0 }, projector;
1574 ir_to_mesa_dst_reg result_dst, coord_dst;
1575 ir_to_mesa_instruction *inst = NULL;
1576 prog_opcode opcode = OPCODE_NOP;
1577
1578 ir->coordinate->accept(this);
1579
1580 /* Put our coords in a temp. We'll need to modify them for shadow,
1581 * projection, or LOD, so the only case where we'd use it as-is is
1582 * plain old texturing. Mesa IR optimization should
1583 * handle cleaning up our mess in that case.
1584 */
1585 coord = get_temp(glsl_type::vec4_type);
1586 coord_dst = ir_to_mesa_dst_reg_from_src(coord);
1587 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst,
1588 this->result);
1589
1590 if (ir->projector) {
1591 ir->projector->accept(this);
1592 projector = this->result;
1593 }
1594
1595 /* Storage for our result. Ideally for an assignment we'd be using
1596 * the actual storage for the result here, instead.
1597 */
1598 result_src = get_temp(glsl_type::vec4_type);
1599 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
1600
1601 switch (ir->op) {
1602 case ir_tex:
1603 opcode = OPCODE_TEX;
1604 break;
1605 case ir_txb:
1606 opcode = OPCODE_TXB;
1607 ir->lod_info.bias->accept(this);
1608 lod_info = this->result;
1609 break;
1610 case ir_txl:
1611 opcode = OPCODE_TXL;
1612 ir->lod_info.lod->accept(this);
1613 lod_info = this->result;
1614 break;
1615 case ir_txd:
1616 case ir_txf:
1617 assert(!"GLSL 1.30 features unsupported");
1618 break;
1619 }
1620
1621 if (ir->projector) {
1622 if (opcode == OPCODE_TEX) {
1623 /* Slot the projector in as the last component of the coord. */
1624 coord_dst.writemask = WRITEMASK_W;
1625 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, projector);
1626 coord_dst.writemask = WRITEMASK_XYZW;
1627 opcode = OPCODE_TXP;
1628 } else {
1629 ir_to_mesa_src_reg coord_w = coord;
1630 coord_w.swizzle = SWIZZLE_WWWW;
1631
1632 /* For the other TEX opcodes there's no projective version
1633 * since the last slot is taken up by lod info. Do the
1634 * projective divide now.
1635 */
1636 coord_dst.writemask = WRITEMASK_W;
1637 ir_to_mesa_emit_op1(ir, OPCODE_RCP, coord_dst, projector);
1638
1639 coord_dst.writemask = WRITEMASK_XYZ;
1640 ir_to_mesa_emit_op2(ir, OPCODE_MUL, coord_dst, coord, coord_w);
1641
1642 coord_dst.writemask = WRITEMASK_XYZW;
1643 coord.swizzle = SWIZZLE_XYZW;
1644 }
1645 }
1646
1647 if (ir->shadow_comparitor) {
1648 /* Slot the shadow value in as the second to last component of the
1649 * coord.
1650 */
1651 ir->shadow_comparitor->accept(this);
1652 coord_dst.writemask = WRITEMASK_Z;
1653 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, this->result);
1654 coord_dst.writemask = WRITEMASK_XYZW;
1655 }
1656
1657 if (opcode == OPCODE_TXL || opcode == OPCODE_TXB) {
1658 /* Mesa IR stores lod or lod bias in the last channel of the coords. */
1659 coord_dst.writemask = WRITEMASK_W;
1660 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, lod_info);
1661 coord_dst.writemask = WRITEMASK_XYZW;
1662 }
1663
1664 inst = ir_to_mesa_emit_op1(ir, opcode, result_dst, coord);
1665
1666 if (ir->shadow_comparitor)
1667 inst->tex_shadow = GL_TRUE;
1668
1669 ir_dereference_variable *sampler = ir->sampler->as_dereference_variable();
1670 assert(sampler); /* FINISHME: sampler arrays */
1671 /* generate the mapping, remove when we generate storage at
1672 * declaration time
1673 */
1674 sampler->accept(this);
1675
1676 inst->sampler = get_sampler_number(sampler->var->location);
1677
1678 switch (sampler->type->sampler_dimensionality) {
1679 case GLSL_SAMPLER_DIM_1D:
1680 inst->tex_target = TEXTURE_1D_INDEX;
1681 break;
1682 case GLSL_SAMPLER_DIM_2D:
1683 inst->tex_target = TEXTURE_2D_INDEX;
1684 break;
1685 case GLSL_SAMPLER_DIM_3D:
1686 inst->tex_target = TEXTURE_3D_INDEX;
1687 break;
1688 case GLSL_SAMPLER_DIM_CUBE:
1689 inst->tex_target = TEXTURE_CUBE_INDEX;
1690 break;
1691 default:
1692 assert(!"FINISHME: other texture targets");
1693 }
1694
1695 this->result = result_src;
1696 }
1697
1698 void
1699 ir_to_mesa_visitor::visit(ir_return *ir)
1700 {
1701 assert(current_function);
1702
1703 if (ir->get_value()) {
1704 ir_to_mesa_dst_reg l;
1705 int i;
1706
1707 ir->get_value()->accept(this);
1708 ir_to_mesa_src_reg r = this->result;
1709
1710 l = ir_to_mesa_dst_reg_from_src(current_function->return_reg);
1711
1712 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
1713 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1714 l.index++;
1715 r.index++;
1716 }
1717 }
1718
1719 ir_to_mesa_emit_op0(ir, OPCODE_RET);
1720 }
1721
1722 void
1723 ir_to_mesa_visitor::visit(ir_discard *ir)
1724 {
1725 assert(ir->condition == NULL); /* FINISHME */
1726
1727 ir_to_mesa_emit_op0(ir, OPCODE_KIL_NV);
1728 }
1729
1730 void
1731 ir_to_mesa_visitor::visit(ir_if *ir)
1732 {
1733 ir_to_mesa_instruction *cond_inst, *if_inst, *else_inst = NULL;
1734 ir_to_mesa_instruction *prev_inst;
1735
1736 prev_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1737
1738 ir->condition->accept(this);
1739 assert(this->result.file != PROGRAM_UNDEFINED);
1740
1741 if (ctx->Shader.EmitCondCodes) {
1742 cond_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1743
1744 /* See if we actually generated any instruction for generating
1745 * the condition. If not, then cook up a move to a temp so we
1746 * have something to set cond_update on.
1747 */
1748 if (cond_inst == prev_inst) {
1749 ir_to_mesa_src_reg temp = get_temp(glsl_type::bool_type);
1750 cond_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_MOV,
1751 ir_to_mesa_dst_reg_from_src(temp),
1752 result);
1753 }
1754 cond_inst->cond_update = GL_TRUE;
1755
1756 if_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_IF);
1757 if_inst->dst_reg.cond_mask = COND_NE;
1758 } else {
1759 if_inst = ir_to_mesa_emit_op1(ir->condition,
1760 OPCODE_IF, ir_to_mesa_undef_dst,
1761 this->result);
1762 }
1763
1766 visit_exec_list(&ir->then_instructions, this);
1767
1768 if (!ir->else_instructions.is_empty()) {
1769 else_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_ELSE);
1770 visit_exec_list(&ir->else_instructions, this);
1771 }
1772
1773 if_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_ENDIF,
1774 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1775 }
1776
1777 ir_to_mesa_visitor::ir_to_mesa_visitor()
1778 {
1779 result.file = PROGRAM_UNDEFINED;
1780 next_temp = 1;
1781 next_signature_id = 1;
1782 sampler_map = NULL;
1783 sampler_map_size = 0;
1784 current_function = NULL;
1785 }
1786
1787 static struct prog_src_register
1788 mesa_src_reg_from_ir_src_reg(ir_to_mesa_src_reg reg)
1789 {
1790 struct prog_src_register mesa_reg;
1791
1792 mesa_reg.File = reg.file;
1793 assert(reg.index < (1 << INST_INDEX_BITS) - 1);
1794 mesa_reg.Index = reg.index;
1795 mesa_reg.Swizzle = reg.swizzle;
1796 mesa_reg.RelAddr = reg.reladdr != NULL;
1797 mesa_reg.Negate = reg.negate;
1798 mesa_reg.Abs = 0;
1799
1800 return mesa_reg;
1801 }
1802
1803 static void
1804 set_branchtargets(ir_to_mesa_visitor *v,
1805 struct prog_instruction *mesa_instructions,
1806 int num_instructions)
1807 {
1808 int if_count = 0, loop_count = 0;
1809 int *if_stack, *loop_stack;
1810 int if_stack_pos = 0, loop_stack_pos = 0;
1811 int i, j;
1812
1813 for (i = 0; i < num_instructions; i++) {
1814 switch (mesa_instructions[i].Opcode) {
1815 case OPCODE_IF:
1816 if_count++;
1817 break;
1818 case OPCODE_BGNLOOP:
1819 loop_count++;
1820 break;
1821 case OPCODE_BRK:
1822 case OPCODE_CONT:
1823 mesa_instructions[i].BranchTarget = -1;
1824 break;
1825 default:
1826 break;
1827 }
1828 }
1829
1830 if_stack = (int *)calloc(if_count, sizeof(*if_stack));
1831 loop_stack = (int *)calloc(loop_count, sizeof(*loop_stack));
1832
1833 for (i = 0; i < num_instructions; i++) {
1834 switch (mesa_instructions[i].Opcode) {
1835 case OPCODE_IF:
1836 if_stack[if_stack_pos] = i;
1837 if_stack_pos++;
1838 break;
1839 case OPCODE_ELSE:
1840 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1841 if_stack[if_stack_pos - 1] = i;
1842 break;
1843 case OPCODE_ENDIF:
1844 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1845 if_stack_pos--;
1846 break;
1847 case OPCODE_BGNLOOP:
1848 loop_stack[loop_stack_pos] = i;
1849 loop_stack_pos++;
1850 break;
1851 case OPCODE_ENDLOOP:
1852 loop_stack_pos--;
1853 /* Rewrite any breaks/conts at this nesting level (that haven't
1854 * already had a BranchTarget assigned) to point to the end
1855 * of the loop.
1856 */
1857 for (j = loop_stack[loop_stack_pos]; j < i; j++) {
1858 if (mesa_instructions[j].Opcode == OPCODE_BRK ||
1859 mesa_instructions[j].Opcode == OPCODE_CONT) {
1860 if (mesa_instructions[j].BranchTarget == -1) {
1861 mesa_instructions[j].BranchTarget = i;
1862 }
1863 }
1864 }
1865 /* The loop ends point at each other. */
1866 mesa_instructions[i].BranchTarget = loop_stack[loop_stack_pos];
1867 mesa_instructions[loop_stack[loop_stack_pos]].BranchTarget = i;
1868 break;
1869 case OPCODE_CAL:
1870 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
1871 function_entry *entry = (function_entry *)iter.get();
1872
1873 if (entry->sig_id == mesa_instructions[i].BranchTarget) {
1874 mesa_instructions[i].BranchTarget = entry->inst;
1875 break;
1876 }
1877 }
1878 break;
1879 default:
1880 break;
1881 }
1882 }
1883
1884 free(if_stack);
free(loop_stack);
1885 }
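
/* Summary of what the pass above produces, for illustration: IF records the
 * index of its matching ELSE (or ENDIF) in BranchTarget and ELSE records its
 * ENDIF; BGNLOOP and ENDLOOP point at each other; BRK/CONT point at their
 * enclosing ENDLOOP; and CAL's placeholder signature id is replaced with the
 * instruction index of the called function's body.
 */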
1886
1887 static void
1888 print_program(struct prog_instruction *mesa_instructions,
1889 ir_instruction **mesa_instruction_annotation,
1890 int num_instructions)
1891 {
1892 ir_instruction *last_ir = NULL;
1893 int i;
1894 int indent = 0;
1895
1896 for (i = 0; i < num_instructions; i++) {
1897 struct prog_instruction *mesa_inst = mesa_instructions + i;
1898 ir_instruction *ir = mesa_instruction_annotation[i];
1899
1900 fprintf(stdout, "%3d: ", i);
1901
1902 if (last_ir != ir && ir) {
1903 int j;
1904
1905 for (j = 0; j < indent; j++) {
1906 fprintf(stdout, " ");
1907 }
1908 ir->print();
1909 printf("\n");
1910 last_ir = ir;
1911
1912 fprintf(stdout, " "); /* line number spacing. */
1913 }
1914
1915 indent = _mesa_fprint_instruction_opt(stdout, mesa_inst, indent,
1916 PROG_PRINT_DEBUG, NULL);
1917 }
1918 }
1919
1920 static void
1921 mark_input(struct gl_program *prog,
1922 int index,
1923 GLboolean reladdr)
1924 {
1925 prog->InputsRead |= BITFIELD64_BIT(index);
1926 int i;
1927
1928 if (reladdr) {
1929 if (index >= FRAG_ATTRIB_TEX0 && index <= FRAG_ATTRIB_TEX7) {
1930 for (i = 0; i < 8; i++) {
1931 prog->InputsRead |= BITFIELD64_BIT(FRAG_ATTRIB_TEX0 + i);
1932 }
1933 } else {
1934 assert(!"FINISHME: Mark InputsRead for varying arrays");
1935 }
1936 }
1937 }
1938
1939 static void
1940 mark_output(struct gl_program *prog,
1941 int index,
1942 GLboolean reladdr)
1943 {
1944 prog->OutputsWritten |= BITFIELD64_BIT(index);
1945 int i;
1946
1947 if (reladdr) {
1948 if (index >= VERT_RESULT_TEX0 && index <= VERT_RESULT_TEX7) {
1949 for (i = 0; i < 8; i++) {
1950 	    prog->OutputsWritten |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
1951 }
1952 } else {
1953 assert(!"FINISHME: Mark OutputsWritten for varying arrays");
1954 }
1955 }
1956 }
1957
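/**
 * Walks the program's instructions to rebuild the InputsRead,
 * OutputsWritten, SamplersUsed and ShadowSamplers resource masks.
 */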
1958 static void
1959 count_resources(struct gl_program *prog)
1960 {
1961 unsigned int i;
1962
1963 prog->InputsRead = 0;
1964 prog->OutputsWritten = 0;
1965 prog->SamplersUsed = 0;
1966
1967 for (i = 0; i < prog->NumInstructions; i++) {
1968 struct prog_instruction *inst = &prog->Instructions[i];
1969 unsigned int reg;
1970
1971 switch (inst->DstReg.File) {
1972 case PROGRAM_OUTPUT:
1973 mark_output(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1974 break;
1975 case PROGRAM_INPUT:
1976 mark_input(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1977 break;
1978 default:
1979 break;
1980 }
1981
1982 for (reg = 0; reg < _mesa_num_inst_src_regs(inst->Opcode); reg++) {
1983 switch (inst->SrcReg[reg].File) {
1984 case PROGRAM_OUTPUT:
1985 mark_output(prog, inst->SrcReg[reg].Index,
1986 inst->SrcReg[reg].RelAddr);
1987 break;
1988 case PROGRAM_INPUT:
1989 mark_input(prog, inst->SrcReg[reg].Index, inst->SrcReg[reg].RelAddr);
1990 break;
1991 default:
1992 break;
1993 }
1994 }
1995
1996 /* Instead of just using the uniform's value to map to a
1997 * sampler, Mesa first allocates a separate number for the
1998 * sampler (_mesa_add_sampler), then we reindex it down to a
1999 * small integer (sampler_map[], SamplersUsed), then that gets
2000 * mapped to the uniform's value, and we get an actual sampler.
2001 */
2002 if (_mesa_is_tex_instruction(inst->Opcode)) {
2003 prog->SamplerTargets[inst->TexSrcUnit] =
2004 (gl_texture_index)inst->TexSrcTarget;
2005 prog->SamplersUsed |= 1 << inst->TexSrcUnit;
2006 if (inst->TexShadow) {
2007 prog->ShadowSamplers |= 1 << inst->TexSrcUnit;
2008 }
2009 }
2010 }
2011
2012 _mesa_update_shader_textures_used(prog);
2013 }
2014
2015 /* Each stage has some uniforms in its Parameters list.  The Uniforms
2016  * list for the linked shader program points at these uniforms in each
2017  * stage's Parameters list, so that their values can be updated when a
2018  * uniform is set.
2019  */
2020 static void
2021 link_uniforms_to_shared_uniform_list(struct gl_uniform_list *uniforms,
2022 struct gl_program *prog)
2023 {
2024 unsigned int i;
2025
2026 for (i = 0; i < prog->Parameters->NumParameters; i++) {
2027 const struct gl_program_parameter *p = prog->Parameters->Parameters + i;
2028
2029 if (p->Type == PROGRAM_UNIFORM || p->Type == PROGRAM_SAMPLER) {
2030 struct gl_uniform *uniform =
2031 _mesa_append_uniform(uniforms, p->Name, prog->Target, i);
2032 if (uniform)
2033 uniform->Initialized = p->Initialized;
2034 }
2035 }
2036 }
2037
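/**
 * Translates a compiled GLSL shader into a Mesa gl_program: emits Mesa IR
 * for main() and any called functions, converts the resulting
 * ir_to_mesa_instruction list into prog_instructions, resolves branch
 * targets, and optionally runs the Mesa program optimizer.
 */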
2038 struct gl_program *
2039 get_mesa_program(GLcontext *ctx, struct gl_shader_program *shader_program,
2040 struct gl_shader *shader)
2041 {
2042 void *mem_ctx = shader_program;
2043 ir_to_mesa_visitor v;
2044 struct prog_instruction *mesa_instructions, *mesa_inst;
2045 ir_instruction **mesa_instruction_annotation;
2046 int i;
2047 struct gl_program *prog;
2048 GLenum target;
2049 const char *target_string;
2050 GLboolean progress;
2051
2052 switch (shader->Type) {
2053 case GL_VERTEX_SHADER:
2054 target = GL_VERTEX_PROGRAM_ARB;
2055 target_string = "vertex";
2056 break;
2057 case GL_FRAGMENT_SHADER:
2058 target = GL_FRAGMENT_PROGRAM_ARB;
2059 target_string = "fragment";
2060 break;
2061 default:
2062 assert(!"should not be reached");
2063       return NULL;
2064 }
2065
2066 validate_ir_tree(shader->ir);
2067
2068 prog = ctx->Driver.NewProgram(ctx, target, 1);
2069 if (!prog)
2070 return NULL;
2071 prog->Parameters = _mesa_new_parameter_list();
2072 prog->Varying = _mesa_new_parameter_list();
2073 prog->Attributes = _mesa_new_parameter_list();
2074 v.ctx = ctx;
2075 v.prog = prog;
2076
2077 v.mem_ctx = talloc_new(NULL);
2078
2079 /* Emit Mesa IR for main(). */
2080 visit_exec_list(shader->ir, &v);
2081 v.ir_to_mesa_emit_op0(NULL, OPCODE_END);
2082
2083 /* Now emit bodies for any functions that were used. */
2084 do {
2085 progress = GL_FALSE;
2086
2087 foreach_iter(exec_list_iterator, iter, v.function_signatures) {
2088 function_entry *entry = (function_entry *)iter.get();
2089
2090 if (!entry->bgn_inst) {
2091 v.current_function = entry;
2092
2093 entry->bgn_inst = v.ir_to_mesa_emit_op0(NULL, OPCODE_BGNSUB);
2094 entry->bgn_inst->function = entry;
2095
2096 visit_exec_list(&entry->sig->body, &v);
2097
2098 	    v.ir_to_mesa_emit_op0(NULL, OPCODE_RET);
2099 	    v.ir_to_mesa_emit_op0(NULL, OPCODE_ENDSUB);
2100 progress = GL_TRUE;
2101 }
2102 }
2103 } while (progress);
2104
2105 prog->NumTemporaries = v.next_temp;
2106
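   /* Count the emitted instructions so the final prog_instruction array
    * can be allocated.
    */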
2107 int num_instructions = 0;
2108 foreach_iter(exec_list_iterator, iter, v.instructions) {
2109 num_instructions++;
2110 }
2111
2112 mesa_instructions =
2113 (struct prog_instruction *)calloc(num_instructions,
2114 sizeof(*mesa_instructions));
2115 mesa_instruction_annotation = talloc_array(mem_ctx, ir_instruction *,
2116 num_instructions);
2117
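   /* Convert the ir_to_mesa_instruction list into Mesa prog_instructions. */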
2118 mesa_inst = mesa_instructions;
2119 i = 0;
2120 foreach_iter(exec_list_iterator, iter, v.instructions) {
2121 ir_to_mesa_instruction *inst = (ir_to_mesa_instruction *)iter.get();
2122
2123 mesa_inst->Opcode = inst->op;
2124 mesa_inst->CondUpdate = inst->cond_update;
2125 mesa_inst->DstReg.File = inst->dst_reg.file;
2126 mesa_inst->DstReg.Index = inst->dst_reg.index;
2127 mesa_inst->DstReg.CondMask = inst->dst_reg.cond_mask;
2128 mesa_inst->DstReg.WriteMask = inst->dst_reg.writemask;
2129 mesa_inst->DstReg.RelAddr = inst->dst_reg.reladdr != NULL;
2130 mesa_inst->SrcReg[0] = mesa_src_reg_from_ir_src_reg(inst->src_reg[0]);
2131 mesa_inst->SrcReg[1] = mesa_src_reg_from_ir_src_reg(inst->src_reg[1]);
2132 mesa_inst->SrcReg[2] = mesa_src_reg_from_ir_src_reg(inst->src_reg[2]);
2133 mesa_inst->TexSrcUnit = inst->sampler;
2134 mesa_inst->TexSrcTarget = inst->tex_target;
2135 mesa_inst->TexShadow = inst->tex_shadow;
2136 mesa_instruction_annotation[i] = inst->ir;
2137
2138 if (ctx->Shader.EmitNoIfs && mesa_inst->Opcode == OPCODE_IF) {
2139 shader_program->InfoLog =
2140 talloc_asprintf_append(shader_program->InfoLog,
2141 "Couldn't flatten if statement\n");
2142 shader_program->LinkStatus = false;
2143 }
2144
2145 if (mesa_inst->Opcode == OPCODE_BGNSUB)
2146 inst->function->inst = i;
2147 else if (mesa_inst->Opcode == OPCODE_CAL)
2148 mesa_inst->BranchTarget = inst->function->sig_id; /* rewritten later */
2149
2150 mesa_inst++;
2151 i++;
2152 }
2153
2154 set_branchtargets(&v, mesa_instructions, num_instructions);
2155 if (ctx->Shader.Flags & GLSL_DUMP) {
2156 printf("Mesa %s program:\n", target_string);
2157 print_program(mesa_instructions, mesa_instruction_annotation,
2158 num_instructions);
2159 }
2160
2161 prog->Instructions = mesa_instructions;
2162 prog->NumInstructions = num_instructions;
2163
2164 _mesa_reference_program(ctx, &shader->Program, prog);
2165
2166 if ((ctx->Shader.Flags & GLSL_NO_OPT) == 0) {
2167 _mesa_optimize_program(ctx, prog);
2168 }
2169
2170 return prog;
2171 }
2172
2173 extern "C" {
2174
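/**
 * Compiles a GLSL shader: preprocesses and parses the source, converts
 * the AST to GLSL IR, and runs the lowering and optimization passes that
 * can be done on an unlinked shader.
 */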
2175 void
2176 _mesa_glsl_compile_shader(GLcontext *ctx, struct gl_shader *shader)
2177 {
2178 struct _mesa_glsl_parse_state *state =
2179 new(shader) _mesa_glsl_parse_state(ctx, shader->Type, shader);
2180
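   /* Run the GLSL preprocessor on the shader source. */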
2181 const char *source = shader->Source;
2182 state->error = preprocess(state, &source, &state->info_log,
2183 &ctx->Extensions);
2184
2185 if (!state->error) {
2186 _mesa_glsl_lexer_ctor(state, source);
2187 _mesa_glsl_parse(state);
2188 _mesa_glsl_lexer_dtor(state);
2189 }
2190
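   /* Convert the parsed AST to GLSL IR. */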
2191 shader->ir = new(shader) exec_list;
2192 if (!state->error && !state->translation_unit.is_empty())
2193 _mesa_ast_to_hir(shader->ir, state);
2194
2195 if (!state->error && !shader->ir->is_empty()) {
2196 validate_ir_tree(shader->ir);
2197
2198 /* Lowering */
2199 do_mat_op_to_vec(shader->ir);
2200 do_mod_to_fract(shader->ir);
2201 do_div_to_mul_rcp(shader->ir);
2202
2203 /* Optimization passes */
2204 bool progress;
2205 do {
2206 progress = false;
2207
2208 progress = do_function_inlining(shader->ir) || progress;
2209 progress = do_if_simplification(shader->ir) || progress;
2210 progress = do_copy_propagation(shader->ir) || progress;
2211 progress = do_dead_code_local(shader->ir) || progress;
2212 progress = do_dead_code_unlinked(state, shader->ir) || progress;
2213 progress = do_constant_variable_unlinked(shader->ir) || progress;
2214 progress = do_constant_folding(shader->ir) || progress;
2215 progress = do_if_return(shader->ir) || progress;
2216 if (ctx->Shader.EmitNoIfs)
2217 progress = do_if_to_cond_assign(shader->ir) || progress;
2218
2219 progress = do_vec_index_to_swizzle(shader->ir) || progress;
2220 /* Do this one after the previous to let the easier pass handle
2221 * constant vector indexing.
2222 */
2223 progress = do_vec_index_to_cond_assign(shader->ir) || progress;
2224
2225 progress = do_swizzle_swizzle(shader->ir) || progress;
2226 } while (progress);
2227
2228 validate_ir_tree(shader->ir);
2229 }
2230
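   /* Store the results of compilation back in the gl_shader. */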
2231 shader->symbols = state->symbols;
2232
2233 shader->CompileStatus = !state->error;
2234 shader->InfoLog = state->info_log;
2235 shader->Version = state->language_version;
2236 memcpy(shader->builtins_to_link, state->builtins_to_link,
2237 sizeof(shader->builtins_to_link[0]) * state->num_builtins_to_link);
2238 shader->num_builtins_to_link = state->num_builtins_to_link;
2239
2240 /* Retain any live IR, but trash the rest. */
2241 reparent_ir(shader->ir, shader);
2242
2243 talloc_free(state);
2244 }
2245
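/**
 * Links the shaders attached to a gl_shader_program, translates each
 * linked stage to Mesa IR with get_mesa_program(), and attaches the
 * resulting gl_programs to the shader program, notifying the driver.
 */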
2246 void
2247 _mesa_glsl_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
2248 {
2249 unsigned int i;
2250
2251 _mesa_clear_shader_program_data(ctx, prog);
2252
2253 prog->LinkStatus = GL_TRUE;
2254
2255 for (i = 0; i < prog->NumShaders; i++) {
2256 if (!prog->Shaders[i]->CompileStatus) {
2257 prog->InfoLog =
2258 talloc_asprintf_append(prog->InfoLog,
2259 "linking with uncompiled shader");
2260 prog->LinkStatus = GL_FALSE;
2261 }
2262 }
2263
2264 prog->Varying = _mesa_new_parameter_list();
2265 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
2266 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
2267
2268 if (prog->LinkStatus) {
2269 link_shaders(prog);
2270
2271 /* We don't use the linker's uniforms list, and cook up our own at
2272 * generate time.
2273 */
2274       _mesa_free_uniform_list(prog->Uniforms);
2275 prog->Uniforms = _mesa_new_uniform_list();
2276 }
2277
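   /* Translate each linked shader stage to Mesa IR and attach the
    * resulting gl_programs to the shader program.
    */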
2278 if (prog->LinkStatus) {
2279 for (i = 0; i < prog->_NumLinkedShaders; i++) {
2280 struct gl_program *linked_prog;
2281
2282 linked_prog = get_mesa_program(ctx, prog,
2283 prog->_LinkedShaders[i]);
2284 count_resources(linked_prog);
2285
2286 link_uniforms_to_shared_uniform_list(prog->Uniforms, linked_prog);
2287
2288 switch (prog->_LinkedShaders[i]->Type) {
2289 case GL_VERTEX_SHADER:
2290 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
2291 (struct gl_vertex_program *)linked_prog);
2292 ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
2293 linked_prog);
2294 break;
2295 case GL_FRAGMENT_SHADER:
2296 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
2297 (struct gl_fragment_program *)linked_prog);
2298 ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
2299 linked_prog);
2300 break;
2301 }
2302 }
2303 }
2304 }
2305
2306 } /* extern "C" */