i965: First cut at register allocation using graph coloring.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 extern "C" {
29
30 #include <sys/types.h>
31
32 #include "main/macros.h"
33 #include "main/shaderobj.h"
34 #include "main/uniforms.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "program/prog_optimize.h"
38 #include "program/register_allocate.h"
39 #include "program/sampler.h"
40 #include "program/hash_table.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_wm.h"
44 #include "talloc.h"
45 }
46 #include "../glsl/glsl_types.h"
47 #include "../glsl/ir_optimization.h"
48 #include "../glsl/ir_print_visitor.h"
49
/* Register files an fs_reg may live in.  The first four reuse the
 * corresponding BRW_* hardware file encodings; the last three are
 * compiler-internal files resolved before code generation.
 */
enum register_file {
   ARF = BRW_ARCHITECTURE_REGISTER_FILE,
   GRF = BRW_GENERAL_REGISTER_FILE,
   MRF = BRW_MESSAGE_REGISTER_FILE,
   IMM = BRW_IMMEDIATE_VALUE,
   FIXED_HW_REG, /* a struct brw_reg */
   UNIFORM, /* prog_data->params[hw_reg] */
   BAD_FILE /* sentinel: "no register here" (see reg_undef) */
};
59
/* Virtual opcodes used alongside the BRW_OPCODE_* hardware opcodes in
 * fs_inst::opcode.  Values start at 256, presumably to stay clear of the
 * hardware opcode encodings -- TODO confirm against brw_eu.h.
 */
enum fs_opcodes {
   FS_OPCODE_FB_WRITE = 256,
   FS_OPCODE_RCP,
   FS_OPCODE_RSQ,
   FS_OPCODE_SQRT,
   FS_OPCODE_EXP2,
   FS_OPCODE_LOG2,
   FS_OPCODE_POW,
   FS_OPCODE_SIN,
   FS_OPCODE_COS,
   FS_OPCODE_DDX,
   FS_OPCODE_DDY,
   FS_OPCODE_LINTERP,
   FS_OPCODE_TEX,
   FS_OPCODE_TXB,
   FS_OPCODE_TXL,
   FS_OPCODE_DISCARD,
};
78
/* -1 = not yet decided; set from the INTEL_NEW_FS environment variable on
 * the first call to brw_link_shader(). */
static int using_new_fs = -1;
static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
81
82 struct gl_shader *
83 brw_new_shader(GLcontext *ctx, GLuint name, GLuint type)
84 {
85 struct brw_shader *shader;
86
87 shader = talloc_zero(NULL, struct brw_shader);
88 if (shader) {
89 shader->base.Type = type;
90 shader->base.Name = name;
91 _mesa_init_shader(ctx, &shader->base);
92 }
93
94 return &shader->base;
95 }
96
97 struct gl_shader_program *
98 brw_new_shader_program(GLcontext *ctx, GLuint name)
99 {
100 struct brw_shader_program *prog;
101 prog = talloc_zero(NULL, struct brw_shader_program);
102 if (prog) {
103 prog->base.Name = name;
104 _mesa_init_shader_program(ctx, &prog->base);
105 }
106 return &prog->base;
107 }
108
109 GLboolean
110 brw_compile_shader(GLcontext *ctx, struct gl_shader *shader)
111 {
112 if (!_mesa_ir_compile_shader(ctx, shader))
113 return GL_FALSE;
114
115 return GL_TRUE;
116 }
117
/**
 * Link the program, first running this backend's IR lowering passes over a
 * clone of each fragment shader's IR when INTEL_NEW_FS is set.
 *
 * Returns GL_FALSE if the shared Mesa IR linker fails.
 */
GLboolean
brw_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
{
   /* Decide once (cached in a file-scope flag) whether the new FS backend
    * is enabled via the INTEL_NEW_FS environment variable.
    */
   if (using_new_fs == -1)
      using_new_fs = getenv("INTEL_NEW_FS") != NULL;

   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
      struct brw_shader *shader = (struct brw_shader *)prog->_LinkedShaders[i];

      if (using_new_fs && shader->base.Type == GL_FRAGMENT_SHADER) {
	 void *mem_ctx = talloc_new(NULL);
	 bool progress;

	 /* Drop any lowered copy left from a previous link, then clone the
	  * freshly linked IR (shader->base.ir) so the lowering below doesn't
	  * mutate the linker's copy.
	  */
	 if (shader->ir)
	    talloc_free(shader->ir);
	 shader->ir = new(shader) exec_list;
	 clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

	 /* One-shot lowering passes run before the optimization loop. */
	 do_mat_op_to_vec(shader->ir);
	 do_mod_to_fract(shader->ir);
	 do_div_to_mul_rcp(shader->ir);
	 do_sub_to_add_neg(shader->ir);
	 do_explog_to_explog2(shader->ir);

	 /* Iterate lowering + optimization to a fixed point. */
	 do {
	    progress = false;

	    brw_do_channel_expressions(shader->ir);
	    brw_do_vector_splitting(shader->ir);

	    progress = do_lower_jumps(shader->ir, true, true,
				      true, /* main return */
				      false, /* continue */
				      false /* loops */
				      ) || progress;

	    progress = do_common_optimization(shader->ir, true, 32) || progress;

	    progress = lower_noise(shader->ir) || progress;
	    progress =
	       lower_variable_index_to_cond_assign(shader->ir,
						   GL_TRUE, /* input */
						   GL_TRUE, /* output */
						   GL_TRUE, /* temp */
						   GL_TRUE /* uniform */
						   ) || progress;
	 } while (progress);

	 validate_ir_tree(shader->ir);

	 /* The clone was built under mem_ctx; reparent it onto itself so
	  * freeing mem_ctx below doesn't free the lowered IR with it.
	  */
	 reparent_ir(shader->ir, shader->ir);
	 talloc_free(mem_ctx);
      }
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}
178
179 static int
180 type_size(const struct glsl_type *type)
181 {
182 unsigned int size, i;
183
184 switch (type->base_type) {
185 case GLSL_TYPE_UINT:
186 case GLSL_TYPE_INT:
187 case GLSL_TYPE_FLOAT:
188 case GLSL_TYPE_BOOL:
189 return type->components();
190 case GLSL_TYPE_ARRAY:
191 return type_size(type->fields.array) * type->length;
192 case GLSL_TYPE_STRUCT:
193 size = 0;
194 for (i = 0; i < type->length; i++) {
195 size += type_size(type->fields.structure[i].type);
196 }
197 return size;
198 case GLSL_TYPE_SAMPLER:
199 /* Samplers take up no register space, since they're baked in at
200 * link time.
201 */
202 return 0;
203 default:
204 assert(!"not reached");
205 return 0;
206 }
207 }
208
/**
 * A register operand (source or destination) of an fs_inst.
 *
 * Identified by (file, reg, reg_offset) while virtual, or by hw_reg once
 * a fixed hardware register is known.
 */
class fs_reg {
public:
   /* Callers of this talloc-based new need not call delete. It's
    * easier to just talloc_free 'ctx' (or any of its ancestors). */
   static void* operator new(size_t size, void *ctx)
   {
      void *node;

      node = talloc_size(ctx, size);
      assert(node != NULL);

      return node;
   }

   /* Shared field setup for all constructors.  NOTE(review): 'type' is not
    * set here; each constructor fills it in, except the default
    * constructor, which leaves it undefined for BAD_FILE.
    */
   void init()
   {
      this->reg = 0;
      this->reg_offset = 0;
      this->negate = 0;
      this->abs = 0;
      this->hw_reg = -1;
   }

   /** Generic unset register constructor. */
   fs_reg()
   {
      init();
      this->file = BAD_FILE;
   }

   /** Immediate value constructor. */
   fs_reg(float f)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_F;
      this->imm.f = f;
   }

   /** Immediate value constructor. */
   fs_reg(int32_t i)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_D;
      this->imm.i = i;
   }

   /** Immediate value constructor. */
   fs_reg(uint32_t u)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_UD;
      this->imm.u = u;
   }

   /** Fixed brw_reg Immediate value constructor. */
   fs_reg(struct brw_reg fixed_hw_reg)
   {
      init();
      this->file = FIXED_HW_REG;
      this->fixed_hw_reg = fixed_hw_reg;
      this->type = fixed_hw_reg.type;
   }

   fs_reg(enum register_file file, int hw_reg);
   fs_reg(class fs_visitor *v, const struct glsl_type *type);

   /** Register file: ARF, GRF, MRF, IMM. */
   enum register_file file;
   /** virtual register number.  0 = fixed hw reg */
   int reg;
   /** Offset within the virtual register, in type_size() units. */
   int reg_offset;
   /** HW register number.  Generally unset until register allocation. */
   int hw_reg;
   /** Register type.  BRW_REGISTER_TYPE_* */
   int type;
   /* Source modifiers: applied when the register is read. */
   bool negate;
   bool abs;
   /** Full hardware register description when file == FIXED_HW_REG. */
   struct brw_reg fixed_hw_reg;

   /** Value for file == BRW_IMMMEDIATE_FILE; member selected by 'type'. */
   union {
      int32_t i;
      uint32_t u;
      float f;
   } imm;
};
299
/** Sentinel "no register" operand (file == BAD_FILE). */
static const fs_reg reg_undef;
/** Hardware null register; used as a CMP destination when only the
 * condition flags are wanted. */
static const fs_reg reg_null(ARF, BRW_ARF_NULL);
302
/**
 * A single IR-level instruction: a BRW_OPCODE_* or FS_OPCODE_* with a
 * destination and up to three sources.  Lives on fs_visitor::instructions.
 */
class fs_inst : public exec_node {
public:
   /* Callers of this talloc-based new need not call delete. It's
    * easier to just talloc_free 'ctx' (or any of its ancestors).
    * talloc_zero_size() zero-fills, so fields not touched by init()
    * (dst, src[], mlen, ir, annotation) start out zeroed.
    */
   static void* operator new(size_t size, void *ctx)
   {
      void *node;

      node = talloc_zero_size(ctx, size);
      assert(node != NULL);

      return node;
   }

   /* Shared field setup for all constructors. */
   void init()
   {
      this->opcode = BRW_OPCODE_NOP;
      this->saturate = false;
      this->conditional_mod = BRW_CONDITIONAL_NONE;
      this->predicated = false;
      this->sampler = 0;
      this->target = 0;
      this->eot = false;
      this->shadow_compare = false;
   }

   fs_inst()
   {
      init();
   }

   fs_inst(int opcode)
   {
      init();
      this->opcode = opcode;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
      this->src[1] = src1;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
      this->src[1] = src1;
      this->src[2] = src2;
   }

   int opcode; /* BRW_OPCODE_* or FS_OPCODE_* */
   fs_reg dst;
   fs_reg src[3];
   bool saturate;
   bool predicated;
   int conditional_mod; /**< BRW_CONDITIONAL_* */

   int mlen; /**< SEND message length */
   int sampler;
   int target; /**< MRT target. */
   bool eot;
   bool shadow_compare;

   /** @{
    * Annotation for the generated IR.  One of the two can be set.
    */
   ir_instruction *ir;
   const char *annotation;
   /** @} */
};
387
/**
 * Walks the lowered GLSL IR of a fragment shader, emitting fs_inst IR into
 * 'instructions', then assigns registers and generates native code.
 */
class fs_visitor : public ir_visitor
{
public:

   fs_visitor(struct brw_wm_compile *c, struct brw_shader *shader)
   {
      this->c = c;
      this->p = &c->func;
      this->brw = p->brw;
      this->fp = brw->fragment_program;
      this->intel = &brw->intel;
      this->ctx = &intel->ctx;
      this->mem_ctx = talloc_new(NULL);
      this->shader = shader;
      this->fail = false;
      /* Maps ir_variable -> fs_reg storage, keyed by pointer. */
      this->variable_ht = hash_table_ctor(0,
					  hash_table_pointer_hash,
					  hash_table_pointer_compare);

      this->frag_color = NULL;
      this->frag_data = NULL;
      this->frag_depth = NULL;
      this->first_non_payload_grf = 0;

      this->current_annotation = NULL;
      this->annotation_string = NULL;
      this->annotation_ir = NULL;
      this->base_ir = NULL;

      /* Virtual GRF numbering starts at 1; slot 0 is reserved (see
       * virtual_grf_alloc()).
       */
      this->virtual_grf_sizes = NULL;
      this->virtual_grf_next = 1;
      this->virtual_grf_array_size = 0;
   }
   ~fs_visitor()
   {
      talloc_free(this->mem_ctx);
      hash_table_dtor(this->variable_ht);
   }

   /* Returns the fs_reg backing 'var', or NULL if none was registered. */
   fs_reg *variable_storage(ir_variable *var);
   /* Allocates a virtual GRF of 'size' type_size() units; returns its number. */
   int virtual_grf_alloc(int size);

   /* ir_visitor interface: each visit() leaves its value in this->result. */
   void visit(ir_variable *ir);
   void visit(ir_assignment *ir);
   void visit(ir_dereference_variable *ir);
   void visit(ir_dereference_record *ir);
   void visit(ir_dereference_array *ir);
   void visit(ir_expression *ir);
   void visit(ir_texture *ir);
   void visit(ir_if *ir);
   void visit(ir_constant *ir);
   void visit(ir_swizzle *ir);
   void visit(ir_return *ir);
   void visit(ir_loop *ir);
   void visit(ir_loop_jump *ir);
   void visit(ir_discard *ir);
   void visit(ir_call *ir);
   void visit(ir_function *ir);
   void visit(ir_function_signature *ir);

   /* Appends a copy of 'inst' to this->instructions. */
   fs_inst *emit(fs_inst inst);
   void assign_curb_setup();
   void assign_urb_setup();
   void assign_regs();
   void assign_regs_trivial();
   void generate_code();
   void generate_fb_write(fs_inst *inst);
   void generate_linterp(fs_inst *inst, struct brw_reg dst,
			 struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src);
   void generate_math(fs_inst *inst, struct brw_reg dst, struct brw_reg *src);
   void generate_discard(fs_inst *inst, struct brw_reg temp);
   void generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src);
   void generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src);

   void emit_dummy_fs();
   void emit_fragcoord_interpolation(ir_variable *ir);
   void emit_general_interpolation(ir_variable *ir);
   void emit_interpolation_setup();
   void emit_fb_writes();

   struct brw_reg interp_reg(int location, int channel);
   int setup_uniform_values(int loc, const glsl_type *type);
   void setup_builtin_uniform_values(ir_variable *ir);

   struct brw_context *brw;
   const struct gl_fragment_program *fp;
   struct intel_context *intel;
   GLcontext *ctx;
   struct brw_wm_compile *c;
   struct brw_compile *p;
   struct brw_shader *shader;
   void *mem_ctx;
   exec_list instructions;

   /* Per-virtual-GRF sizes, indexed by fs_reg::reg; grown on demand. */
   int *virtual_grf_sizes;
   int virtual_grf_next;
   int virtual_grf_array_size;

   struct hash_table *variable_ht;
   ir_variable *frag_color, *frag_data, *frag_depth;
   int first_non_payload_grf;

   /** @{ debug annotation info */
   const char *current_annotation;
   ir_instruction *base_ir;
   const char **annotation_string;
   ir_instruction **annotation_ir;
   /** @} */

   /* Set to true on any unrecoverable compile error. */
   bool fail;

   /* Result of last visit() method. */
   fs_reg result;

   fs_reg pixel_x;
   fs_reg pixel_y;
   fs_reg wpos_w;
   fs_reg pixel_w;
   fs_reg delta_x;
   fs_reg delta_y;

   int grf_used;

};
513
514 int
515 fs_visitor::virtual_grf_alloc(int size)
516 {
517 if (virtual_grf_array_size <= virtual_grf_next) {
518 if (virtual_grf_array_size == 0)
519 virtual_grf_array_size = 16;
520 else
521 virtual_grf_array_size *= 2;
522 virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes,
523 int, virtual_grf_array_size);
524
525 /* This slot is always unused. */
526 virtual_grf_sizes[0] = 0;
527 }
528 virtual_grf_sizes[virtual_grf_next] = size;
529 return virtual_grf_next++;
530 }
531
532 /** Fixed HW reg constructor. */
533 fs_reg::fs_reg(enum register_file file, int hw_reg)
534 {
535 init();
536 this->file = file;
537 this->hw_reg = hw_reg;
538 this->type = BRW_REGISTER_TYPE_F;
539 }
540
541 int
542 brw_type_for_base_type(const struct glsl_type *type)
543 {
544 switch (type->base_type) {
545 case GLSL_TYPE_FLOAT:
546 return BRW_REGISTER_TYPE_F;
547 case GLSL_TYPE_INT:
548 case GLSL_TYPE_BOOL:
549 return BRW_REGISTER_TYPE_D;
550 case GLSL_TYPE_UINT:
551 return BRW_REGISTER_TYPE_UD;
552 case GLSL_TYPE_ARRAY:
553 case GLSL_TYPE_STRUCT:
554 /* These should be overridden with the type of the member when
555 * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
556 * way to trip up if we don't.
557 */
558 return BRW_REGISTER_TYPE_UD;
559 default:
560 assert(!"not reached");
561 return BRW_REGISTER_TYPE_F;
562 }
563 }
564
565 /** Automatic reg constructor. */
566 fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
567 {
568 init();
569
570 this->file = GRF;
571 this->reg = v->virtual_grf_alloc(type_size(type));
572 this->reg_offset = 0;
573 this->type = brw_type_for_base_type(type);
574 }
575
576 fs_reg *
577 fs_visitor::variable_storage(ir_variable *var)
578 {
579 return (fs_reg *)hash_table_find(this->variable_ht, var);
580 }
581
582 /* Our support for uniforms is piggy-backed on the struct
583 * gl_fragment_program, because that's where the values actually
584 * get stored, rather than in some global gl_shader_program uniform
585 * store.
586 */
/**
 * Recursively walks 'type' starting at parameter slot 'loc', appending a
 * pointer to each scalar uniform value into c->prog_data.param[].
 *
 * Returns the number of parameter slots consumed (matrices count one slot
 * per column, structs/arrays the sum of their members, samplers one).
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *vec_values;

   /* A matrix is handled as matrix_columns column vectors in consecutive
    * slots.
    */
   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
							type->vector_elements,
							1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
	 offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      /* One parameter slot holds the whole vector; push a pointer to each
       * component's storage.
       */
      vec_values = fp->Base.Parameters->ParameterValues[loc];
      for (unsigned int i = 0; i < type->vector_elements; i++) {
	 c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
	 offset += setup_uniform_values(loc + offset,
					type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
	 offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}
638
639
640 /* Our support for builtin uniforms is even scarier than non-builtin.
641 * It sits on top of the PROG_STATE_VAR parameters that are
642 * automatically updated from GL context state.
643 */
644 void
645 fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
646 {
647 const struct gl_builtin_uniform_desc *statevar = NULL;
648
649 for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
650 statevar = &_mesa_builtin_uniform_desc[i];
651 if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0)
652 break;
653 }
654
655 if (!statevar->name) {
656 this->fail = true;
657 printf("Failed to find builtin uniform `%s'\n", ir->name);
658 return;
659 }
660
661 int array_count;
662 if (ir->type->is_array()) {
663 array_count = ir->type->length;
664 } else {
665 array_count = 1;
666 }
667
668 for (int a = 0; a < array_count; a++) {
669 for (unsigned int i = 0; i < statevar->num_elements; i++) {
670 struct gl_builtin_uniform_element *element = &statevar->elements[i];
671 int tokens[STATE_LENGTH];
672
673 memcpy(tokens, element->tokens, sizeof(element->tokens));
674 if (ir->type->is_array()) {
675 tokens[1] = a;
676 }
677
678 /* This state reference has already been setup by ir_to_mesa,
679 * but we'll get the same index back here.
680 */
681 int index = _mesa_add_state_reference(this->fp->Base.Parameters,
682 (gl_state_index *)tokens);
683 float *vec_values = this->fp->Base.Parameters->ParameterValues[index];
684
685 /* Add each of the unique swizzles of the element as a
686 * parameter. This'll end up matching the expected layout of
687 * the array/matrix/structure we're trying to fill in.
688 */
689 int last_swiz = -1;
690 for (unsigned int i = 0; i < 4; i++) {
691 int this_swiz = GET_SWZ(element->swizzle, i);
692 if (this_swiz == last_swiz)
693 break;
694 last_swiz = this_swiz;
695
696 c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
697 }
698 }
699 }
700 }
701
702 void
703 fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
704 {
705 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
706 fs_reg wpos = *reg;
707 fs_reg neg_y = this->pixel_y;
708 neg_y.negate = true;
709
710 /* gl_FragCoord.x */
711 if (ir->pixel_center_integer) {
712 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
713 } else {
714 emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
715 }
716 wpos.reg_offset++;
717
718 /* gl_FragCoord.y */
719 if (ir->origin_upper_left && ir->pixel_center_integer) {
720 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
721 } else {
722 fs_reg pixel_y = this->pixel_y;
723 float offset = (ir->pixel_center_integer ? 0.0 : 0.5);
724
725 if (!ir->origin_upper_left) {
726 pixel_y.negate = true;
727 offset += c->key.drawable_height - 1.0;
728 }
729
730 emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
731 }
732 wpos.reg_offset++;
733
734 /* gl_FragCoord.z */
735 emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
736 interp_reg(FRAG_ATTRIB_WPOS, 2)));
737 wpos.reg_offset++;
738
739 /* gl_FragCoord.w: Already set up in emit_interpolation */
740 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));
741
742 hash_table_insert(this->variable_ht, reg, ir);
743 }
744
745
/**
 * Emits LINTERP + perspective-division MUL instructions for an ordinary
 * varying input ('ir'), filling a fresh virtual GRF one scalar component
 * at a time, and registers that GRF as the variable's storage.
 *
 * Arrays of vectors/matrices are handled by iterating array elements,
 * then matrix columns, then vector components.
 */
void
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      /* NOTE(review): a zero-length array marks the compile failed but
       * still falls through the (empty) loops below.
       */
      if (array_elements == 0) {
	 this->fail = true;
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
	 if (!(fp->Base.InputsRead & BITFIELD64_BIT(location))) {
	    /* If there's no incoming setup data for this slot, don't
	     * emit interpolation for it (since it's not used, and
	     * we'd fall over later trying to find the setup data.
	     */
	    attr.reg_offset += type->vector_elements;
	    continue;
	 }

	 /* First pass: interpolate each component (result is still
	  * divided by w).
	  */
	 for (unsigned int c = 0; c < type->vector_elements; c++) {
	    struct brw_reg interp = interp_reg(location, c);
	    emit(fs_inst(FS_OPCODE_LINTERP,
			 attr,
			 this->delta_x,
			 this->delta_y,
			 fs_reg(interp)));
	    attr.reg_offset++;
	 }
	 /* Rewind and do the perspective divide: multiply by pixel_w. */
	 attr.reg_offset -= type->vector_elements;

	 for (unsigned int c = 0; c < type->vector_elements; c++) {
	    emit(fs_inst(BRW_OPCODE_MUL,
			 attr,
			 attr,
			 this->pixel_w));
	    attr.reg_offset++;
	 }
	 location++;
      }
   }

   hash_table_insert(this->variable_ht, reg, ir);
}
804
/**
 * Sets up storage for a variable the first time it is seen: emits
 * interpolation for inputs, parameter setup for uniforms, and a fresh
 * virtual GRF otherwise.  Also records the special gl_Frag* outputs for
 * emit_fb_writes().
 */
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   /* Each variable gets storage exactly once. */
   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
	 /* Inserts its own storage and returns. */
	 emit_fragcoord_interpolation(ir);
	 return;
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
	 reg = new(this->mem_ctx) fs_reg(this, ir->type);
	 struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
	 /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
	  * us front face
	  */
	 fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
				      *reg,
				      fs_reg(r1_6ud),
				      fs_reg(1u << 31)));
	 inst->conditional_mod = BRW_CONDITIONAL_L;
	 /* Mask the CMP result down to a 0/1 boolean. */
	 emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
      } else {
	 /* Inserts its own storage and returns. */
	 emit_general_interpolation(ir);
	 return;
      }
   }

   if (ir->mode == ir_var_uniform) {
      /* Record where this uniform's values start in prog_data.param[]. */
      int param_index = c->prog_data.nr_params;

      if (!strncmp(ir->name, "gl_", 3)) {
	 setup_builtin_uniform_values(ir);
      } else {
	 setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}
860
861 void
862 fs_visitor::visit(ir_dereference_variable *ir)
863 {
864 fs_reg *reg = variable_storage(ir->var);
865 this->result = *reg;
866 }
867
868 void
869 fs_visitor::visit(ir_dereference_record *ir)
870 {
871 const glsl_type *struct_type = ir->record->type;
872
873 ir->record->accept(this);
874
875 unsigned int offset = 0;
876 for (unsigned int i = 0; i < struct_type->length; i++) {
877 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
878 break;
879 offset += type_size(struct_type->fields.structure[i].type);
880 }
881 this->result.reg_offset += offset;
882 this->result.type = brw_type_for_base_type(ir->type);
883 }
884
/**
 * An array element reference: evaluate the array, then advance the
 * result's reg_offset by index * element size.  Only constant indices are
 * handled so far.
 */
void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      /* Offsetting only makes sense into a uniform block or a real
       * (non-fixed) virtual GRF.
       */
      assert(this->result.file == UNIFORM ||
	     (this->result.file == GRF &&
	      this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}
906
907 void
908 fs_visitor::visit(ir_expression *ir)
909 {
910 unsigned int operand;
911 fs_reg op[2], temp;
912 fs_reg result;
913 fs_inst *inst;
914
915 for (operand = 0; operand < ir->get_num_operands(); operand++) {
916 ir->operands[operand]->accept(this);
917 if (this->result.file == BAD_FILE) {
918 ir_print_visitor v;
919 printf("Failed to get tree for expression operand:\n");
920 ir->operands[operand]->accept(&v);
921 this->fail = true;
922 }
923 op[operand] = this->result;
924
925 /* Matrix expression operands should have been broken down to vector
926 * operations already.
927 */
928 assert(!ir->operands[operand]->type->is_matrix());
929 /* And then those vector operands should have been broken down to scalar.
930 */
931 assert(!ir->operands[operand]->type->is_vector());
932 }
933
934 /* Storage for our result. If our result goes into an assignment, it will
935 * just get copy-propagated out, so no worries.
936 */
937 this->result = fs_reg(this, ir->type);
938
939 switch (ir->operation) {
940 case ir_unop_logic_not:
941 emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1)));
942 break;
943 case ir_unop_neg:
944 op[0].negate = !op[0].negate;
945 this->result = op[0];
946 break;
947 case ir_unop_abs:
948 op[0].abs = true;
949 this->result = op[0];
950 break;
951 case ir_unop_sign:
952 temp = fs_reg(this, ir->type);
953
954 emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));
955
956 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
957 inst->conditional_mod = BRW_CONDITIONAL_G;
958 inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
959 inst->predicated = true;
960
961 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
962 inst->conditional_mod = BRW_CONDITIONAL_L;
963 inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
964 inst->predicated = true;
965
966 break;
967 case ir_unop_rcp:
968 emit(fs_inst(FS_OPCODE_RCP, this->result, op[0]));
969 break;
970
971 case ir_unop_exp2:
972 emit(fs_inst(FS_OPCODE_EXP2, this->result, op[0]));
973 break;
974 case ir_unop_log2:
975 emit(fs_inst(FS_OPCODE_LOG2, this->result, op[0]));
976 break;
977 case ir_unop_exp:
978 case ir_unop_log:
979 assert(!"not reached: should be handled by ir_explog_to_explog2");
980 break;
981 case ir_unop_sin:
982 emit(fs_inst(FS_OPCODE_SIN, this->result, op[0]));
983 break;
984 case ir_unop_cos:
985 emit(fs_inst(FS_OPCODE_COS, this->result, op[0]));
986 break;
987
988 case ir_unop_dFdx:
989 emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
990 break;
991 case ir_unop_dFdy:
992 emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
993 break;
994
995 case ir_binop_add:
996 emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
997 break;
998 case ir_binop_sub:
999 assert(!"not reached: should be handled by ir_sub_to_add_neg");
1000 break;
1001
1002 case ir_binop_mul:
1003 emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
1004 break;
1005 case ir_binop_div:
1006 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
1007 break;
1008 case ir_binop_mod:
1009 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1010 break;
1011
1012 case ir_binop_less:
1013 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1014 inst->conditional_mod = BRW_CONDITIONAL_L;
1015 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1016 break;
1017 case ir_binop_greater:
1018 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1019 inst->conditional_mod = BRW_CONDITIONAL_G;
1020 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1021 break;
1022 case ir_binop_lequal:
1023 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1024 inst->conditional_mod = BRW_CONDITIONAL_LE;
1025 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1026 break;
1027 case ir_binop_gequal:
1028 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1029 inst->conditional_mod = BRW_CONDITIONAL_GE;
1030 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1031 break;
1032 case ir_binop_equal:
1033 case ir_binop_all_equal: /* same as nequal for scalars */
1034 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1035 inst->conditional_mod = BRW_CONDITIONAL_Z;
1036 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1037 break;
1038 case ir_binop_nequal:
1039 case ir_binop_any_nequal: /* same as nequal for scalars */
1040 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1041 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1042 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
1043 break;
1044
1045 case ir_binop_logic_xor:
1046 emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
1047 break;
1048
1049 case ir_binop_logic_or:
1050 emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
1051 break;
1052
1053 case ir_binop_logic_and:
1054 emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
1055 break;
1056
1057 case ir_binop_dot:
1058 case ir_binop_cross:
1059 case ir_unop_any:
1060 assert(!"not reached: should be handled by brw_fs_channel_expressions");
1061 break;
1062
1063 case ir_unop_noise:
1064 assert(!"not reached: should be handled by lower_noise");
1065 break;
1066
1067 case ir_unop_sqrt:
1068 emit(fs_inst(FS_OPCODE_SQRT, this->result, op[0]));
1069 break;
1070
1071 case ir_unop_rsq:
1072 emit(fs_inst(FS_OPCODE_RSQ, this->result, op[0]));
1073 break;
1074
1075 case ir_unop_i2f:
1076 case ir_unop_b2f:
1077 case ir_unop_b2i:
1078 emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
1079 break;
1080 case ir_unop_f2i:
1081 emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
1082 break;
1083 case ir_unop_f2b:
1084 case ir_unop_i2b:
1085 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
1086 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1087
1088 case ir_unop_trunc:
1089 emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
1090 break;
1091 case ir_unop_ceil:
1092 op[0].negate = ~op[0].negate;
1093 inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
1094 this->result.negate = true;
1095 break;
1096 case ir_unop_floor:
1097 inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
1098 break;
1099 case ir_unop_fract:
1100 inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
1101 break;
1102
1103 case ir_binop_min:
1104 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1105 inst->conditional_mod = BRW_CONDITIONAL_L;
1106
1107 inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
1108 inst->predicated = true;
1109 break;
1110 case ir_binop_max:
1111 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
1112 inst->conditional_mod = BRW_CONDITIONAL_G;
1113
1114 inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
1115 inst->predicated = true;
1116 break;
1117
1118 case ir_binop_pow:
1119 inst = emit(fs_inst(FS_OPCODE_POW, this->result, op[0], op[1]));
1120 break;
1121
1122 case ir_unop_bit_not:
1123 case ir_unop_u2f:
1124 case ir_binop_lshift:
1125 case ir_binop_rshift:
1126 case ir_binop_bit_and:
1127 case ir_binop_bit_xor:
1128 case ir_binop_bit_or:
1129 assert(!"GLSL 1.30 features unsupported");
1130 break;
1131 }
1132 }
1133
/**
 * Emits the MOVs for an assignment, one scalar component at a time,
 * honoring the write mask and an optional condition (lowered to
 * predication).
 */
void
fs_visitor::visit(ir_assignment *ir)
{
   struct fs_reg l, r;
   int i;
   int write_mask;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   ir->rhs->accept(this);
   r = this->result;

   /* FINISHME: This should really set to the correct maximal writemask for each
    * FINISHME: component written (in the loops below).  This case can only
    * FINISHME: occur for matrices, arrays, and structures.
    */
   if (ir->write_mask == 0) {
      assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
      write_mask = WRITEMASK_XYZW;
   } else {
      assert(ir->lhs->type->is_vector() || ir->lhs->type->is_scalar());
      write_mask = ir->write_mask;
   }

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (ir->condition) {
      /* Get the condition bool into the predicate. */
      ir->condition->accept(this);
      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, this->result, fs_reg(0)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }

   /* Copy component by component.  Components >= 4 (matrix/array/struct
    * tails) are always written; components 0-3 obey the write mask.  Note
    * the lhs offset always advances, the rhs only when a MOV was emitted.
    */
   for (i = 0; i < type_size(ir->lhs->type); i++) {
      if (i >= 4 || (write_mask & (1 << i))) {
	 inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
	 if (ir->condition)
	    inst->predicated = true;
	 r.reg_offset++;
      }
      l.reg_offset++;
   }
}
1181
/** Emits the sampler message setup and send for a texture operation.
 *
 * Assembles the message payload in MRFs starting at base_mrf: the
 * (possibly projected) coordinate, then for shadow compares the reference
 * value, then for txb/txl the bias/lod.  The resulting fs_inst carries the
 * sampler index and message length for generate_tex().
 */
void
fs_visitor::visit(ir_texture *ir)
{
   int base_mrf = 2;
   fs_inst *inst = NULL;
   unsigned int mlen = 0;

   ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->projector) {
      /* Divide the coordinate by the projector by multiplying with 1/q. */
      fs_reg inv_proj = fs_reg(this, glsl_type::float_type);

      ir->projector->accept(this);
      emit(fs_inst(FS_OPCODE_RCP, inv_proj, this->result));

      fs_reg proj_coordinate = fs_reg(this, ir->coordinate->type);
      for (unsigned int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MUL, proj_coordinate, coordinate, inv_proj));
         coordinate.reg_offset++;
         proj_coordinate.reg_offset++;
      }
      /* Rewind to component 0 so the MRF copy below starts at the base. */
      proj_coordinate.reg_offset = 0;

      coordinate = proj_coordinate;
   }

   /* Copy the coordinate components into consecutive message registers. */
   for (mlen = 0; mlen < ir->coordinate->type->vector_elements; mlen++) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate));
      coordinate.reg_offset++;
   }

   /* Pre-Ironlake, the 8-wide sampler always took u,v,r. */
   if (intel->gen < 5)
      mlen = 3;

   if (ir->shadow_comparitor) {
      /* For shadow comparisons, we have to supply u,v,r. */
      mlen = 3;

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   }

   /* Do we ever want to handle writemasking on texture samples?  Is it
    * performance relevant?
    */
   fs_reg dst = fs_reg(this, glsl_type::vec4_type);

   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txb:
      /* Append the LOD bias to the message payload. */
      ir->lod_info.bias->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXB, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txl:
      /* Append the explicit LOD to the message payload. */
      ir->lod_info.lod->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXL, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txd:
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }

   /* Resolve the sampler uniform to the bound texture unit. */
   inst->sampler =
      _mesa_get_sampler_uniform_value(ir->sampler,
                                      ctx->Shader.CurrentProgram,
                                      &brw->fragment_program->Base);
   inst->sampler = c->fp->program.Base.SamplerUnits[inst->sampler];

   this->result = dst;

   if (ir->shadow_comparitor)
      inst->shadow_compare = true;
   inst->mlen = mlen;
}
1268
1269 void
1270 fs_visitor::visit(ir_swizzle *ir)
1271 {
1272 ir->val->accept(this);
1273 fs_reg val = this->result;
1274
1275 fs_reg result = fs_reg(this, ir->type);
1276 this->result = result;
1277
1278 for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
1279 fs_reg channel = val;
1280 int swiz = 0;
1281
1282 switch (i) {
1283 case 0:
1284 swiz = ir->mask.x;
1285 break;
1286 case 1:
1287 swiz = ir->mask.y;
1288 break;
1289 case 2:
1290 swiz = ir->mask.z;
1291 break;
1292 case 3:
1293 swiz = ir->mask.w;
1294 break;
1295 }
1296
1297 channel.reg_offset += swiz;
1298 emit(fs_inst(BRW_OPCODE_MOV, result, channel));
1299 result.reg_offset++;
1300 }
1301 }
1302
1303 void
1304 fs_visitor::visit(ir_discard *ir)
1305 {
1306 fs_reg temp = fs_reg(this, glsl_type::uint_type);
1307
1308 assert(ir->condition == NULL); /* FINISHME */
1309
1310 emit(fs_inst(FS_OPCODE_DISCARD, temp, temp));
1311 }
1312
1313 void
1314 fs_visitor::visit(ir_constant *ir)
1315 {
1316 fs_reg reg(this, ir->type);
1317 this->result = reg;
1318
1319 for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
1320 switch (ir->type->base_type) {
1321 case GLSL_TYPE_FLOAT:
1322 emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.f[i])));
1323 break;
1324 case GLSL_TYPE_UINT:
1325 emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.u[i])));
1326 break;
1327 case GLSL_TYPE_INT:
1328 emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.i[i])));
1329 break;
1330 case GLSL_TYPE_BOOL:
1331 emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg((int)ir->value.b[i])));
1332 break;
1333 default:
1334 assert(!"Non-float/uint/int/bool constant");
1335 }
1336 reg.reg_offset++;
1337 }
1338 }
1339
1340 void
1341 fs_visitor::visit(ir_if *ir)
1342 {
1343 fs_inst *inst;
1344
1345 /* Don't point the annotation at the if statement, because then it plus
1346 * the then and else blocks get printed.
1347 */
1348 this->base_ir = ir->condition;
1349
1350 /* Generate the condition into the condition code. */
1351 ir->condition->accept(this);
1352 inst = emit(fs_inst(BRW_OPCODE_MOV, fs_reg(brw_null_reg()), this->result));
1353 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1354
1355 inst = emit(fs_inst(BRW_OPCODE_IF));
1356 inst->predicated = true;
1357
1358 foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
1359 ir_instruction *ir = (ir_instruction *)iter.get();
1360 this->base_ir = ir;
1361
1362 ir->accept(this);
1363 }
1364
1365 if (!ir->else_instructions.is_empty()) {
1366 emit(fs_inst(BRW_OPCODE_ELSE));
1367
1368 foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
1369 ir_instruction *ir = (ir_instruction *)iter.get();
1370 this->base_ir = ir;
1371
1372 ir->accept(this);
1373 }
1374 }
1375
1376 emit(fs_inst(BRW_OPCODE_ENDIF));
1377 }
1378
/** Generates DO/WHILE loop code, including the optional counter init,
 * per-iteration exit comparison, increment, and a hardware safety counter
 * that breaks out after 10000 iterations to avoid hanging the GPU.
 */
void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         /* Initialize the counter from the loop's starting expression. */
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
      }
   }

   /* Start a safety counter.  If the user messed up their loop
    * counting, we don't want to hang the GPU.
    */
   fs_reg max_iter = fs_reg(this, glsl_type::int_type);
   emit(fs_inst(BRW_OPCODE_MOV, max_iter, fs_reg(10000)));

   emit(fs_inst(BRW_OPCODE_DO));

   if (ir->to) {
      /* Compare the counter against the loop bound and break when the
       * loop condition (ir->cmp) is satisfied.
       */
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null,
                                   counter, this->result));
      switch (ir->cmp) {
      case ir_binop_equal:
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      case ir_binop_gequal:
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_lequal:
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_greater:
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_less:
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      default:
         assert(!"not reached: unknown loop condition");
         this->fail = true;
         break;
      }

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      fs_inst *inst;

      this->base_ir = ir;
      ir->accept(this);

      /* Check the maximum loop iters counter. */
      inst = emit(fs_inst(BRW_OPCODE_ADD, max_iter, max_iter, fs_reg(-1)));
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
   }

   emit(fs_inst(BRW_OPCODE_WHILE));
}
1463
1464 void
1465 fs_visitor::visit(ir_loop_jump *ir)
1466 {
1467 switch (ir->mode) {
1468 case ir_loop_jump::jump_break:
1469 emit(fs_inst(BRW_OPCODE_BREAK));
1470 break;
1471 case ir_loop_jump::jump_continue:
1472 emit(fs_inst(BRW_OPCODE_CONTINUE));
1473 break;
1474 }
1475 }
1476
1477 void
1478 fs_visitor::visit(ir_call *ir)
1479 {
1480 assert(!"FINISHME");
1481 }
1482
1483 void
1484 fs_visitor::visit(ir_return *ir)
1485 {
1486 assert(!"FINISHME");
1487 }
1488
1489 void
1490 fs_visitor::visit(ir_function *ir)
1491 {
1492 /* Ignore function bodies other than main() -- we shouldn't see calls to
1493 * them since they should all be inlined before we get to ir_to_mesa.
1494 */
1495 if (strcmp(ir->name, "main") == 0) {
1496 const ir_function_signature *sig;
1497 exec_list empty;
1498
1499 sig = ir->matching_signature(&empty);
1500
1501 assert(sig);
1502
1503 foreach_iter(exec_list_iterator, iter, sig->body) {
1504 ir_instruction *ir = (ir_instruction *)iter.get();
1505 this->base_ir = ir;
1506
1507 ir->accept(this);
1508 }
1509 }
1510 }
1511
1512 void
1513 fs_visitor::visit(ir_function_signature *ir)
1514 {
1515 assert(!"not reached");
1516 (void)ir;
1517 }
1518
1519 fs_inst *
1520 fs_visitor::emit(fs_inst inst)
1521 {
1522 fs_inst *list_inst = new(mem_ctx) fs_inst;
1523 *list_inst = inst;
1524
1525 list_inst->annotation = this->current_annotation;
1526 list_inst->ir = this->base_ir;
1527
1528 this->instructions.push_tail(list_inst);
1529
1530 return list_inst;
1531 }
1532
1533 /** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
1534 void
1535 fs_visitor::emit_dummy_fs()
1536 {
1537 /* Everyone's favorite color. */
1538 emit(fs_inst(BRW_OPCODE_MOV,
1539 fs_reg(MRF, 2),
1540 fs_reg(1.0f)));
1541 emit(fs_inst(BRW_OPCODE_MOV,
1542 fs_reg(MRF, 3),
1543 fs_reg(0.0f)));
1544 emit(fs_inst(BRW_OPCODE_MOV,
1545 fs_reg(MRF, 4),
1546 fs_reg(1.0f)));
1547 emit(fs_inst(BRW_OPCODE_MOV,
1548 fs_reg(MRF, 5),
1549 fs_reg(0.0f)));
1550
1551 fs_inst *write;
1552 write = emit(fs_inst(FS_OPCODE_FB_WRITE,
1553 fs_reg(0),
1554 fs_reg(0)));
1555 }
1556
1557 /* The register location here is relative to the start of the URB
1558 * data. It will get adjusted to be a real location before
1559 * generate_code() time.
1560 */
1561 struct brw_reg
1562 fs_visitor::interp_reg(int location, int channel)
1563 {
1564 int regnr = location * 2 + channel / 2;
1565 int stride = (channel & 1) * 4;
1566
1567 return brw_vec1_grf(regnr, stride);
1568 }
1569
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup()
{
   /* g1 holds per-subspan pixel position data in the thread payload --
    * NOTE(review): assumed per the payload layout this code reads; confirm
    * against the hardware docs.
    */
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   /* Add the per-channel X/Y offset vectors (immediate vector operands)
    * to the subspan origins to get each pixel's screen coordinate.
    */
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   this->current_annotation = "compute pixel deltas from v0";
   this->delta_x = fs_reg(this, glsl_type::float_type);
   this->delta_y = fs_reg(this, glsl_type::float_type);
   /* delta = pixel position - vertex 0 position; these feed LINTERP/PLN. */
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_x,
                this->pixel_x,
                fs_reg(negate(brw_vec1_grf(1, 0)))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_y,
                this->pixel_y,
                fs_reg(negate(brw_vec1_grf(1, 1)))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 3)));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_RCP, this->pixel_w, wpos_w));
   this->current_annotation = NULL;
}
1614
/** Emits the framebuffer write messages at the end of the shader.
 *
 * Builds the FB write payload in MRFs (header, optional AA/stencil,
 * four color registers, optional source/dest depth) and emits one
 * FS_OPCODE_FB_WRITE per color region, with EOT on the last one.
 */
void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   int nr = 0;

   /* m0, m1 header */
   nr += 2;

   if (c->key.aa_dest_stencil_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.aa_dest_stencil_reg, 0))));
   }

   /* Reserve space for color. It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4;

   if (c->key.source_depth_to_render_target) {
      if (c->key.computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
      } else {
         /* Pass through the payload depth. */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                      fs_reg(brw_vec8_grf(c->key.source_depth_reg, 0))));
      }
   }

   if (c->key.dest_depth_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.dest_depth_reg, 0))));
   }

   fs_reg color = reg_undef;
   if (this->frag_color)
      color = *(variable_storage(this->frag_color));
   else if (this->frag_data)
      color = *(variable_storage(this->frag_data));

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = talloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      if (this->frag_color || this->frag_data) {
         for (int i = 0; i < 4; i++) {
            emit(fs_inst(BRW_OPCODE_MOV,
                         fs_reg(MRF, color_mrf + i),
                         color));
            color.reg_offset++;
         }
      }

      /* gl_FragColor is replayed for every target; gl_FragData advances
       * to the next vec4 per target, so only rewind for frag_color.
       */
      if (this->frag_color)
         color.reg_offset -= 4;

      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->target = target;
      inst->mlen = nr;
      /* The final FB write ends the thread. */
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
   }

   if (c->key.nr_color_regions == 0) {
      /* Even with no color buffers bound, a write must terminate the
       * thread.
       */
      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->mlen = nr;
      inst->eot = true;
   }

   this->current_annotation = NULL;
}
1691
/** Generates the actual render-target write send for an FS_OPCODE_FB_WRITE.
 *
 * The header occupies m0/m1: m0 is an implied move of g0 done by the
 * hardware send, and m1 is copied from g1 here with masking/compression
 * disabled so all channels are written.
 */
void
fs_visitor::generate_fb_write(fs_inst *inst)
{
   GLboolean eot = inst->eot;

   /* Header is 2 regs, g0 and g1 are the contents.  g0 will be implied
    * move, here's g1.
    */
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_MOV(p,
           brw_message_reg(1),
           brw_vec8_grf(1, 0));
   brw_pop_insn_state(p);

   brw_fb_WRITE(p,
                8, /* dispatch_width */
                retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
                0, /* base MRF */
                retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
                inst->target,
                inst->mlen,
                0,
                eot);
}
1718
1719 void
1720 fs_visitor::generate_linterp(fs_inst *inst,
1721 struct brw_reg dst, struct brw_reg *src)
1722 {
1723 struct brw_reg delta_x = src[0];
1724 struct brw_reg delta_y = src[1];
1725 struct brw_reg interp = src[2];
1726
1727 if (brw->has_pln &&
1728 delta_y.nr == delta_x.nr + 1 &&
1729 (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
1730 brw_PLN(p, dst, interp, delta_x);
1731 } else {
1732 brw_LINE(p, brw_null_reg(), interp, delta_x);
1733 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
1734 }
1735 }
1736
/** Generates a send to the hardware math unit for transcendental opcodes.
 *
 * Maps the FS math opcode to the EU math function; for POW, the second
 * operand is passed in message register m3 before the send.
 */
void
fs_visitor::generate_math(fs_inst *inst,
                          struct brw_reg dst, struct brw_reg *src)
{
   int op;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
      op = BRW_MATH_FUNCTION_INV;
      break;
   case FS_OPCODE_RSQ:
      op = BRW_MATH_FUNCTION_RSQ;
      break;
   case FS_OPCODE_SQRT:
      op = BRW_MATH_FUNCTION_SQRT;
      break;
   case FS_OPCODE_EXP2:
      op = BRW_MATH_FUNCTION_EXP;
      break;
   case FS_OPCODE_LOG2:
      op = BRW_MATH_FUNCTION_LOG;
      break;
   case FS_OPCODE_POW:
      op = BRW_MATH_FUNCTION_POW;
      break;
   case FS_OPCODE_SIN:
      op = BRW_MATH_FUNCTION_SIN;
      break;
   case FS_OPCODE_COS:
      op = BRW_MATH_FUNCTION_COS;
      break;
   default:
      assert(!"not reached: unknown math function");
      op = 0;
      break;
   }

   if (inst->opcode == FS_OPCODE_POW) {
      /* POW's second argument travels in m3 of the math message. */
      brw_MOV(p, brw_message_reg(3), src[1]);
   }

   brw_math(p, dst,
            op,
            inst->saturate ? BRW_MATH_SATURATE_SATURATE :
            BRW_MATH_SATURATE_NONE,
            2, src[0],
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}
1786
1787 void
1788 fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
1789 {
1790 int msg_type = -1;
1791 int rlen = 4;
1792
1793 if (intel->gen == 5) {
1794 switch (inst->opcode) {
1795 case FS_OPCODE_TEX:
1796 if (inst->shadow_compare) {
1797 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
1798 } else {
1799 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
1800 }
1801 break;
1802 case FS_OPCODE_TXB:
1803 if (inst->shadow_compare) {
1804 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
1805 } else {
1806 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
1807 }
1808 break;
1809 }
1810 } else {
1811 switch (inst->opcode) {
1812 case FS_OPCODE_TEX:
1813 /* Note that G45 and older determines shadow compare and dispatch width
1814 * from message length for most messages.
1815 */
1816 if (inst->shadow_compare) {
1817 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
1818 } else {
1819 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
1820 }
1821 case FS_OPCODE_TXB:
1822 if (inst->shadow_compare) {
1823 assert(!"FINISHME: shadow compare with bias.");
1824 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
1825 } else {
1826 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
1827 rlen = 8;
1828 }
1829 break;
1830 }
1831 }
1832 assert(msg_type != -1);
1833
1834 /* g0 header. */
1835 src.nr--;
1836
1837 brw_SAMPLE(p,
1838 retype(dst, BRW_REGISTER_TYPE_UW),
1839 src.nr,
1840 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
1841 SURF_INDEX_TEXTURE(inst->sampler),
1842 inst->sampler,
1843 WRITEMASK_XYZW,
1844 msg_type,
1845 rlen,
1846 inst->mlen + 1,
1847 0,
1848 1,
1849 BRW_SAMPLER_SIMD_MODE_SIMD8);
1850 }
1851
1852
1853 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
1854 * looking like:
1855 *
1856 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
1857 *
1858 * and we're trying to produce:
1859 *
1860 * DDX DDY
1861 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
1862 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
1863 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
1864 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
1865 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
1866 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
1867 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
1868 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
1869 *
1870 * and add another set of two more subspans if in 16-pixel dispatch mode.
1871 *
1872 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
1873 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
1874 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
1875 * between each other. We could probably do it like ddx and swizzle the right
1876 * order later, but bail for now and just produce
1877 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
1878 */
1879 void
1880 fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
1881 {
1882 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
1883 BRW_REGISTER_TYPE_F,
1884 BRW_VERTICAL_STRIDE_2,
1885 BRW_WIDTH_2,
1886 BRW_HORIZONTAL_STRIDE_0,
1887 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1888 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
1889 BRW_REGISTER_TYPE_F,
1890 BRW_VERTICAL_STRIDE_2,
1891 BRW_WIDTH_2,
1892 BRW_HORIZONTAL_STRIDE_0,
1893 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1894 brw_ADD(p, dst, src0, negate(src1));
1895 }
1896
1897 void
1898 fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
1899 {
1900 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
1901 BRW_REGISTER_TYPE_F,
1902 BRW_VERTICAL_STRIDE_4,
1903 BRW_WIDTH_4,
1904 BRW_HORIZONTAL_STRIDE_0,
1905 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1906 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
1907 BRW_REGISTER_TYPE_F,
1908 BRW_VERTICAL_STRIDE_4,
1909 BRW_WIDTH_4,
1910 BRW_HORIZONTAL_STRIDE_0,
1911 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1912 brw_ADD(p, dst, src0, negate(src1));
1913 }
1914
/** Generates the discard sequence: clears the executing channels' bits
 * out of the pixel-enable mask in the g0 header so later FB writes and
 * depth updates skip the killed pixels.
 */
void
fs_visitor::generate_discard(fs_inst *inst, struct brw_reg temp)
{
   struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
   temp = brw_uw1_reg(temp.file, temp.nr, 0);

   brw_push_insn_state(p);
   /* Run unmasked so the AND below updates the header for all channels. */
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_NOT(p, temp, brw_mask_reg(1)); /* IMASK */
   brw_AND(p, g0, temp, g0);
   brw_pop_insn_state(p);
}
1927
/** Maps UNIFORM-file registers to their fixed locations in the CURBE.
 *
 * Computes the constant-buffer read length and start register, then
 * rewrites every UNIFORM source operand into a FIXED_HW_REG pointing at
 * the pushed constant's location.
 */
void
fs_visitor::assign_curb_setup()
{
   c->prog_data.first_curbe_grf = c->key.nr_payload_regs;
   /* Constants are pushed 8 per register; round up to whole registers. */
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;

   if (intel->gen == 5 && (c->prog_data.first_curbe_grf +
                           c->prog_data.curb_read_length) & 1) {
      /* Align the start of the interpolation coefficients so that we can use
       * the PLN instruction.
       */
      c->prog_data.first_curbe_grf++;
   }

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = brw_reg;
         }
      }
   }
}
1959
/** Lays out the incoming URB (varying setup) data and patches LINTERP
 * sources to point at the real setup registers.
 *
 * Each read attribute occupies two registers of setup data after the
 * CURBE; LINTERP src[2] was emitted as attribute*2 (+odd half) by
 * interp_reg() and is rebased here.
 */
void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;
   int interp_reg_nr[FRAG_ATTRIB_MAX];

   c->prog_data.urb_read_length = 0;

   /* Figure out where each of the incoming setup attributes lands. */
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      interp_reg_nr[i] = -1;

      /* WPOS is always present in the setup data; other attributes only
       * if the program reads them.
       */
      if (i != FRAG_ATTRIB_WPOS &&
          !(brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)))
         continue;

      /* Each attribute is 4 setup channels, each of which is half a reg. */
      interp_reg_nr[i] = urb_start + c->prog_data.urb_read_length;
      c->prog_data.urb_read_length += 2;
   }

   /* Map the register numbers for FS_OPCODE_LINTERP so that it uses
    * the correct setup input.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != FS_OPCODE_LINTERP)
         continue;

      assert(inst->src[2].file == FIXED_HW_REG);

      /* Recover the attribute index from the placeholder register number,
       * keeping the odd-register half selection bit.
       */
      int location = inst->src[2].fixed_hw_reg.nr / 2;
      assert(interp_reg_nr[location] != -1);
      inst->src[2].fixed_hw_reg.nr = (interp_reg_nr[location] +
                                      (inst->src[2].fixed_hw_reg.nr & 1));
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}
2000
2001 static void
2002 assign_reg(int *reg_hw_locations, fs_reg *reg)
2003 {
2004 if (reg->file == GRF && reg->reg != 0) {
2005 reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
2006 reg->reg = 0;
2007 }
2008 }
2009
/** Trivially assigns hardware registers: each virtual GRF gets a
 * contiguous range right after the previous one, with no reuse.
 *
 * NOTE(review): hw_reg_mapping is a VLA sized by virtual_grf_next; the
 * unconditional writes to indices 0 and 1 assume at least two virtual
 * GRFs exist -- confirm that invariant holds for all callers.
 */
void
fs_visitor::assign_regs_trivial()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next];
   int i;

   /* Virtual reg 0 is the unused placeholder; reg 1 starts right after
    * the payload/setup registers.
    */
   hw_reg_mapping[0] = 0;
   hw_reg_mapping[1] = this->first_non_payload_grf;
   for (i = 2; i < this->virtual_grf_next; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1]);
   }
   last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}
2035
2036 void
2037 fs_visitor::assign_regs()
2038 {
2039 int last_grf = 0;
2040 int hw_reg_mapping[this->virtual_grf_next + 1];
2041 int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf;
2042 int class_sizes[base_reg_count];
2043 int class_count = 0;
2044
2045 /* Set up the register classes.
2046 *
2047 * The base registers store a scalar value. For texture samples,
2048 * we get virtual GRFs composed of 4 contiguous hw register. For
2049 * structures and arrays, we store them as contiguous larger things
2050 * than that, though we should be able to do better most of the
2051 * time.
2052 */
2053 class_sizes[class_count++] = 1;
2054 for (int r = 1; r < this->virtual_grf_next; r++) {
2055 int i;
2056
2057 for (i = 0; i < class_count; i++) {
2058 if (class_sizes[i] == this->virtual_grf_sizes[r])
2059 break;
2060 }
2061 if (i == class_count) {
2062 class_sizes[class_count++] = this->virtual_grf_sizes[r];
2063 }
2064 }
2065
2066 int ra_reg_count = 0;
2067 int class_base_reg[class_count];
2068 int class_reg_count[class_count];
2069 int classes[class_count];
2070
2071 for (int i = 0; i < class_count; i++) {
2072 class_base_reg[i] = ra_reg_count;
2073 class_reg_count[i] = base_reg_count - (class_sizes[i] - 1);
2074 ra_reg_count += class_reg_count[i];
2075 }
2076
2077 struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count);
2078 for (int i = 0; i < class_count; i++) {
2079 classes[i] = ra_alloc_reg_class(regs);
2080
2081 for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
2082 ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r);
2083 }
2084
2085 /* Add conflicts between our contiguous registers aliasing
2086 * base regs and other register classes' contiguous registers
2087 * that alias base regs, or the base regs themselves for classes[0].
2088 */
2089 for (int c = 0; c <= i; c++) {
2090 for (int i_r = 0; i_r < class_reg_count[i] - 1; i_r++) {
2091 for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1));
2092 c_r <= MIN2(class_reg_count[c] - 1, i_r + class_sizes[i] - 1);
2093 c_r++) {
2094
2095 if (0) {
2096 printf("%d/%d conflicts %d/%d\n",
2097 class_sizes[i], i_r,
2098 class_sizes[c], c_r);
2099 }
2100
2101 ra_add_reg_conflict(regs,
2102 class_base_reg[i] + i_r,
2103 class_base_reg[c] + c_r);
2104 }
2105 }
2106 }
2107 }
2108
2109 ra_set_finalize(regs);
2110
2111 struct ra_graph *g = ra_alloc_interference_graph(regs,
2112 this->virtual_grf_next);
2113 /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
2114 * with nodes.
2115 */
2116 ra_set_node_class(g, 0, classes[0]);
2117
2118 /* FINISHME: Proper interference (live interval analysis) */
2119 for (int i = 1; i < this->virtual_grf_next; i++) {
2120 for (int c = 0; c < class_count; c++) {
2121 if (class_sizes[c] == this->virtual_grf_sizes[i]) {
2122 ra_set_node_class(g, i, classes[c]);
2123 break;
2124 }
2125 }
2126
2127 for (int j = 1; j < i; j++) {
2128 ra_add_node_interference(g, i, j);
2129 }
2130 }
2131
2132 /* FINISHME: Handle spilling */
2133 if (!ra_allocate_no_spills(g)) {
2134 fprintf(stderr, "Failed to allocate registers.\n");
2135 this->fail = true;
2136 return;
2137 }
2138
2139 /* Get the chosen virtual registers for each node, and map virtual
2140 * regs in the register classes back down to real hardware reg
2141 * numbers.
2142 */
2143 hw_reg_mapping[0] = 0; /* unused */
2144 for (int i = 1; i < this->virtual_grf_next; i++) {
2145 int reg = ra_get_node_reg(g, i);
2146 int hw_reg = -1;
2147
2148 for (int c = 0; c < class_count; c++) {
2149 if (reg >= class_base_reg[c] &&
2150 reg < class_base_reg[c] + class_reg_count[c] - 1) {
2151 hw_reg = reg - class_base_reg[c];
2152 break;
2153 }
2154 }
2155
2156 assert(hw_reg != -1);
2157 hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
2158 last_grf = MAX2(last_grf,
2159 hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
2160 }
2161
2162 foreach_iter(exec_list_iterator, iter, this->instructions) {
2163 fs_inst *inst = (fs_inst *)iter.get();
2164
2165 assign_reg(hw_reg_mapping, &inst->dst);
2166 assign_reg(hw_reg_mapping, &inst->src[0]);
2167 assign_reg(hw_reg_mapping, &inst->src[1]);
2168 }
2169
2170 this->grf_used = last_grf + 1;
2171
2172 talloc_free(g);
2173 talloc_free(regs);
2174 }
2175
2176 static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg)
2177 {
2178 struct brw_reg brw_reg;
2179
2180 switch (reg->file) {
2181 case GRF:
2182 case ARF:
2183 case MRF:
2184 brw_reg = brw_vec8_reg(reg->file,
2185 reg->hw_reg, 0);
2186 brw_reg = retype(brw_reg, reg->type);
2187 break;
2188 case IMM:
2189 switch (reg->type) {
2190 case BRW_REGISTER_TYPE_F:
2191 brw_reg = brw_imm_f(reg->imm.f);
2192 break;
2193 case BRW_REGISTER_TYPE_D:
2194 brw_reg = brw_imm_d(reg->imm.i);
2195 break;
2196 case BRW_REGISTER_TYPE_UD:
2197 brw_reg = brw_imm_ud(reg->imm.u);
2198 break;
2199 default:
2200 assert(!"not reached");
2201 break;
2202 }
2203 break;
2204 case FIXED_HW_REG:
2205 brw_reg = reg->fixed_hw_reg;
2206 break;
2207 case BAD_FILE:
2208 /* Probably unused. */
2209 brw_reg = brw_null_reg();
2210 break;
2211 case UNIFORM:
2212 assert(!"not reached");
2213 brw_reg = brw_null_reg();
2214 break;
2215 }
2216 if (reg->abs)
2217 brw_reg = brw_abs(brw_reg);
2218 if (reg->negate)
2219 brw_reg = negate(brw_reg);
2220
2221 return brw_reg;
2222 }
2223
2224 void
2225 fs_visitor::generate_code()
2226 {
2227 unsigned int annotation_len = 0;
2228 int last_native_inst = 0;
2229 struct brw_instruction *if_stack[16], *loop_stack[16];
2230 int if_stack_depth = 0, loop_stack_depth = 0;
2231 int if_depth_in_loop[16];
2232
2233 if_depth_in_loop[loop_stack_depth] = 0;
2234
2235 memset(&if_stack, 0, sizeof(if_stack));
2236 foreach_iter(exec_list_iterator, iter, this->instructions) {
2237 fs_inst *inst = (fs_inst *)iter.get();
2238 struct brw_reg src[3], dst;
2239
2240 for (unsigned int i = 0; i < 3; i++) {
2241 src[i] = brw_reg_from_fs_reg(&inst->src[i]);
2242 }
2243 dst = brw_reg_from_fs_reg(&inst->dst);
2244
2245 brw_set_conditionalmod(p, inst->conditional_mod);
2246 brw_set_predicate_control(p, inst->predicated);
2247
2248 switch (inst->opcode) {
2249 case BRW_OPCODE_MOV:
2250 brw_MOV(p, dst, src[0]);
2251 break;
2252 case BRW_OPCODE_ADD:
2253 brw_ADD(p, dst, src[0], src[1]);
2254 break;
2255 case BRW_OPCODE_MUL:
2256 brw_MUL(p, dst, src[0], src[1]);
2257 break;
2258
2259 case BRW_OPCODE_FRC:
2260 brw_FRC(p, dst, src[0]);
2261 break;
2262 case BRW_OPCODE_RNDD:
2263 brw_RNDD(p, dst, src[0]);
2264 break;
2265 case BRW_OPCODE_RNDZ:
2266 brw_RNDZ(p, dst, src[0]);
2267 break;
2268
2269 case BRW_OPCODE_AND:
2270 brw_AND(p, dst, src[0], src[1]);
2271 break;
2272 case BRW_OPCODE_OR:
2273 brw_OR(p, dst, src[0], src[1]);
2274 break;
2275 case BRW_OPCODE_XOR:
2276 brw_XOR(p, dst, src[0], src[1]);
2277 break;
2278
2279 case BRW_OPCODE_CMP:
2280 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
2281 break;
2282 case BRW_OPCODE_SEL:
2283 brw_SEL(p, dst, src[0], src[1]);
2284 break;
2285
2286 case BRW_OPCODE_IF:
2287 assert(if_stack_depth < 16);
2288 if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
2289 if_depth_in_loop[loop_stack_depth]++;
2290 if_stack_depth++;
2291 break;
2292 case BRW_OPCODE_ELSE:
2293 if_stack[if_stack_depth - 1] =
2294 brw_ELSE(p, if_stack[if_stack_depth - 1]);
2295 break;
2296 case BRW_OPCODE_ENDIF:
2297 if_stack_depth--;
2298 brw_ENDIF(p , if_stack[if_stack_depth]);
2299 if_depth_in_loop[loop_stack_depth]--;
2300 break;
2301
2302 case BRW_OPCODE_DO:
2303 loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
2304 if_depth_in_loop[loop_stack_depth] = 0;
2305 break;
2306
2307 case BRW_OPCODE_BREAK:
2308 brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
2309 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2310 break;
2311 case BRW_OPCODE_CONTINUE:
2312 brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
2313 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2314 break;
2315
2316 case BRW_OPCODE_WHILE: {
2317 struct brw_instruction *inst0, *inst1;
2318 GLuint br = 1;
2319
2320 if (intel->gen == 5)
2321 br = 2;
2322
2323 assert(loop_stack_depth > 0);
2324 loop_stack_depth--;
2325 inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
2326 /* patch all the BREAK/CONT instructions from last BGNLOOP */
2327 while (inst0 > loop_stack[loop_stack_depth]) {
2328 inst0--;
2329 if (inst0->header.opcode == BRW_OPCODE_BREAK &&
2330 inst0->bits3.if_else.jump_count == 0) {
2331 inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
2332 }
2333 else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
2334 inst0->bits3.if_else.jump_count == 0) {
2335 inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
2336 }
2337 }
2338 }
2339 break;
2340
2341 case FS_OPCODE_RCP:
2342 case FS_OPCODE_RSQ:
2343 case FS_OPCODE_SQRT:
2344 case FS_OPCODE_EXP2:
2345 case FS_OPCODE_LOG2:
2346 case FS_OPCODE_POW:
2347 case FS_OPCODE_SIN:
2348 case FS_OPCODE_COS:
2349 generate_math(inst, dst, src);
2350 break;
2351 case FS_OPCODE_LINTERP:
2352 generate_linterp(inst, dst, src);
2353 break;
2354 case FS_OPCODE_TEX:
2355 case FS_OPCODE_TXB:
2356 case FS_OPCODE_TXL:
2357 generate_tex(inst, dst, src[0]);
2358 break;
2359 case FS_OPCODE_DISCARD:
2360 generate_discard(inst, dst /* src0 == dst */);
2361 break;
2362 case FS_OPCODE_DDX:
2363 generate_ddx(inst, dst, src[0]);
2364 break;
2365 case FS_OPCODE_DDY:
2366 generate_ddy(inst, dst, src[0]);
2367 break;
2368 case FS_OPCODE_FB_WRITE:
2369 generate_fb_write(inst);
2370 break;
2371 default:
2372 if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
2373 _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
2374 brw_opcodes[inst->opcode].name);
2375 } else {
2376 _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
2377 }
2378 this->fail = true;
2379 }
2380
2381 if (annotation_len < p->nr_insn) {
2382 annotation_len *= 2;
2383 if (annotation_len < 16)
2384 annotation_len = 16;
2385
2386 this->annotation_string = talloc_realloc(this->mem_ctx,
2387 annotation_string,
2388 const char *,
2389 annotation_len);
2390 this->annotation_ir = talloc_realloc(this->mem_ctx,
2391 annotation_ir,
2392 ir_instruction *,
2393 annotation_len);
2394 }
2395
2396 for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
2397 this->annotation_string[i] = inst->annotation;
2398 this->annotation_ir[i] = inst->ir;
2399 }
2400 last_native_inst = p->nr_insn;
2401 }
2402 }
2403
2404 GLboolean
2405 brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
2406 {
2407 struct brw_compile *p = &c->func;
2408 struct intel_context *intel = &brw->intel;
2409 GLcontext *ctx = &intel->ctx;
2410 struct brw_shader *shader = NULL;
2411 struct gl_shader_program *prog = ctx->Shader.CurrentProgram;
2412
2413 if (!prog)
2414 return GL_FALSE;
2415
2416 if (!using_new_fs)
2417 return GL_FALSE;
2418
2419 for (unsigned int i = 0; i < prog->_NumLinkedShaders; i++) {
2420 if (prog->_LinkedShaders[i]->Type == GL_FRAGMENT_SHADER) {
2421 shader = (struct brw_shader *)prog->_LinkedShaders[i];
2422 break;
2423 }
2424 }
2425 if (!shader)
2426 return GL_FALSE;
2427
2428 /* We always use 8-wide mode, at least for now. For one, flow
2429 * control only works in 8-wide. Also, when we're fragment shader
2430 * bound, we're almost always under register pressure as well, so
2431 * 8-wide would save us from the performance cliff of spilling
2432 * regs.
2433 */
2434 c->dispatch_width = 8;
2435
2436 if (INTEL_DEBUG & DEBUG_WM) {
2437 printf("GLSL IR for native fragment shader %d:\n", prog->Name);
2438 _mesa_print_ir(shader->ir, NULL);
2439 printf("\n");
2440 }
2441
2442 /* Now the main event: Visit the shader IR and generate our FS IR for it.
2443 */
2444 fs_visitor v(c, shader);
2445
2446 if (0) {
2447 v.emit_dummy_fs();
2448 } else {
2449 v.emit_interpolation_setup();
2450
2451 /* Generate FS IR for main(). (the visitor only descends into
2452 * functions called "main").
2453 */
2454 foreach_iter(exec_list_iterator, iter, *shader->ir) {
2455 ir_instruction *ir = (ir_instruction *)iter.get();
2456 v.base_ir = ir;
2457 ir->accept(&v);
2458 }
2459
2460 v.emit_fb_writes();
2461 v.assign_curb_setup();
2462 v.assign_urb_setup();
2463 if (0)
2464 v.assign_regs_trivial();
2465 else
2466 v.assign_regs();
2467 }
2468
2469 v.generate_code();
2470
2471 assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */
2472
2473 if (v.fail)
2474 return GL_FALSE;
2475
2476 if (INTEL_DEBUG & DEBUG_WM) {
2477 const char *last_annotation_string = NULL;
2478 ir_instruction *last_annotation_ir = NULL;
2479
2480 printf("Native code for fragment shader %d:\n", prog->Name);
2481 for (unsigned int i = 0; i < p->nr_insn; i++) {
2482 if (last_annotation_ir != v.annotation_ir[i]) {
2483 last_annotation_ir = v.annotation_ir[i];
2484 if (last_annotation_ir) {
2485 printf(" ");
2486 last_annotation_ir->print();
2487 printf("\n");
2488 }
2489 }
2490 if (last_annotation_string != v.annotation_string[i]) {
2491 last_annotation_string = v.annotation_string[i];
2492 if (last_annotation_string)
2493 printf(" %s\n", last_annotation_string);
2494 }
2495 brw_disasm(stdout, &p->store[i], intel->gen);
2496 }
2497 printf("\n");
2498 }
2499
2500 c->prog_data.total_grf = v.grf_used;
2501 c->prog_data.total_scratch = 0;
2502
2503 return GL_TRUE;
2504 }