a956453dbf5ca6e3063eff90f213d8f0697611c0
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
#include "talloc.h"
}
#include "brw_fs.h"
#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"

static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = talloc_zero(NULL, struct brw_shader);
   if (!shader)
      return NULL;

   shader->base.Type = type;
   shader->base.Name = name;
   _mesa_init_shader(ctx, &shader->base);

   return &shader->base;
}

struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;

   prog = talloc_zero(NULL, struct brw_shader_program);
   if (!prog)
      return NULL;

   prog->base.Name = name;
   _mesa_init_shader_program(ctx, &prog->base);

   return &prog->base;
}

GLboolean
brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
{
   if (!_mesa_ir_compile_shader(ctx, shader))
      return GL_FALSE;

   return GL_TRUE;
}

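/**
 * Performs the link-time IR lowering for the fragment shader: clones the
 * linked GLSL IR, runs the lowering passes the i965 FS backend requires,
 * and then loops the optimization passes until none makes further progress.
 */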
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_shader *shader =
      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (shader != NULL) {
      void *mem_ctx = talloc_new(NULL);
      bool progress;

      if (shader->ir)
         talloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      do_mat_op_to_vec(shader->ir);
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2);
      do_lower_texture_projection(shader->ir);
      brw_do_cubemap_normalize(shader->ir);

      do {
         progress = false;

         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, 32) || progress;

         progress = lower_noise(shader->ir) || progress;
         progress =
            lower_variable_index_to_cond_assign(shader->ir,
                                                GL_TRUE, /* input */
                                                GL_TRUE, /* output */
                                                GL_TRUE, /* temp */
                                                GL_TRUE /* uniform */
                                                ) || progress;
         progress = lower_quadop_vector(shader->ir, false) || progress;
      } while (progress);

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      talloc_free(mem_ctx);
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}

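/**
 * Returns the number of scalar slots occupied by a value of the given type
 * in the virtual register file.
 *
 * For example, a float takes 1 slot, a vec4 takes 4, and a vec4[3] array
 * takes 12; samplers take none since they're baked in at link time.
 */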
static int
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      return 1;
   case FS_OPCODE_POW:
      return 2;
   case FS_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case FS_OPCODE_TXL:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNSPILL:
      return 1;
   case FS_OPCODE_SPILL:
      return 2;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}

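/**
 * Allocates a new virtual GRF of the given size (in scalar slots) and
 * returns its index, growing the size-tracking array as needed.
 */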
int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes,
                                         int, virtual_grf_array_size);

      /* This slot is always unused. */
      virtual_grf_sizes[0] = 0;
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = BRW_REGISTER_TYPE_F;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = type;
}

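/**
 * Returns the hardware register type used for values of the given GLSL
 * base type: floats map to F, ints and bools to D, and uints to UD.
 */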
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}

/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}

fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *vec_values;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      vec_values = fp->Base.Parameters->ParameterValues[loc];
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         unsigned int param = c->prog_data.nr_params++;

         assert(param < ARRAY_SIZE(c->prog_data.param));

         switch (type->base_type) {
         case GLSL_TYPE_FLOAT:
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         case GLSL_TYPE_UINT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
            break;
         case GLSL_TYPE_INT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
            break;
         case GLSL_TYPE_BOOL:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
            break;
         default:
            assert(!"not reached");
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         }

         c->prog_data.param[param] = &vec_values[i];
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const struct gl_builtin_uniform_desc *statevar = NULL;

   for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
      if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0) {
         statevar = &_mesa_builtin_uniform_desc[i];
         break;
      }
   }

   if (!statevar) {
      this->fail = true;
      printf("Failed to find builtin uniform `%s'\n", ir->name);
      return;
   }

   int array_count;
   if (ir->type->is_array()) {
      array_count = ir->type->length;
   } else {
      array_count = 1;
   }

   for (int a = 0; a < array_count; a++) {
      for (unsigned int i = 0; i < statevar->num_elements; i++) {
         struct gl_builtin_uniform_element *element = &statevar->elements[i];
         int tokens[STATE_LENGTH];

         memcpy(tokens, element->tokens, sizeof(element->tokens));
         if (ir->type->is_array()) {
            tokens[1] = a;
         }

         /* This state reference has already been set up by ir_to_mesa,
          * but we'll get the same index back here.
          */
         int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                               (gl_state_index *)tokens);
         float *vec_values = this->fp->Base.Parameters->ParameterValues[index];

         /* Add each of the unique swizzles of the element as a
          * parameter.  This'll end up matching the expected layout of
          * the array/matrix/structure we're trying to fill in.
          */
         int last_swiz = -1;
         for (unsigned int j = 0; j < 4; j++) {
            int swiz = GET_SWZ(element->swizzle, j);
            if (swiz == last_swiz)
               break;
            last_swiz = swiz;

            c->prog_data.param_convert[c->prog_data.nr_params] =
               PARAM_NO_CONVERT;
            c->prog_data.param[c->prog_data.nr_params++] = &vec_values[swiz];
         }
      }
   }
}

fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   fs_reg neg_y = this->pixel_y;
   neg_y.negate = true;
   bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
   } else {
      emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (!flip && ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 2)));
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));

   return reg;
}

fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         this->fail = true;
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         for (unsigned int c = 0; c < type->vector_elements; c++) {
            struct brw_reg interp = interp_reg(location, c);
            emit(fs_inst(FS_OPCODE_LINTERP,
                         attr,
                         this->delta_x,
                         this->delta_y,
                         fs_reg(interp)));
            attr.reg_offset++;
         }

         if (intel->gen < 6) {
            attr.reg_offset -= type->vector_elements;
            for (unsigned int c = 0; c < type->vector_elements; c++) {
               emit(fs_inst(BRW_OPCODE_MUL,
                            attr,
                            attr,
                            this->pixel_w));
               attr.reg_offset++;
            }
         }
         location++;
      }
   }

   return reg;
}

fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(fs_inst(BRW_OPCODE_ASR,
                   *reg,
                   fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
                   fs_reg(15)));
      emit(fs_inst(BRW_OPCODE_NOT,
                   *reg,
                   *reg));
      emit(fs_inst(BRW_OPCODE_AND,
                   *reg,
                   *reg,
                   fs_reg(1)));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front face
       */
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
                                   *reg,
                                   fs_reg(r1_6ud),
                                   fs_reg(1u << 31)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
   }

   return reg;
}

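/**
 * Emits a single-source math instruction (RCP, RSQ, SQRT, EXP2, LOG2, SIN,
 * or COS).  On gen4/5 these are messages to the shared math unit, so the
 * operand goes through an MRF; on gen6+ they execute in the EU, which
 * can't take an hstride == 0 (uniform) argument directly.
 */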
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
{
   switch (opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out.  We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    */
   if (intel->gen >= 6 && src.file == UNIFORM) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(fs_inst(BRW_OPCODE_MOV, expanded, src));
      src = expanded;
   }

   fs_inst *inst = emit(fs_inst(opcode, dst, src));

   if (intel->gen < 6) {
      inst->base_mrf = 2;
      inst->mlen = 1;
   }

   return inst;
}

fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   int base_mrf = 2;
   fs_inst *inst;

   assert(opcode == FS_OPCODE_POW);

   if (intel->gen >= 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out. */
      if (src0.file == UNIFORM) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src0));
         src0 = expanded;
      }

      if (src1.file == UNIFORM) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src1));
         src1 = expanded;
      }

      inst = emit(fs_inst(opcode, dst, src0, src1));
   } else {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1));
      inst = emit(fs_inst(opcode, dst, src0, reg_null_f));

      inst->base_mrf = base_mrf;
      inst->mlen = 2;
   }
   return inst;
}

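/**
 * Sets up storage for a variable the first time it's seen: inputs get
 * interpolation code emitted, uniforms get appended to the param list,
 * and everything else gets a fresh virtual GRF.
 */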
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   }

   if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM ||
             (this->result.file == GRF &&
              this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   sat_val->accept(this);
   fs_reg src = this->result;

   this->result = fs_reg(this, ir->type);
   fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, src));
   inst->saturate = true;

   return true;
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
         this->fail = true;
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to
       * scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * the one's complement of the whole register, not just bit 0.
       */
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
      inst->predicated = true;

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(FS_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(FS_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(FS_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(FS_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(FS_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
      break;
   case ir_unop_dFdy:
      emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
      break;

   case ir_binop_add:
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      break;
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;

   case ir_binop_less:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_greater:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_lequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_LE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_gequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_GE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_Z;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;

   case ir_binop_logic_xor:
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(FS_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(FS_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(fs_inst(BRW_OPCODE_AND, this->result,
                          this->result, fs_reg(1)));
      break;

   case ir_unop_trunc:
      emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0]));
      break;
   case ir_unop_ceil:
      /* ceil(x) = -floor(-x) */
      op[0].negate = !op[0].negate;
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
      break;
   case ir_unop_round_even:
      emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0]));
      break;

   case ir_binop_min:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_max:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;

   case ir_binop_pow:
      emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(fs_inst(BRW_OPCODE_NOT, this->result, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;

   case ir_unop_u2f:
   case ir_binop_lshift:
   case ir_binop_rshift:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
}

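/**
 * Recursively emits the per-component MOVs for assigning a composite
 * (array/struct/vector) value, advancing l and r through the components.
 */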
void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
         inst->predicated = predicated;

         l.reg_offset++;
         r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}

void
fs_visitor::visit(ir_assignment *ir)
{
   struct fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   ir->rhs->accept(this);
   r = this->result;

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

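/**
 * Builds a gen4 sampler message: the g0 header, then the coordinate and
 * any shadow comparison, bias, or LOD payload in the MRFs, then the send.
 */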
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      fs_reg(0.0f)));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      assert(ir->op == ir_txb || ir->op == ir_txl);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
                      coordinate));
         coordinate.reg_offset++;
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      /* The unused upper half. */
      mlen++;

      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk.  We'll need to move
       * this weirdness around to the expected layout.
       */
      simd16 = true;
      orig_dst = dst;
      dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type,
                                                       2));
      dst.type = BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   case ir_txd:
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, orig_dst, dst));
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}

fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   /* gen5's SIMD8 sampler has slots for u, v, r, array index, then
    * optional parameters like shadow comparitor or LOD bias.  If the
    * optional parameters aren't present, those base slots can be left
    * out of the message.
    *
    * We don't fill in the unnecessary slots regardless, which may
    * look surprising in the disassembly.
    */
   int mlen = 1; /* g0 header always present. */
   int base_mrf = 1;

   for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                   coordinate));
      coordinate.reg_offset++;
   }
   mlen += ir->coordinate->type->vector_elements;

   if (ir->shadow_comparitor) {
      mlen = MAX2(mlen, 5);

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   case ir_txd:
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   return inst;
}

void
fs_visitor::visit(ir_texture *ir)
{
   int sampler;
   fs_inst *inst = NULL;

   ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   sampler = _mesa_get_sampler_uniform_value(ir->sampler,
                                             ctx->Shader.CurrentFragmentProgram,
                                             &brw->fragment_program->Base);
   sampler = c->fp->program.Base.SamplerUnits[sampler];

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      c->prog_data.param_convert[c->prog_data.nr_params] =
         PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
         PARAM_NO_CONVERT;

      fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);
      float *vec_values = this->fp->Base.Parameters->ParameterValues[index];

      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[0];
      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[1];

      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x));
      dst.reg_offset++;
      src.reg_offset++;
      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y));
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::vec4_type);

   if (intel->gen < 5) {
      inst = emit_texture_gen4(ir, dst, coordinate);
   } else {
      inst = emit_texture_gen5(ir, dst, coordinate);
   }

   inst->sampler = sampler;

   this->result = dst;

   if (ir->shadow_comparitor)
      inst->shadow_compare = true;

   if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) {
      fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
         fs_reg l = swizzle_dst;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(0.0f)));
         } else if (swiz == SWIZZLE_ONE) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(1.0f)));
         } else {
            fs_reg r = dst;
            r.reg_offset += swiz;
            emit(fs_inst(BRW_OPCODE_MOV, l, r));
         }
      }
      this->result = swizzle_dst;
   }
}

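/**
 * Resolves a swizzle: a single-component swizzle just offsets into the
 * source register, while wider ones copy each selected channel into a
 * freshly allocated result.
 */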
void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(fs_inst(BRW_OPCODE_MOV, result, channel));
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   fs_reg temp = fs_reg(this, glsl_type::uint_type);

   assert(ir->condition == NULL); /* FINISHME */

   emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d));
   emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp));
   kill_emitted = true;
}

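/**
 * Materializes a constant by MOVing each scalar element of the (possibly
 * nested array or record) value into a newly allocated register.
 */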
void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
         ir_instruction *const field = (ir_instruction *) node;
         const unsigned size = type_size(field->type);

         field->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])));
            break;
         case GLSL_TYPE_UINT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])));
            break;
         case GLSL_TYPE_INT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])));
            break;
         case GLSL_TYPE_BOOL:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])));
            break;
         default:
            assert(!"Non-float/uint/int/bool constant");
         }
         dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}

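/**
 * Emits an instruction whose conditional mod reflects the given boolean
 * rvalue, folding comparisons directly into the flag-setting instruction
 * where possible so a later predicated instruction can consume the flag.
 */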
void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                op[0], fs_reg(0.0f)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_gequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_less:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      case ir_binop_lequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_equal:
      case ir_binop_all_equal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      default:
         assert(!"not reached");
         this->fail = true;
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d,
                                   this->result, fs_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
fs_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;
      fs_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         /* Enter the if-block when the boolean operand is 0. */
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;

      case ir_binop_logic_xor:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_or:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(fs_inst(BRW_OPCODE_OR, temp, op[0], op[1]));
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_and:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(fs_inst(BRW_OPCODE_AND, temp, op[0], op[1]));
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_f2b:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0.0f)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_i2b:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_greater:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_G;
         return;
      case ir_binop_gequal:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         return;
      case ir_binop_less:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_L;
         return;
      case ir_binop_lequal:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         return;
      case ir_binop_equal:
      case ir_binop_all_equal:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;
      default:
         assert(!"not reached");
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         this->fail = true;
         return;
      }
      return;
   }

   ir->condition->accept(this);

   fs_inst *inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, this->result,
                                fs_reg(0)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;
}

void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen >= 6) {
      emit_if_gen6(ir);
   } else {
      emit_bool_to_cond_code(ir->condition);

      inst = emit(fs_inst(BRW_OPCODE_IF));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(fs_inst(BRW_OPCODE_ELSE));

      foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(fs_inst(BRW_OPCODE_ENDIF));
}

void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
      }
   }

   emit(fs_inst(BRW_OPCODE_DO));

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                   counter, this->result));
      switch (ir->cmp) {
      case ir_binop_equal:
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      case ir_binop_gequal:
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_lequal:
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_greater:
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_less:
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      default:
         assert(!"not reached: unknown loop condition");
         this->fail = true;
         break;
      }

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();

      this->base_ir = ir;
      ir->accept(this);
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
   }

   emit(fs_inst(BRW_OPCODE_WHILE));
}

void
fs_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(fs_inst(BRW_OPCODE_BREAK));
      break;
   case ir_loop_jump::jump_continue:
      emit(fs_inst(BRW_OPCODE_CONTINUE));
      break;
   }
}

void
fs_visitor::visit(ir_call *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_return *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all have been inlined by the time we get here.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_iter(exec_list_iterator, iter, sig->body) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}

void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
   (void)ir;
}

fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 2),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 3),
                fs_reg(0.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 4),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 5),
                fs_reg(0.0f)));

   fs_inst *write;
   write = emit(fs_inst(FS_OPCODE_FB_WRITE,
                        fs_reg(0),
                        fs_reg(0)));
   write->base_mrf = 0;
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   this->current_annotation = "compute pixel deltas from v0";
   if (brw->has_pln) {
      this->delta_x = fs_reg(this, glsl_type::vec2_type);
      this->delta_y = this->delta_x;
      this->delta_y.reg_offset++;
   } else {
      this->delta_x = fs_reg(this, glsl_type::float_type);
      this->delta_y = fs_reg(this, glsl_type::float_type);
   }
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_x,
                this->pixel_x,
                fs_reg(negate(brw_vec1_grf(1, 0)))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_y,
                this->pixel_y,
                fs_reg(negate(brw_vec1_grf(1, 1)))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 3)));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x));
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y));

   this->current_annotation = "compute 1/pos.w";
   this->wpos_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);

   this->delta_x = fs_reg(brw_vec8_grf(2, 0));
   this->delta_y = fs_reg(brw_vec8_grf(3, 0));

   this->current_annotation = NULL;
}

2010 void
2011 fs_visitor::emit_fb_writes()
2012 {
2013 this->current_annotation = "FB write header";
2014 bool header_present = true;
2015 int nr = 0;
2016
2017 if (intel->gen >= 6 &&
2018 !this->kill_emitted &&
2019 c->key.nr_color_regions == 1) {
2020 header_present = false;
2021 }
2022
2023 if (header_present) {
2024 /* m0, m1 header */
2025 nr += 2;
2026 }
2027
2028 if (c->aa_dest_stencil_reg) {
2029 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2030 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))));
2031 }
2032
2033 /* Reserve space for color. It'll be filled in per MRT below. */
2034 int color_mrf = nr;
2035 nr += 4;
2036
2037 if (c->source_depth_to_render_target) {
2038 if (c->computes_depth) {
2039 /* Hand over gl_FragDepth. */
2040 assert(this->frag_depth);
2041 fs_reg depth = *(variable_storage(this->frag_depth));
2042
2043 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
2044 } else {
2045 /* Pass through the payload depth. */
2046 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2047 fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
2048 }
2049 }
2050
2051 if (c->dest_depth_reg) {
2052 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2053 fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))));
2054 }
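
/* Editor's note: a sketch of the message built above when every
 * optional piece is present (offsets shift when a piece is skipped):
 *
 *    m0..m1  header (if header_present)
 *    m2      AA dest stencil (if c->aa_dest_stencil_reg)
 *    m3..m6  color RGBA, rewritten per render target below
 *    m7      source depth (if c->source_depth_to_render_target)
 *    m8      dest depth (if c->dest_depth_reg)
 *
 * nr accumulates the message length and becomes inst->mlen below.
 */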
2055
2056 fs_reg color = reg_undef;
2057 if (this->frag_color)
2058 color = *(variable_storage(this->frag_color));
2059 else if (this->frag_data) {
2060 color = *(variable_storage(this->frag_data));
2061 color.type = BRW_REGISTER_TYPE_F;
2062 }
2063
2064 for (int target = 0; target < c->key.nr_color_regions; target++) {
2065 this->current_annotation = talloc_asprintf(this->mem_ctx,
2066 "FB write target %d",
2067 target);
2068 if (this->frag_color || this->frag_data) {
2069 for (int i = 0; i < 4; i++) {
2070 emit(fs_inst(BRW_OPCODE_MOV,
2071 fs_reg(MRF, color_mrf + i),
2072 color));
2073 color.reg_offset++;
2074 }
2075 }
2076
2077 if (this->frag_color)
2078 color.reg_offset -= 4;
2079
2080 fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
2081 reg_undef, reg_undef));
2082 inst->target = target;
2083 inst->base_mrf = 0;
2084 inst->mlen = nr;
2085 if (target == c->key.nr_color_regions - 1)
2086 inst->eot = true;
2087 inst->header_present = header_present;
2088 }
2089
2090 if (c->key.nr_color_regions == 0) {
2091 fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
2092 reg_undef, reg_undef));
2093 inst->base_mrf = 0;
2094 inst->mlen = nr;
2095 inst->eot = true;
2096 inst->header_present = header_present;
2097 }
2098
2099 this->current_annotation = NULL;
2100 }
2101
2102 void
2103 fs_visitor::generate_fb_write(fs_inst *inst)
2104 {
2105 GLboolean eot = inst->eot;
2106 struct brw_reg implied_header;
2107
2108 /* The header is two registers; g0 and g1 provide the contents. g0
2109 * arrives via the SEND's implied move (pre-gen6), so set up g1 here.
2110 */
2111 brw_push_insn_state(p);
2112 brw_set_mask_control(p, BRW_MASK_DISABLE);
2113 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2114
2115 if (inst->header_present) {
2116 if (intel->gen >= 6) {
2117 brw_MOV(p,
2118 brw_message_reg(inst->base_mrf),
2119 brw_vec8_grf(0, 0));
2120
2121 if (inst->target > 0) {
2122 /* Set the render target index for choosing BLEND_STATE. */
2123 brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
2124 BRW_REGISTER_TYPE_UD),
2125 brw_imm_ud(inst->target));
2126 }
2127
2128 /* Clear viewport index, render target array index. */
2129 brw_AND(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 0),
2130 BRW_REGISTER_TYPE_UD),
2131 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2132 brw_imm_ud(0xf7ff));
2133
2134 implied_header = brw_null_reg();
2135 } else {
2136 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
2137 }
2138
2139 brw_MOV(p,
2140 brw_message_reg(inst->base_mrf + 1),
2141 brw_vec8_grf(1, 0));
2142 } else {
2143 implied_header = brw_null_reg();
2144 }
2145
2146 brw_pop_insn_state(p);
2147
2148 brw_fb_WRITE(p,
2149 8, /* dispatch_width */
2150 retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
2151 inst->base_mrf,
2152 implied_header,
2153 inst->target,
2154 inst->mlen,
2155 0,
2156 eot);
2157 }
2158
2159 void
2160 fs_visitor::generate_linterp(fs_inst *inst,
2161 struct brw_reg dst, struct brw_reg *src)
2162 {
2163 struct brw_reg delta_x = src[0];
2164 struct brw_reg delta_y = src[1];
2165 struct brw_reg interp = src[2];
2166
2167 if (brw->has_pln &&
2168 delta_y.nr == delta_x.nr + 1 &&
2169 (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
2170 brw_PLN(p, dst, interp, delta_x);
2171 } else {
2172 brw_LINE(p, brw_null_reg(), interp, delta_x);
2173 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
2174 }
2175 }
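
/* Editor's note: both paths above evaluate the same plane equation.
 * With the attribute's coefficients laid out as (Cx, Cy, -, C0) in the
 * interp register, the result is effectively
 *
 *    dst = Cx * delta_x + Cy * delta_y + C0
 *
 * PLN does it in one instruction but needs delta_x/delta_y in an
 * aligned, contiguous register pair; LINE (into the accumulator) plus
 * MAC is the fallback. (My reading of the ISA, not a quote from the
 * hardware docs.)
 */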
2176
2177 void
2178 fs_visitor::generate_math(fs_inst *inst,
2179 struct brw_reg dst, struct brw_reg *src)
2180 {
2181 int op;
2182
2183 switch (inst->opcode) {
2184 case FS_OPCODE_RCP:
2185 op = BRW_MATH_FUNCTION_INV;
2186 break;
2187 case FS_OPCODE_RSQ:
2188 op = BRW_MATH_FUNCTION_RSQ;
2189 break;
2190 case FS_OPCODE_SQRT:
2191 op = BRW_MATH_FUNCTION_SQRT;
2192 break;
2193 case FS_OPCODE_EXP2:
2194 op = BRW_MATH_FUNCTION_EXP;
2195 break;
2196 case FS_OPCODE_LOG2:
2197 op = BRW_MATH_FUNCTION_LOG;
2198 break;
2199 case FS_OPCODE_POW:
2200 op = BRW_MATH_FUNCTION_POW;
2201 break;
2202 case FS_OPCODE_SIN:
2203 op = BRW_MATH_FUNCTION_SIN;
2204 break;
2205 case FS_OPCODE_COS:
2206 op = BRW_MATH_FUNCTION_COS;
2207 break;
2208 default:
2209 assert(!"not reached: unknown math function");
2210 op = 0;
2211 break;
2212 }
2213
2214 if (intel->gen >= 6) {
2215 assert(inst->mlen == 0);
2216
2217 if (inst->opcode == FS_OPCODE_POW) {
2218 brw_math2(p, dst, op, src[0], src[1]);
2219 } else {
2220 brw_math(p, dst,
2221 op,
2222 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2223 BRW_MATH_SATURATE_NONE,
2224 0, src[0],
2225 BRW_MATH_DATA_VECTOR,
2226 BRW_MATH_PRECISION_FULL);
2227 }
2228 } else {
2229 assert(inst->mlen >= 1);
2230
2231 brw_math(p, dst,
2232 op,
2233 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2234 BRW_MATH_SATURATE_NONE,
2235 inst->base_mrf, src[0],
2236 BRW_MATH_DATA_VECTOR,
2237 BRW_MATH_PRECISION_FULL);
2238 }
2239 }
2240
2241 void
2242 fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst)
2243 {
2244 int msg_type = -1;
2245 int rlen = 4;
2246 uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
2247
2248 if (intel->gen >= 5) {
2249 switch (inst->opcode) {
2250 case FS_OPCODE_TEX:
2251 if (inst->shadow_compare) {
2252 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
2253 } else {
2254 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
2255 }
2256 break;
2257 case FS_OPCODE_TXB:
2258 if (inst->shadow_compare) {
2259 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
2260 } else {
2261 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
2262 }
2263 break;
2264 }
2265 } else {
2266 switch (inst->opcode) {
2267 case FS_OPCODE_TEX:
2268 /* Note that G45 and older determine shadow compare and dispatch width
2269 * from message length for most messages.
2270 */
2271 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
2272 if (inst->shadow_compare) {
2273 assert(inst->mlen == 6);
2274 } else {
2275 assert(inst->mlen <= 4);
2276 }
2277 break;
2278 case FS_OPCODE_TXB:
2279 if (inst->shadow_compare) {
2280 assert(inst->mlen == 6);
2281 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
2282 } else {
2283 assert(inst->mlen == 9);
2284 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
2285 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
2286 }
2287 break;
2288 }
2289 }
2290 assert(msg_type != -1);
2291
2292 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
2293 rlen = 8;
2294 dst = vec16(dst);
2295 }
2296
2297 brw_SAMPLE(p,
2298 retype(dst, BRW_REGISTER_TYPE_UW),
2299 inst->base_mrf,
2300 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
2301 SURF_INDEX_TEXTURE(inst->sampler),
2302 inst->sampler,
2303 WRITEMASK_XYZW,
2304 msg_type,
2305 rlen,
2306 inst->mlen,
2307 0,
2308 1,
2309 simd_mode);
2310 }
2311
2312
2313 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
2314 * looking like:
2315 *
2316 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
2317 *
2318 * and we're trying to produce:
2319 *
2320 * DDX DDY
2321 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
2322 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
2323 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
2324 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
2325 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
2326 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
2327 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
2328 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
2329 *
2330 * plus a second set of two subspans if in 16-pixel dispatch mode.
2331 *
2332 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
2333 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
2334 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
2335 * between each other. We could probably do it like ddx and swizzle the right
2336 * order later, but bail for now and just produce
2337 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
2338 */
2339 void
2340 fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2341 {
2342 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
2343 BRW_REGISTER_TYPE_F,
2344 BRW_VERTICAL_STRIDE_2,
2345 BRW_WIDTH_2,
2346 BRW_HORIZONTAL_STRIDE_0,
2347 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2348 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
2349 BRW_REGISTER_TYPE_F,
2350 BRW_VERTICAL_STRIDE_2,
2351 BRW_WIDTH_2,
2352 BRW_HORIZONTAL_STRIDE_0,
2353 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2354 brw_ADD(p, dst, src0, negate(src1));
2355 }
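
/* Editor's note: concretely, the <2;2,0> regions above select, per
 * subspan:
 *
 *    src0 (subreg 1): tr tr br br   (elements 1,1,3,3,...)
 *    src1 (subreg 0): tl tl bl bl   (elements 0,0,2,2,...)
 *    dst = src0 - src1: (tr-tl) (tr-tl) (br-bl) (br-bl)
 *
 * which is exactly the DDX column in the table above.
 */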
2356
2357 void
2358 fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2359 {
2360 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
2361 BRW_REGISTER_TYPE_F,
2362 BRW_VERTICAL_STRIDE_4,
2363 BRW_WIDTH_4,
2364 BRW_HORIZONTAL_STRIDE_0,
2365 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2366 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
2367 BRW_REGISTER_TYPE_F,
2368 BRW_VERTICAL_STRIDE_4,
2369 BRW_WIDTH_4,
2370 BRW_HORIZONTAL_STRIDE_0,
2371 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2372 brw_ADD(p, dst, src0, negate(src1));
2373 }
2374
2375 void
2376 fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
2377 {
2378 if (intel->gen >= 6) {
2379 /* Gen6 no longer has the mask reg for us to just read the
2380 * active channels from. However, cmp updates just the channels
2381 * of the flag reg that are enabled, so we can get at the
2382 * channel enables that way. In this step, make a reg of ones
2383 * we'll compare to.
2384 */
2385 brw_MOV(p, mask, brw_imm_ud(1));
2386 } else {
2387 brw_push_insn_state(p);
2388 brw_set_mask_control(p, BRW_MASK_DISABLE);
2389 brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
2390 brw_pop_insn_state(p);
2391 }
2392 }
2393
2394 void
2395 fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
2396 {
2397 if (intel->gen >= 6) {
2398 struct brw_reg f0 = brw_flag_reg();
2399 struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
2400
2401 brw_push_insn_state(p);
2402 brw_set_mask_control(p, BRW_MASK_DISABLE);
2403 brw_MOV(p, f0, brw_imm_uw(0xffff)); /* inactive channels undiscarded */
2404 brw_pop_insn_state(p);
2405
2406 brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
2407 BRW_CONDITIONAL_Z, mask, brw_imm_ud(0)); /* active channels fail test */
2408 /* Undo CMP's whacking of predication. */
2409 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2410
2411 brw_push_insn_state(p);
2412 brw_set_mask_control(p, BRW_MASK_DISABLE);
2413 brw_AND(p, g1, f0, g1);
2414 brw_pop_insn_state(p);
2415 } else {
2416 struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
2417
2418 mask = brw_uw1_reg(mask.file, mask.nr, 0);
2419
2420 brw_push_insn_state(p);
2421 brw_set_mask_control(p, BRW_MASK_DISABLE);
2422 brw_AND(p, g0, mask, g0);
2423 brw_pop_insn_state(p);
2424 }
2425 }
2426
2427 void
2428 fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
2429 {
2430 assert(inst->mlen != 0);
2431
2432 brw_MOV(p,
2433 retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
2434 retype(src, BRW_REGISTER_TYPE_UD));
2435 brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
2436 inst->offset);
2437 }
2438
2439 void
2440 fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
2441 {
2442 assert(inst->mlen != 0);
2443
2444 /* Clear any post-destination dependencies that would be ignored by
2445 * the block read. See the B-Spec for pre-gen5 send instruction.
2446 *
2447 * This could use a better solution, since texture sampling and
2448 * math reads could potentially run into it as well -- anywhere
2449 * that we have a SEND with a destination that is a register that
2450 * was written but not read within the last N instructions (what's
2451 * N? unsure). This is rare because of dead code elimination, but
2452 * not impossible.
2453 */
2454 if (intel->gen == 4 && !intel->is_g4x)
2455 brw_MOV(p, brw_null_reg(), dst);
2456
2457 brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
2458 inst->offset);
2459
2460 if (intel->gen == 4 && !intel->is_g4x) {
2461 /* gen4 errata: destination from a send can't be used as a
2462 * destination until it's been read. Just read it so we don't
2463 * have to worry.
2464 */
2465 brw_MOV(p, brw_null_reg(), dst);
2466 }
2467 }
2468
2469
2470 void
2471 fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
2472 {
2473 assert(inst->mlen != 0);
2474
2475 /* Clear any post-destination dependencies that would be ignored by
2476 * the block read. See the B-Spec for pre-gen5 send instruction.
2477 *
2478 * This could use a better solution, since texture sampling and
2479 * math reads could potentially run into it as well -- anywhere
2480 * that we have a SEND with a destination that is a register that
2481 * was written but not read within the last N instructions (what's
2482 * N? unsure). This is rare because of dead code elimination, but
2483 * not impossible.
2484 */
2485 if (intel->gen == 4 && !intel->is_g4x)
2486 brw_MOV(p, brw_null_reg(), dst);
2487
2488 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
2489 inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
2490
2491 if (intel->gen == 4 && !intel->is_g4x) {
2492 /* gen4 errata: destination from a send can't be used as a
2493 * destination until it's been read. Just read it so we don't
2494 * have to worry.
2495 */
2496 brw_MOV(p, brw_null_reg(), dst);
2497 }
2498 }
2499
2500 void
2501 fs_visitor::assign_curb_setup()
2502 {
2503 c->prog_data.first_curbe_grf = c->nr_payload_regs;
2504 c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
2505
2506 /* Map the offsets in the UNIFORM file to fixed HW regs. */
2507 foreach_iter(exec_list_iterator, iter, this->instructions) {
2508 fs_inst *inst = (fs_inst *)iter.get();
2509
2510 for (unsigned int i = 0; i < 3; i++) {
2511 if (inst->src[i].file == UNIFORM) {
2512 int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
2513 struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
2514 constant_nr / 8,
2515 constant_nr % 8);
2516
2517 inst->src[i].file = FIXED_HW_REG;
2518 inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
2519 }
2520 }
2521 }
2522 }
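
/* Editor's note: a worked example with made-up numbers. With
 * first_curbe_grf == 2, UNIFORM element 11 (hw_reg + reg_offset)
 * becomes brw_vec1_grf(2 + 11 / 8, 11 % 8), i.e. subregister 3 of g3,
 * since each CURBE register holds 8 packed float parameters.
 */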
2523
2524 void
2525 fs_visitor::calculate_urb_setup()
2526 {
2527 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2528 urb_setup[i] = -1;
2529 }
2530
2531 int urb_next = 0;
2532 /* Figure out where each of the incoming setup attributes lands. */
2533 if (intel->gen >= 6) {
2534 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2535 if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
2536 urb_setup[i] = urb_next++;
2537 }
2538 }
2539 } else {
2540 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
2541 for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
2542 if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
2543 int fp_index;
2544
2545 if (i >= VERT_RESULT_VAR0)
2546 fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
2547 else if (i <= VERT_RESULT_TEX7)
2548 fp_index = i;
2549 else
2550 fp_index = -1;
2551
2552 if (fp_index >= 0)
2553 urb_setup[fp_index] = urb_next++;
2554 }
2555 }
2556 }
2557
2558 /* Each attribute is 4 setup channels, each of which is half a reg. */
2559 c->prog_data.urb_read_length = urb_next * 2;
2560 }
2561
2562 void
2563 fs_visitor::assign_urb_setup()
2564 {
2565 int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;
2566
2567 /* Offset all the urb_setup[] indices by the actual position of the
2568 * setup regs, now that the location of the constants has been chosen.
2569 */
2570 foreach_iter(exec_list_iterator, iter, this->instructions) {
2571 fs_inst *inst = (fs_inst *)iter.get();
2572
2573 if (inst->opcode != FS_OPCODE_LINTERP)
2574 continue;
2575
2576 assert(inst->src[2].file == FIXED_HW_REG);
2577
2578 inst->src[2].fixed_hw_reg.nr += urb_start;
2579 }
2580
2581 this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
2582 }
2583
2584 /**
2585 * Split large virtual GRFs into separate components if we can.
2586 *
2587 * This is mostly duplicated with what brw_fs_vector_splitting does,
2588 * but that's really conservative because it's afraid of doing
2589 * splitting that doesn't result in real progress after the rest of
2590 * the optimization phases, which would cause infinite looping in
2591 * optimization. We can do it once here, safely. This also has the
2592 * opportunity to split interpolated values, or maybe even uniforms,
2593 * which we don't have at the IR level.
2594 *
2595 * We want to split, because virtual GRFs are what we register
2596 * allocate and spill (due to contiguousness requirements for some
2597 * instructions), and they're what we naturally generate in the
2598 * codegen process, but most virtual GRFs don't actually need to be
2599 * contiguous sets of GRFs. If we split, we'll end up with reduced
2600 * live intervals and better dead code elimination and coalescing.
2601 */
2602 void
2603 fs_visitor::split_virtual_grfs()
2604 {
2605 int num_vars = this->virtual_grf_next;
2606 bool split_grf[num_vars];
2607 int new_virtual_grf[num_vars];
2608
2609 /* Try to split anything > 0 sized. */
2610 for (int i = 0; i < num_vars; i++) {
2611 if (this->virtual_grf_sizes[i] != 1)
2612 split_grf[i] = true;
2613 else
2614 split_grf[i] = false;
2615 }
2616
2617 if (brw->has_pln) {
2618 /* PLN opcodes rely on the delta_xy being contiguous. */
2619 split_grf[this->delta_x.reg] = false;
2620 }
2621
2622 foreach_iter(exec_list_iterator, iter, this->instructions) {
2623 fs_inst *inst = (fs_inst *)iter.get();
2624
2625 /* Texturing produces 4 contiguous registers, so no splitting. */
2626 if ((inst->opcode == FS_OPCODE_TEX ||
2627 inst->opcode == FS_OPCODE_TXB ||
2628 inst->opcode == FS_OPCODE_TXL) &&
2629 inst->dst.file == GRF) {
2630 split_grf[inst->dst.reg] = false;
2631 }
2632 }
2633
2634 /* Allocate new space for split regs. Note that the virtual
2635 * numbers will be contiguous.
2636 */
2637 for (int i = 0; i < num_vars; i++) {
2638 if (split_grf[i]) {
2639 new_virtual_grf[i] = virtual_grf_alloc(1);
2640 for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
2641 int reg = virtual_grf_alloc(1);
2642 assert(reg == new_virtual_grf[i] + j - 1);
2643 (void) reg;
2644 }
2645 this->virtual_grf_sizes[i] = 1;
2646 }
2647 }
2648
2649 foreach_iter(exec_list_iterator, iter, this->instructions) {
2650 fs_inst *inst = (fs_inst *)iter.get();
2651
2652 if (inst->dst.file == GRF &&
2653 split_grf[inst->dst.reg] &&
2654 inst->dst.reg_offset != 0) {
2655 inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
2656 inst->dst.reg_offset - 1);
2657 inst->dst.reg_offset = 0;
2658 }
2659 for (int i = 0; i < 3; i++) {
2660 if (inst->src[i].file == GRF &&
2661 split_grf[inst->src[i].reg] &&
2662 inst->src[i].reg_offset != 0) {
2663 inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
2664 inst->src[i].reg_offset - 1);
2665 inst->src[i].reg_offset = 0;
2666 }
2667 }
2668 }
2669 }
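
/* Editor's note: an illustration with made-up numbers. A size-4
 * virtual GRF vgrf5 keeps vgrf5 for reg_offset 0 and gets three fresh
 * size-1 GRFs for offsets 1..3; a use of vgrf5 at reg_offset 2 is then
 * rewritten to new_virtual_grf[5] + 1 with reg_offset 0.
 */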
2670
2671 /**
2672 * Choose accesses from the UNIFORM file to demote to using the pull
2673 * constant buffer.
2674 *
2675 * We allow a fragment shader to use more uniform components than the
2676 * spec-required minimum for the maximum (64). If there are too many
2677 * of these, they'd fill up all of the register space.
2678 * So, this will push some of them out to the pull constant buffer and
2679 * update the program to load them.
2680 */
2681 void
2682 fs_visitor::setup_pull_constants()
2683 {
2684 /* Only allow 16 registers (128 uniform components) as push constants. */
2685 unsigned int max_uniform_components = 16 * 8;
2686 if (c->prog_data.nr_params <= max_uniform_components)
2687 return;
2688
2689 /* Just demote the end of the list. We could probably do better
2690 * here, demoting things that are rarely used in the program first.
2691 */
2692 int pull_uniform_base = max_uniform_components;
2693 int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;
2694
2695 foreach_iter(exec_list_iterator, iter, this->instructions) {
2696 fs_inst *inst = (fs_inst *)iter.get();
2697
2698 for (int i = 0; i < 3; i++) {
2699 if (inst->src[i].file != UNIFORM)
2700 continue;
2701
2702 int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
2703 if (uniform_nr < pull_uniform_base)
2704 continue;
2705
2706 fs_reg dst = fs_reg(this, glsl_type::float_type);
2707 fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
2708 dst);
2709 pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
2710 pull->ir = inst->ir;
2711 pull->annotation = inst->annotation;
2712 pull->base_mrf = 14;
2713 pull->mlen = 1;
2714
2715 inst->insert_before(pull);
2716
2717 inst->src[i].file = GRF;
2718 inst->src[i].reg = dst.reg;
2719 inst->src[i].reg_offset = 0;
2720 inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
2721 }
2722 }
2723
2724 for (int i = 0; i < pull_uniform_count; i++) {
2725 c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
2726 c->prog_data.pull_param_convert[i] =
2727 c->prog_data.param_convert[pull_uniform_base + i];
2728 }
2729 c->prog_data.nr_params -= pull_uniform_count;
2730 c->prog_data.nr_pull_params = pull_uniform_count;
2731 }
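
/* Editor's note: a worked example with made-up numbers. With
 * nr_params == 130, uniforms 128..129 are demoted. A use of uniform
 * 129 gets offset ((129 - 128) * 4) & ~15 == 0 and smear == 1, so the
 * load fetches the oword (four floats) at offset 0 and the smear picks
 * out the second component.
 */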
2732
2733 void
2734 fs_visitor::calculate_live_intervals()
2735 {
2736 int num_vars = this->virtual_grf_next;
2737 int *def = talloc_array(mem_ctx, int, num_vars);
2738 int *use = talloc_array(mem_ctx, int, num_vars);
2739 int loop_depth = 0;
2740 int loop_start = 0;
2741 int bb_header_ip = 0;
2742
2743 for (int i = 0; i < num_vars; i++) {
2744 def[i] = 1 << 30;
2745 use[i] = -1;
2746 }
2747
2748 int ip = 0;
2749 foreach_iter(exec_list_iterator, iter, this->instructions) {
2750 fs_inst *inst = (fs_inst *)iter.get();
2751
2752 if (inst->opcode == BRW_OPCODE_DO) {
2753 if (loop_depth++ == 0)
2754 loop_start = ip;
2755 } else if (inst->opcode == BRW_OPCODE_WHILE) {
2756 loop_depth--;
2757
2758 if (loop_depth == 0) {
2759 /* Patches up the use of vars marked for being live across
2760 * the whole loop.
2761 */
2762 for (int i = 0; i < num_vars; i++) {
2763 if (use[i] == loop_start) {
2764 use[i] = ip;
2765 }
2766 }
2767 }
2768 } else {
2769 for (unsigned int i = 0; i < 3; i++) {
2770 if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
2771 int reg = inst->src[i].reg;
2772
2773 if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
2774 def[reg] >= bb_header_ip)) {
2775 use[reg] = ip;
2776 } else {
2777 def[reg] = MIN2(loop_start, def[reg]);
2778 use[reg] = loop_start;
2779
2780 /* Nobody else is going to go smash our start to
2781 * later in the loop now, because def[reg] now
2782 * points before the bb header.
2783 */
2784 }
2785 }
2786 }
2787 if (inst->dst.file == GRF && inst->dst.reg != 0) {
2788 int reg = inst->dst.reg;
2789
2790 if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
2791 !inst->predicated)) {
2792 def[reg] = MIN2(def[reg], ip);
2793 } else {
2794 def[reg] = MIN2(def[reg], loop_start);
2795 }
2796 }
2797 }
2798
2799 ip++;
2800
2801 /* Set the basic block header IP. This is used for determining
2802 * if a complete def of a single-register virtual GRF in a loop
2803 * dominates a use in the same basic block. It's a quick way to
2804 * reduce the live interval range of most registers used in a
2805 * loop.
2806 */
2807 if (inst->opcode == BRW_OPCODE_IF ||
2808 inst->opcode == BRW_OPCODE_ELSE ||
2809 inst->opcode == BRW_OPCODE_ENDIF ||
2810 inst->opcode == BRW_OPCODE_DO ||
2811 inst->opcode == BRW_OPCODE_WHILE ||
2812 inst->opcode == BRW_OPCODE_BREAK ||
2813 inst->opcode == BRW_OPCODE_CONTINUE) {
2814 bb_header_ip = ip;
2815 }
2816 }
2817
2818 talloc_free(this->virtual_grf_def);
2819 talloc_free(this->virtual_grf_use);
2820 this->virtual_grf_def = def;
2821 this->virtual_grf_use = use;
2822 }
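
/* Editor's note: the loop handling above is deliberately conservative.
 * With made-up numbers: a multi-register GRF first written at ip 10
 * inside a loop whose DO is at ip 5 gets def = MIN2(5, 10) == 5 and
 * use pinned to 5, which the WHILE handler then patches to the loop's
 * end ip -- the value is treated as live across the entire loop.
 */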
2823
2824 /**
2825 * Attempts to move immediate constants into the immediate
2826 * constant slot of following instructions.
2827 *
2828 * Immediate constants are a bit tricky -- they have to be in the last
2829 * operand slot, and you can't do abs/negate on them.
2830 */
2831
2832 bool
2833 fs_visitor::propagate_constants()
2834 {
2835 bool progress = false;
2836
2837 foreach_iter(exec_list_iterator, iter, this->instructions) {
2838 fs_inst *inst = (fs_inst *)iter.get();
2839
2840 if (inst->opcode != BRW_OPCODE_MOV ||
2841 inst->predicated ||
2842 inst->dst.file != GRF || inst->src[0].file != IMM ||
2843 inst->dst.type != inst->src[0].type)
2844 continue;
2845
2846 /* Don't bother with cases where we should have had the
2847 * operation on the constant folded in GLSL already.
2848 */
2849 if (inst->saturate)
2850 continue;
2851
2852 /* Found a move of a constant to a GRF. Find anything else using the GRF
2853 * before it's written, and replace it with the constant if we can.
2854 */
2855 exec_list_iterator scan_iter = iter;
2856 scan_iter.next();
2857 for (; scan_iter.has_next(); scan_iter.next()) {
2858 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
2859
2860 if (scan_inst->opcode == BRW_OPCODE_DO ||
2861 scan_inst->opcode == BRW_OPCODE_WHILE ||
2862 scan_inst->opcode == BRW_OPCODE_ELSE ||
2863 scan_inst->opcode == BRW_OPCODE_ENDIF) {
2864 break;
2865 }
2866
2867 for (int i = 2; i >= 0; i--) {
2868 if (scan_inst->src[i].file != GRF ||
2869 scan_inst->src[i].reg != inst->dst.reg ||
2870 scan_inst->src[i].reg_offset != inst->dst.reg_offset)
2871 continue;
2872
2873 /* Don't bother with cases where we should have had the
2874 * operation on the constant folded in GLSL already.
2875 */
2876 if (scan_inst->src[i].negate || scan_inst->src[i].abs)
2877 continue;
2878
2879 switch (scan_inst->opcode) {
2880 case BRW_OPCODE_MOV:
2881 scan_inst->src[i] = inst->src[0];
2882 progress = true;
2883 break;
2884
2885 case BRW_OPCODE_MUL:
2886 case BRW_OPCODE_ADD:
2887 if (i == 1) {
2888 scan_inst->src[i] = inst->src[0];
2889 progress = true;
2890 } else if (i == 0 && scan_inst->src[1].file != IMM) {
2891 /* Fit this constant in by commuting the operands */
2892 scan_inst->src[0] = scan_inst->src[1];
2893 scan_inst->src[1] = inst->src[0];
progress = true;
2894 }
2895 break;
2896 case BRW_OPCODE_CMP:
2897 case BRW_OPCODE_SEL:
2898 if (i == 1) {
2899 scan_inst->src[i] = inst->src[0];
2900 progress = true;
2901 }
2902 }
2903 }
2904
2905 if (scan_inst->dst.file == GRF &&
2906 scan_inst->dst.reg == inst->dst.reg &&
2907 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
2908 scan_inst->opcode == FS_OPCODE_TEX)) {
2909 break;
2910 }
2911 }
2912 }
2913
2914 return progress;
2915 }
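
/* Editor's note: an illustration with made-up registers. Given
 *
 *    MOV vgrf4, 2.0f
 *    ADD vgrf5, vgrf4, vgrf6
 *
 * the pass rewrites the ADD to "ADD vgrf5, vgrf6, 2.0f", commuting the
 * operands so the immediate lands in the second source slot, which is
 * the only one that can hold it.
 */
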
2916 /**
2917 * Must be called after calculate_live_intervals() to remove unused
2918 * writes to registers -- register allocation will fail otherwise
2919 * because something defined but not used won't be considered to
2920 * interfere with other regs.
2921 */
2922 bool
2923 fs_visitor::dead_code_eliminate()
2924 {
2925 bool progress = false;
2926 int pc = 0;
2927
2928 foreach_iter(exec_list_iterator, iter, this->instructions) {
2929 fs_inst *inst = (fs_inst *)iter.get();
2930
2931 if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
2932 inst->remove();
2933 progress = true;
2934 }
2935
2936 pc++;
2937 }
2938
2939 return progress;
2940 }
2941
2942 bool
2943 fs_visitor::register_coalesce()
2944 {
2945 bool progress = false;
2946
2947 foreach_iter(exec_list_iterator, iter, this->instructions) {
2948 fs_inst *inst = (fs_inst *)iter.get();
2949
2950 if (inst->opcode != BRW_OPCODE_MOV ||
2951 inst->predicated ||
2952 inst->saturate ||
2953 inst->dst.file != GRF || inst->src[0].file != GRF ||
2954 inst->dst.type != inst->src[0].type)
2955 continue;
2956
2957 /* Found a move of a GRF to a GRF. Let's see if we can coalesce
2958 * them: check for no writes to either one until the exit of the
2959 * program.
2960 */
2961 bool interfered = false;
2962 exec_list_iterator scan_iter = iter;
2963 scan_iter.next();
2964 for (; scan_iter.has_next(); scan_iter.next()) {
2965 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
2966
2967 if (scan_inst->opcode == BRW_OPCODE_DO ||
2968 scan_inst->opcode == BRW_OPCODE_WHILE ||
2969 scan_inst->opcode == BRW_OPCODE_ENDIF) {
2970 interfered = true;
2971 iter = scan_iter;
2972 break;
2973 }
2974
2975 if (scan_inst->dst.file == GRF) {
2976 if (scan_inst->dst.reg == inst->dst.reg &&
2977 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
2978 scan_inst->opcode == FS_OPCODE_TEX)) {
2979 interfered = true;
2980 break;
2981 }
2982 if (scan_inst->dst.reg == inst->src[0].reg &&
2983 (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
2984 scan_inst->opcode == FS_OPCODE_TEX)) {
2985 interfered = true;
2986 break;
2987 }
2988 }
2989 }
2990 if (interfered) {
2991 continue;
2992 }
2993
2994 /* Update live interval so we don't have to recalculate. */
2995 this->virtual_grf_use[inst->src[0].reg] = MAX2(virtual_grf_use[inst->src[0].reg],
2996 virtual_grf_use[inst->dst.reg]);
2997
2998 /* Rewrite the later usage to point at the source of the move to
2999 * be removed.
3000 */
3001 for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
3002 scan_iter.next()) {
3003 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
3004
3005 for (int i = 0; i < 3; i++) {
3006 if (scan_inst->src[i].file == GRF &&
3007 scan_inst->src[i].reg == inst->dst.reg &&
3008 scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
3009 scan_inst->src[i].reg = inst->src[0].reg;
3010 scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
3011 scan_inst->src[i].abs |= inst->src[0].abs;
3012 scan_inst->src[i].negate ^= inst->src[0].negate;
3013 scan_inst->src[i].smear = inst->src[0].smear;
3014 }
3015 }
3016 }
3017
3018 inst->remove();
3019 progress = true;
3020 }
3021
3022 return progress;
3023 }
3024
3025
3026 bool
3027 fs_visitor::compute_to_mrf()
3028 {
3029 bool progress = false;
3030 int next_ip = 0;
3031
3032 foreach_iter(exec_list_iterator, iter, this->instructions) {
3033 fs_inst *inst = (fs_inst *)iter.get();
3034
3035 int ip = next_ip;
3036 next_ip++;
3037
3038 if (inst->opcode != BRW_OPCODE_MOV ||
3039 inst->predicated ||
3040 inst->dst.file != MRF || inst->src[0].file != GRF ||
3041 inst->dst.type != inst->src[0].type ||
3042 inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
3043 continue;
3044
3045 /* Can't compute-to-MRF this GRF if someone else was going to
3046 * read it later.
3047 */
3048 if (this->virtual_grf_use[inst->src[0].reg] > ip)
3049 continue;
3050
3051 /* Found a move of a GRF to a MRF. Let's see if we can go
3052 * rewrite the thing that made this GRF to write into the MRF.
3053 */
3054 fs_inst *scan_inst;
3055 for (scan_inst = (fs_inst *)inst->prev;
3056 scan_inst->prev != NULL;
3057 scan_inst = (fs_inst *)scan_inst->prev) {
3058 if (scan_inst->dst.file == GRF &&
3059 scan_inst->dst.reg == inst->src[0].reg) {
3060 /* Found the last thing to write our reg we want to turn
3061 * into a compute-to-MRF.
3062 */
3063
3064 if (scan_inst->opcode == FS_OPCODE_TEX) {
3065 /* Texturing writes several contiguous regs, so we can't
3066 * compute-to-mrf that.
3067 */
3068 break;
3069 }
3070
3071 /* If it's predicated, it (probably) didn't populate all
3072 * the channels.
3073 */
3074 if (scan_inst->predicated)
3075 break;
3076
3077 /* SEND instructions can't have MRF as a destination. */
3078 if (scan_inst->mlen)
3079 break;
3080
3081 if (intel->gen >= 6) {
3082 /* gen6 math instructions must have the destination be
3083 * GRF, so no compute-to-MRF for them.
3084 */
3085 if (scan_inst->opcode == FS_OPCODE_RCP ||
3086 scan_inst->opcode == FS_OPCODE_RSQ ||
3087 scan_inst->opcode == FS_OPCODE_SQRT ||
3088 scan_inst->opcode == FS_OPCODE_EXP2 ||
3089 scan_inst->opcode == FS_OPCODE_LOG2 ||
3090 scan_inst->opcode == FS_OPCODE_SIN ||
3091 scan_inst->opcode == FS_OPCODE_COS ||
3092 scan_inst->opcode == FS_OPCODE_POW) {
3093 break;
3094 }
3095 }
3096
3097 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
3098 /* Found the creator of our MRF's source value. */
3099 scan_inst->dst.file = MRF;
3100 scan_inst->dst.hw_reg = inst->dst.hw_reg;
3101 scan_inst->saturate |= inst->saturate;
3102 inst->remove();
3103 progress = true;
3104 }
3105 break;
3106 }
3107
3108 /* We don't handle flow control here. Most computations of
3109 * values that end up in MRFs happen shortly before the MRF
3110 * write anyway.
3111 */
3112 if (scan_inst->opcode == BRW_OPCODE_DO ||
3113 scan_inst->opcode == BRW_OPCODE_WHILE ||
3114 scan_inst->opcode == BRW_OPCODE_ENDIF) {
3115 break;
3116 }
3117
3118 /* You can't read from an MRF, so if someone else reads our
3119 * MRF's source GRF that we wanted to rewrite, that stops us.
3120 */
3121 bool interfered = false;
3122 for (int i = 0; i < 3; i++) {
3123 if (scan_inst->src[i].file == GRF &&
3124 scan_inst->src[i].reg == inst->src[0].reg &&
3125 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
3126 interfered = true;
3127 }
3128 }
3129 if (interfered)
3130 break;
3131
3132 if (scan_inst->dst.file == MRF &&
3133 scan_inst->dst.hw_reg == inst->dst.hw_reg) {
3134 /* Somebody else wrote our MRF here, so we can't
3135 * compute-to-MRF before that.
3136 */
3137 break;
3138 }
3139
3140 if (scan_inst->mlen > 0) {
3141 /* Found a SEND instruction, which means that there are
3142 * live values in MRFs from base_mrf to base_mrf +
3143 * scan_inst->mlen - 1. Don't go pushing our MRF write up
3144 * above it.
3145 */
3146 if (inst->dst.hw_reg >= scan_inst->base_mrf &&
3147 inst->dst.hw_reg < scan_inst->base_mrf + scan_inst->mlen) {
3148 break;
3149 }
3150 }
3151 }
3152 }
3153
3154 return progress;
3155 }
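
/* Editor's note: the transform above, with made-up registers:
 *
 *    MUL vgrf7, vgrf2, vgrf3        MUL m4, vgrf2, vgrf3
 *    MOV m4, vgrf7            ==>   (MOV removed)
 *
 * valid only when vgrf7 has no later readers and nothing between the
 * two instructions reads vgrf7 or writes m4.
 */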
3156
3157 /**
3158 * Walks through basic blocks, looking for repeated MRF writes and
3159 * removing the later ones.
3160 */
3161 bool
3162 fs_visitor::remove_duplicate_mrf_writes()
3163 {
3164 fs_inst *last_mrf_move[16];
3165 bool progress = false;
3166
3167 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3168
3169 foreach_iter(exec_list_iterator, iter, this->instructions) {
3170 fs_inst *inst = (fs_inst *)iter.get();
3171
3172 switch (inst->opcode) {
3173 case BRW_OPCODE_DO:
3174 case BRW_OPCODE_WHILE:
3175 case BRW_OPCODE_IF:
3176 case BRW_OPCODE_ELSE:
3177 case BRW_OPCODE_ENDIF:
3178 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3179 continue;
3180 default:
3181 break;
3182 }
3183
3184 if (inst->opcode == BRW_OPCODE_MOV &&
3185 inst->dst.file == MRF) {
3186 fs_inst *prev_inst = last_mrf_move[inst->dst.hw_reg];
3187 if (prev_inst && inst->equals(prev_inst)) {
3188 inst->remove();
3189 progress = true;
3190 continue;
3191 }
3192 }
3193
3194 /* Clear out the last-write records for MRFs that were overwritten. */
3195 if (inst->dst.file == MRF) {
3196 last_mrf_move[inst->dst.hw_reg] = NULL;
3197 }
3198
3199 if (inst->mlen > 0) {
3200 /* Found a SEND instruction, which will include two or fewer
3201 * implied MRF writes. We could do better here.
3202 */
3203 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3204 last_mrf_move[inst->base_mrf + i] = NULL;
3205 }
3206 }
3207
3208 /* Clear out any MRF move records whose sources got overwritten. */
3209 if (inst->dst.file == GRF) {
3210 for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
3211 if (last_mrf_move[i] &&
3212 last_mrf_move[i]->src[0].reg == inst->dst.reg) {
3213 last_mrf_move[i] = NULL;
3214 }
3215 }
3216 }
3217
3218 if (inst->opcode == BRW_OPCODE_MOV &&
3219 inst->dst.file == MRF &&
3220 inst->src[0].file == GRF &&
3221 !inst->predicated) {
3222 last_mrf_move[inst->dst.hw_reg] = inst;
3223 }
3224 }
3225
3226 return progress;
3227 }
3228
3229 bool
3230 fs_visitor::virtual_grf_interferes(int a, int b)
3231 {
3232 int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
3233 int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);
3234
3235 /* For dead code, just check if the def interferes with the other range. */
3236 if (this->virtual_grf_use[a] == -1) {
3237 return (this->virtual_grf_def[a] >= this->virtual_grf_def[b] &&
3238 this->virtual_grf_def[a] < this->virtual_grf_use[b]);
3239 }
3240 if (this->virtual_grf_use[b] == -1) {
3241 return (this->virtual_grf_def[b] >= this->virtual_grf_def[a] &&
3242 this->virtual_grf_def[b] < this->virtual_grf_use[a]);
3243 }
3244
3245 return start < end;
3246 }
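
/* Editor's note: the common case above is a half-open interval overlap
 * test on [def, use). E.g. ranges (2, 7) and (7, 9) don't interfere
 * (start == end == 7), while (2, 7) and (5, 9) do.
 */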
3247
3248 static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg)
3249 {
3250 struct brw_reg brw_reg;
3251
3252 switch (reg->file) {
3253 case GRF:
3254 case ARF:
3255 case MRF:
3256 if (reg->smear == -1) {
3257 brw_reg = brw_vec8_reg(reg->file,
3258 reg->hw_reg, 0);
3259 } else {
3260 brw_reg = brw_vec1_reg(reg->file,
3261 reg->hw_reg, reg->smear);
3262 }
3263 brw_reg = retype(brw_reg, reg->type);
3264 break;
3265 case IMM:
3266 switch (reg->type) {
3267 case BRW_REGISTER_TYPE_F:
3268 brw_reg = brw_imm_f(reg->imm.f);
3269 break;
3270 case BRW_REGISTER_TYPE_D:
3271 brw_reg = brw_imm_d(reg->imm.i);
3272 break;
3273 case BRW_REGISTER_TYPE_UD:
3274 brw_reg = brw_imm_ud(reg->imm.u);
3275 break;
3276 default:
3277 assert(!"not reached");
3278 break;
3279 }
3280 break;
3281 case FIXED_HW_REG:
3282 brw_reg = reg->fixed_hw_reg;
3283 break;
3284 case BAD_FILE:
3285 /* Probably unused. */
3286 brw_reg = brw_null_reg();
3287 break;
3288 case UNIFORM:
3289 assert(!"not reached");
3290 brw_reg = brw_null_reg();
3291 break;
3292 }
3293 if (reg->abs)
3294 brw_reg = brw_abs(brw_reg);
3295 if (reg->negate)
3296 brw_reg = negate(brw_reg);
3297
3298 return brw_reg;
3299 }
3300
3301 void
3302 fs_visitor::generate_code()
3303 {
3304 int last_native_inst = 0;
3305 struct brw_instruction *if_stack[16], *loop_stack[16];
3306 int if_stack_depth = 0, loop_stack_depth = 0;
3307 int if_depth_in_loop[16];
3308 const char *last_annotation_string = NULL;
3309 ir_instruction *last_annotation_ir = NULL;
3310
3311 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3312 printf("Native code for fragment shader %d:\n",
3313 ctx->Shader.CurrentFragmentProgram->Name);
3314 }
3315
3316 if_depth_in_loop[loop_stack_depth] = 0;
3317
3318 memset(if_stack, 0, sizeof(if_stack));
3319 foreach_iter(exec_list_iterator, iter, this->instructions) {
3320 fs_inst *inst = (fs_inst *)iter.get();
3321 struct brw_reg src[3], dst;
3322
3323 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3324 if (last_annotation_ir != inst->ir) {
3325 last_annotation_ir = inst->ir;
3326 if (last_annotation_ir) {
3327 printf(" ");
3328 last_annotation_ir->print();
3329 printf("\n");
3330 }
3331 }
3332 if (last_annotation_string != inst->annotation) {
3333 last_annotation_string = inst->annotation;
3334 if (last_annotation_string)
3335 printf(" %s\n", last_annotation_string);
3336 }
3337 }
3338
3339 for (unsigned int i = 0; i < 3; i++) {
3340 src[i] = brw_reg_from_fs_reg(&inst->src[i]);
3341 }
3342 dst = brw_reg_from_fs_reg(&inst->dst);
3343
3344 brw_set_conditionalmod(p, inst->conditional_mod);
3345 brw_set_predicate_control(p, inst->predicated);
3346 brw_set_saturate(p, inst->saturate);
3347
3348 switch (inst->opcode) {
3349 case BRW_OPCODE_MOV:
3350 brw_MOV(p, dst, src[0]);
3351 break;
3352 case BRW_OPCODE_ADD:
3353 brw_ADD(p, dst, src[0], src[1]);
3354 break;
3355 case BRW_OPCODE_MUL:
3356 brw_MUL(p, dst, src[0], src[1]);
3357 break;
3358
3359 case BRW_OPCODE_FRC:
3360 brw_FRC(p, dst, src[0]);
3361 break;
3362 case BRW_OPCODE_RNDD:
3363 brw_RNDD(p, dst, src[0]);
3364 break;
3365 case BRW_OPCODE_RNDE:
3366 brw_RNDE(p, dst, src[0]);
3367 break;
3368 case BRW_OPCODE_RNDZ:
3369 brw_RNDZ(p, dst, src[0]);
3370 break;
3371
3372 case BRW_OPCODE_AND:
3373 brw_AND(p, dst, src[0], src[1]);
3374 break;
3375 case BRW_OPCODE_OR:
3376 brw_OR(p, dst, src[0], src[1]);
3377 break;
3378 case BRW_OPCODE_XOR:
3379 brw_XOR(p, dst, src[0], src[1]);
3380 break;
3381 case BRW_OPCODE_NOT:
3382 brw_NOT(p, dst, src[0]);
3383 break;
3384 case BRW_OPCODE_ASR:
3385 brw_ASR(p, dst, src[0], src[1]);
3386 break;
3387 case BRW_OPCODE_SHR:
3388 brw_SHR(p, dst, src[0], src[1]);
3389 break;
3390 case BRW_OPCODE_SHL:
3391 brw_SHL(p, dst, src[0], src[1]);
3392 break;
3393
3394 case BRW_OPCODE_CMP:
3395 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
3396 break;
3397 case BRW_OPCODE_SEL:
3398 brw_SEL(p, dst, src[0], src[1]);
3399 break;
3400
3401 case BRW_OPCODE_IF:
3402 assert(if_stack_depth < 16);
3403 if (inst->src[0].file != BAD_FILE) {
3404 assert(intel->gen >= 6);
3405 if_stack[if_stack_depth] = brw_IF_gen6(p, inst->conditional_mod, src[0], src[1]);
3406 } else {
3407 if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
3408 }
3409 if_depth_in_loop[loop_stack_depth]++;
3410 if_stack_depth++;
3411 break;
3412
3413 case BRW_OPCODE_ELSE:
3414 if_stack[if_stack_depth - 1] =
3415 brw_ELSE(p, if_stack[if_stack_depth - 1]);
3416 break;
3417 case BRW_OPCODE_ENDIF:
3418 if_stack_depth--;
3419 brw_ENDIF(p, if_stack[if_stack_depth]);
3420 if_depth_in_loop[loop_stack_depth]--;
3421 break;
3422
3423 case BRW_OPCODE_DO:
3424 loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
3425 if_depth_in_loop[loop_stack_depth] = 0;
3426 break;
3427
3428 case BRW_OPCODE_BREAK:
3429 brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
3430 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3431 break;
3432 case BRW_OPCODE_CONTINUE:
3433 /* FINISHME: We still need to write the loop instruction support. */
3434 if (intel->gen >= 6)
3435 brw_CONT_gen6(p, loop_stack[loop_stack_depth - 1]);
3436 else
3437 brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
3438 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3439 break;
3440
3441 case BRW_OPCODE_WHILE: {
3442 struct brw_instruction *inst0, *inst1;
3443 GLuint br = 1;
3444
3445 if (intel->gen >= 5)
3446 br = 2;
3447
3448 assert(loop_stack_depth > 0);
3449 loop_stack_depth--;
3450 inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
3451 if (intel->gen < 6) {
3452 /* patch all the BREAK/CONT instructions from last BGNLOOP */
3453 while (inst0 > loop_stack[loop_stack_depth]) {
3454 inst0--;
3455 if (inst0->header.opcode == BRW_OPCODE_BREAK &&
3456 inst0->bits3.if_else.jump_count == 0) {
3457 inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
3458 }
3459 else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
3460 inst0->bits3.if_else.jump_count == 0) {
3461 inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
3462 }
3463 }
3464 }
3465 }
3466 break;
3467
3468 case FS_OPCODE_RCP:
3469 case FS_OPCODE_RSQ:
3470 case FS_OPCODE_SQRT:
3471 case FS_OPCODE_EXP2:
3472 case FS_OPCODE_LOG2:
3473 case FS_OPCODE_POW:
3474 case FS_OPCODE_SIN:
3475 case FS_OPCODE_COS:
3476 generate_math(inst, dst, src);
3477 break;
3478 case FS_OPCODE_LINTERP:
3479 generate_linterp(inst, dst, src);
3480 break;
3481 case FS_OPCODE_TEX:
3482 case FS_OPCODE_TXB:
3483 case FS_OPCODE_TXL:
3484 generate_tex(inst, dst);
3485 break;
3486 case FS_OPCODE_DISCARD_NOT:
3487 generate_discard_not(inst, dst);
3488 break;
3489 case FS_OPCODE_DISCARD_AND:
3490 generate_discard_and(inst, src[0]);
3491 break;
3492 case FS_OPCODE_DDX:
3493 generate_ddx(inst, dst, src[0]);
3494 break;
3495 case FS_OPCODE_DDY:
3496 generate_ddy(inst, dst, src[0]);
3497 break;
3498
3499 case FS_OPCODE_SPILL:
3500 generate_spill(inst, src[0]);
3501 break;
3502
3503 case FS_OPCODE_UNSPILL:
3504 generate_unspill(inst, dst);
3505 break;
3506
3507 case FS_OPCODE_PULL_CONSTANT_LOAD:
3508 generate_pull_constant_load(inst, dst);
3509 break;
3510
3511 case FS_OPCODE_FB_WRITE:
3512 generate_fb_write(inst);
3513 break;
3514 default:
3515 if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
3516 _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
3517 brw_opcodes[inst->opcode].name);
3518 } else {
3519 _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
3520 }
3521 this->fail = true;
3522 }
3523
3524 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3525 for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
3526 if (0) {
3527 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
3528 ((uint32_t *)&p->store[i])[3],
3529 ((uint32_t *)&p->store[i])[2],
3530 ((uint32_t *)&p->store[i])[1],
3531 ((uint32_t *)&p->store[i])[0]);
3532 }
3533 brw_disasm(stdout, &p->store[i], intel->gen);
3534 }
3535 }
3536
3537 last_native_inst = p->nr_insn;
3538 }
3539
3540 brw_set_uip_jip(p);
3541
3542 /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
3543 * emit issues, it doesn't get the jump distances into the output,
3544 * which is often something we want to debug. So this is here in
3545 * case you're doing that.
3546 */
3547 if (0) {
3548 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3549 for (unsigned int i = 0; i < p->nr_insn; i++) {
3550 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
3551 ((uint32_t *)&p->store[i])[3],
3552 ((uint32_t *)&p->store[i])[2],
3553 ((uint32_t *)&p->store[i])[1],
3554 ((uint32_t *)&p->store[i])[0]);
3555 brw_disasm(stdout, &p->store[i], intel->gen);
3556 }
3557 }
3558 }
3559 }
3560
3561 GLboolean
3562 brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
3563 {
3564 struct intel_context *intel = &brw->intel;
3565 struct gl_context *ctx = &intel->ctx;
3566 struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;
3567
3568 if (!prog)
3569 return GL_FALSE;
3570
3571 struct brw_shader *shader =
3572 (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
3573 if (!shader)
3574 return GL_FALSE;
3575
3576 /* We always use 8-wide mode, at least for now. For one, flow
3577 * control only works in 8-wide. Also, when we're fragment shader
3578 * bound, we're almost always under register pressure as well, so
3579 * 8-wide would save us from the performance cliff of spilling
3580 * regs.
3581 */
3582 c->dispatch_width = 8;
3583
3584 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3585 printf("GLSL IR for native fragment shader %d:\n", prog->Name);
3586 _mesa_print_ir(shader->ir, NULL);
3587 printf("\n");
3588 }
3589
3590 /* Now the main event: Visit the shader IR and generate our FS IR for it.
3591 */
3592 fs_visitor v(c, shader);
3593
3594 if (0) {
3595 v.emit_dummy_fs();
3596 } else {
3597 v.calculate_urb_setup();
3598 if (intel->gen < 6)
3599 v.emit_interpolation_setup_gen4();
3600 else
3601 v.emit_interpolation_setup_gen6();
3602
3603 /* Generate FS IR for main(). (the visitor only descends into
3604 * functions called "main").
3605 */
3606 foreach_iter(exec_list_iterator, iter, *shader->ir) {
3607 ir_instruction *ir = (ir_instruction *)iter.get();
3608 v.base_ir = ir;
3609 ir->accept(&v);
3610 }
3611
3612 v.emit_fb_writes();
3613
3614 v.split_virtual_grfs();
3615 v.setup_pull_constants();
3616
3617 v.assign_curb_setup();
3618 v.assign_urb_setup();
3619
3620 bool progress;
3621 do {
3622 progress = false;
3623
3624 progress = v.remove_duplicate_mrf_writes() || progress;
3625
3626 v.calculate_live_intervals();
3627 progress = v.propagate_constants() || progress;
3628 progress = v.register_coalesce() || progress;
3629 progress = v.compute_to_mrf() || progress;
3630 progress = v.dead_code_eliminate() || progress;
3631 } while (progress);
3632
3633 if (0) {
3634 /* Debug of register spilling: Go spill everything. */
3635 int virtual_grf_count = v.virtual_grf_next;
3636 for (int i = 1; i < virtual_grf_count; i++) {
3637 v.spill_reg(i);
3638 }
3639 v.calculate_live_intervals();
3640 }
3641
3642 if (0)
3643 v.assign_regs_trivial();
3644 else {
3645 while (!v.assign_regs()) {
3646 if (v.fail)
3647 break;
3648
3649 v.calculate_live_intervals();
3650 }
3651 }
3652 }
3653
3654 if (!v.fail)
3655 v.generate_code();
3656
3657 assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */
3658
3659 if (v.fail)
3660 return GL_FALSE;
3661
3662 c->prog_data.total_grf = v.grf_used;
3663
3664 return GL_TRUE;
3665 }