1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 extern "C" {
29
30 #include <sys/types.h>
31
32 #include "main/macros.h"
33 #include "main/shaderobj.h"
34 #include "main/uniforms.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "program/prog_optimize.h"
38 #include "program/register_allocate.h"
39 #include "program/sampler.h"
40 #include "program/hash_table.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_wm.h"
44 #include "talloc.h"
45 }
46 #include "brw_fs.h"
47 #include "../glsl/glsl_types.h"
48 #include "../glsl/ir_optimization.h"
49 #include "../glsl/ir_print_visitor.h"
50
51 static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
52
53 struct gl_shader *
54 brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
55 {
56 struct brw_shader *shader;
57
58 shader = talloc_zero(NULL, struct brw_shader);
59 if (shader) {
60 shader->base.Type = type;
61 shader->base.Name = name;
62 _mesa_init_shader(ctx, &shader->base);
63 }
64
65 return &shader->base;
66 }
67
68 struct gl_shader_program *
69 brw_new_shader_program(struct gl_context *ctx, GLuint name)
70 {
71 struct brw_shader_program *prog;
72 prog = talloc_zero(NULL, struct brw_shader_program);
73 if (prog) {
74 prog->base.Name = name;
75 _mesa_init_shader_program(ctx, &prog->base);
76 }
77 return &prog->base;
78 }
79
80 GLboolean
81 brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
82 {
83 if (!_mesa_ir_compile_shader(ctx, shader))
84 return GL_FALSE;
85
86 return GL_TRUE;
87 }
88
89 GLboolean
90 brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
91 {
92 struct brw_shader *shader =
93 (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
94 if (shader != NULL) {
95 void *mem_ctx = talloc_new(NULL);
96 bool progress;
97
98 if (shader->ir)
99 talloc_free(shader->ir);
100 shader->ir = new(shader) exec_list;
101 clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
102
103 do_mat_op_to_vec(shader->ir);
104 lower_instructions(shader->ir,
105 MOD_TO_FRACT |
106 DIV_TO_MUL_RCP |
107 SUB_TO_ADD_NEG |
108 EXP_TO_EXP2 |
109 LOG_TO_LOG2);
110 do_lower_texture_projection(shader->ir);
111 brw_do_cubemap_normalize(shader->ir);
112
113 do {
114 progress = false;
115
116 brw_do_channel_expressions(shader->ir);
117 brw_do_vector_splitting(shader->ir);
118
119 progress = do_lower_jumps(shader->ir, true, true,
120 true, /* main return */
121 false, /* continue */
122 false /* loops */
123 ) || progress;
124
125 progress = do_common_optimization(shader->ir, true, 32) || progress;
126
127 progress = lower_noise(shader->ir) || progress;
128 progress =
129 lower_variable_index_to_cond_assign(shader->ir,
130 GL_TRUE, /* input */
131 GL_TRUE, /* output */
132 GL_TRUE, /* temp */
133 GL_TRUE /* uniform */
134 ) || progress;
135 progress = lower_quadop_vector(shader->ir, false) || progress;
136 } while (progress);
137
138 validate_ir_tree(shader->ir);
139
140 reparent_ir(shader->ir, shader->ir);
141 talloc_free(mem_ctx);
142 }
143
144 if (!_mesa_ir_link_shader(ctx, prog))
145 return GL_FALSE;
146
147 return GL_TRUE;
148 }
149
150 static int
151 type_size(const struct glsl_type *type)
152 {
153 unsigned int size, i;
154
155 switch (type->base_type) {
156 case GLSL_TYPE_UINT:
157 case GLSL_TYPE_INT:
158 case GLSL_TYPE_FLOAT:
159 case GLSL_TYPE_BOOL:
160 return type->components();
161 case GLSL_TYPE_ARRAY:
162 return type_size(type->fields.array) * type->length;
163 case GLSL_TYPE_STRUCT:
164 size = 0;
165 for (i = 0; i < type->length; i++) {
166 size += type_size(type->fields.structure[i].type);
167 }
168 return size;
169 case GLSL_TYPE_SAMPLER:
170 /* Samplers take up no register space, since they're baked in at
171 * link time.
172 */
173 return 0;
174 default:
175 assert(!"not reached");
176 return 0;
177 }
178 }
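/* A few illustrative sizes under this counting: a float or bool takes 1
 * slot, a vec4 takes 4, a mat4 takes 16 (matrices are FLOAT-based types
 * whose components() spans every column), and vec2[3] takes 6.
 */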
179
180 /**
181 * Returns how many MRFs an FS opcode will write over.
182 *
183  * Note that this is not just the 0 or 1 implied writes of an actual gen
184  * instruction -- the FS opcodes often generate additional MOVs as well.
185 */
186 int
187 fs_visitor::implied_mrf_writes(fs_inst *inst)
188 {
189 if (inst->mlen == 0)
190 return 0;
191
192 switch (inst->opcode) {
193 case FS_OPCODE_RCP:
194 case FS_OPCODE_RSQ:
195 case FS_OPCODE_SQRT:
196 case FS_OPCODE_EXP2:
197 case FS_OPCODE_LOG2:
198 case FS_OPCODE_SIN:
199 case FS_OPCODE_COS:
200 return 1;
201 case FS_OPCODE_POW:
202 return 2;
203 case FS_OPCODE_TEX:
204 case FS_OPCODE_TXB:
205 case FS_OPCODE_TXL:
206 return 1;
207 case FS_OPCODE_FB_WRITE:
208 return 2;
209 case FS_OPCODE_PULL_CONSTANT_LOAD:
210 case FS_OPCODE_UNSPILL:
211 return 1;
212 case FS_OPCODE_SPILL:
213 return 2;
214 default:
215 assert(!"not reached");
216 return inst->mlen;
217 }
218 }
219
220 int
221 fs_visitor::virtual_grf_alloc(int size)
222 {
223 if (virtual_grf_array_size <= virtual_grf_next) {
224 if (virtual_grf_array_size == 0)
225 virtual_grf_array_size = 16;
226 else
227 virtual_grf_array_size *= 2;
228 virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes,
229 int, virtual_grf_array_size);
230
231 /* This slot is always unused. */
232 virtual_grf_sizes[0] = 0;
233 }
234 virtual_grf_sizes[virtual_grf_next] = size;
235 return virtual_grf_next++;
236 }
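/* Presumably virtual_grf_next starts at 1, with slot 0 kept as the
 * reserved "no register" sentinel zeroed above, so the first allocation
 * grows the size array to 16 entries and returns virtual GRF 1.
 */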
237
238 /** Fixed HW reg constructor. */
239 fs_reg::fs_reg(enum register_file file, int hw_reg)
240 {
241 init();
242 this->file = file;
243 this->hw_reg = hw_reg;
244 this->type = BRW_REGISTER_TYPE_F;
245 }
246
247 /** Fixed HW reg constructor. */
248 fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
249 {
250 init();
251 this->file = file;
252 this->hw_reg = hw_reg;
253 this->type = type;
254 }
255
256 int
257 brw_type_for_base_type(const struct glsl_type *type)
258 {
259 switch (type->base_type) {
260 case GLSL_TYPE_FLOAT:
261 return BRW_REGISTER_TYPE_F;
262 case GLSL_TYPE_INT:
263 case GLSL_TYPE_BOOL:
264 return BRW_REGISTER_TYPE_D;
265 case GLSL_TYPE_UINT:
266 return BRW_REGISTER_TYPE_UD;
267 case GLSL_TYPE_ARRAY:
268 case GLSL_TYPE_STRUCT:
269 case GLSL_TYPE_SAMPLER:
270 /* These should be overridden with the type of the member when
271 * dereferenced into. BRW_REGISTER_TYPE_UD is used here since it is
272 * likely to trip something up quickly if we forget to do so.
273 */
274 return BRW_REGISTER_TYPE_UD;
275 default:
276 assert(!"not reached");
277 return BRW_REGISTER_TYPE_F;
278 }
279 }
280
281 /** Automatic reg constructor. */
282 fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
283 {
284 init();
285
286 this->file = GRF;
287 this->reg = v->virtual_grf_alloc(type_size(type));
288 this->reg_offset = 0;
289 this->type = brw_type_for_base_type(type);
290 }
291
292 fs_reg *
293 fs_visitor::variable_storage(ir_variable *var)
294 {
295 return (fs_reg *)hash_table_find(this->variable_ht, var);
296 }
297
298 /* Our support for uniforms is piggy-backed on the struct
299 * gl_fragment_program, because that's where the values actually
300 * get stored, rather than in some global gl_shader_program uniform
301 * store.
302 */
303 int
304 fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
305 {
306 unsigned int offset = 0;
307 float *vec_values;
308
309 if (type->is_matrix()) {
310 const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
311 type->vector_elements,
312 1);
313
314 for (unsigned int i = 0; i < type->matrix_columns; i++) {
315 offset += setup_uniform_values(loc + offset, column);
316 }
317
318 return offset;
319 }
320
321 switch (type->base_type) {
322 case GLSL_TYPE_FLOAT:
323 case GLSL_TYPE_UINT:
324 case GLSL_TYPE_INT:
325 case GLSL_TYPE_BOOL:
326 vec_values = fp->Base.Parameters->ParameterValues[loc];
327 for (unsigned int i = 0; i < type->vector_elements; i++) {
328 unsigned int param = c->prog_data.nr_params++;
329
330 assert(param < ARRAY_SIZE(c->prog_data.param));
331
332 switch (type->base_type) {
333 case GLSL_TYPE_FLOAT:
334 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
335 break;
336 case GLSL_TYPE_UINT:
337 c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
338 break;
339 case GLSL_TYPE_INT:
340 c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
341 break;
342 case GLSL_TYPE_BOOL:
343 c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
344 break;
345 default:
346 assert(!"not reached");
347 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
348 break;
349 }
350
351 c->prog_data.param[param] = &vec_values[i];
352 }
353 return 1;
354
355 case GLSL_TYPE_STRUCT:
356 for (unsigned int i = 0; i < type->length; i++) {
357 offset += setup_uniform_values(loc + offset,
358 type->fields.structure[i].type);
359 }
360 return offset;
361
362 case GLSL_TYPE_ARRAY:
363 for (unsigned int i = 0; i < type->length; i++) {
364 offset += setup_uniform_values(loc + offset, type->fields.array);
365 }
366 return offset;
367
368 case GLSL_TYPE_SAMPLER:
369 /* The sampler takes up a slot, but we don't use any values from it. */
370 return 1;
371
372 default:
373 assert(!"not reached");
374 return 0;
375 }
376 }
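/* For example, a mat3 uniform starting at location loc is walked as
 * three vec3 columns: the call returns 3, and nine pointers into the
 * parameter storage (one per component) are appended to
 * c->prog_data.param.
 */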
377
378
379 /* Our support for builtin uniforms is even scarier than non-builtin.
380 * It sits on top of the PROG_STATE_VAR parameters that are
381 * automatically updated from GL context state.
382 */
383 void
384 fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
385 {
386 const struct gl_builtin_uniform_desc *statevar = NULL;
387
388 for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
389 statevar = &_mesa_builtin_uniform_desc[i];
390 if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0)
391 break;
392 }
393
394 if (!statevar->name) {
395 this->fail = true;
396 printf("Failed to find builtin uniform `%s'\n", ir->name);
397 return;
398 }
399
400 int array_count;
401 if (ir->type->is_array()) {
402 array_count = ir->type->length;
403 } else {
404 array_count = 1;
405 }
406
407 for (int a = 0; a < array_count; a++) {
408 for (unsigned int i = 0; i < statevar->num_elements; i++) {
409 struct gl_builtin_uniform_element *element = &statevar->elements[i];
410 int tokens[STATE_LENGTH];
411
412 memcpy(tokens, element->tokens, sizeof(element->tokens));
413 if (ir->type->is_array()) {
414 tokens[1] = a;
415 }
416
417 /* This state reference has already been setup by ir_to_mesa,
418 * but we'll get the same index back here.
419 */
420 int index = _mesa_add_state_reference(this->fp->Base.Parameters,
421 (gl_state_index *)tokens);
422 float *vec_values = this->fp->Base.Parameters->ParameterValues[index];
423
424 /* Add each of the unique swizzles of the element as a
425 * parameter. This'll end up matching the expected layout of
426 * the array/matrix/structure we're trying to fill in.
427 */
428 int last_swiz = -1;
429 for (unsigned int i = 0; i < 4; i++) {
430 int swiz = GET_SWZ(element->swizzle, i);
431 if (swiz == last_swiz)
432 break;
433 last_swiz = swiz;
434
435 c->prog_data.param_convert[c->prog_data.nr_params] =
436 PARAM_NO_CONVERT;
437 c->prog_data.param[c->prog_data.nr_params++] = &vec_values[swiz];
438 }
439 }
440 }
441 }
442
443 fs_reg *
444 fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
445 {
446 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
447 fs_reg wpos = *reg;
448 fs_reg neg_y = this->pixel_y;
449 neg_y.negate = true;
450 bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;
451
452 /* gl_FragCoord.x */
453 if (ir->pixel_center_integer) {
454 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
455 } else {
456 emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
457 }
458 wpos.reg_offset++;
459
460 /* gl_FragCoord.y */
461 if (!flip && ir->pixel_center_integer) {
462 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
463 } else {
464 fs_reg pixel_y = this->pixel_y;
465 float offset = (ir->pixel_center_integer ? 0.0 : 0.5);
466
467 if (flip) {
468 pixel_y.negate = true;
469 offset += c->key.drawable_height - 1.0;
470 }
471
472 emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
473 }
474 wpos.reg_offset++;
475
476 /* gl_FragCoord.z */
477 if (intel->gen >= 6) {
478 emit(fs_inst(BRW_OPCODE_MOV, wpos,
479 fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
480 } else {
481 emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
482 interp_reg(FRAG_ATTRIB_WPOS, 2)));
483 }
484 wpos.reg_offset++;
485
486 /* gl_FragCoord.w: Already set up in emit_interpolation_setup */
487 emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));
488
489 return reg;
490 }
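/* A worked example of the flip logic above: a shader using the default
 * lower-left origin (origin_upper_left == false) rendering to a window
 * (render_to_fbo == false) gets flip == true, so gl_FragCoord.y is
 * computed as (drawable_height - 1 + 0.5) - pixel_y, converting the
 * payload's top-down Y into GL's bottom-up convention.
 */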
491
492 fs_reg *
493 fs_visitor::emit_general_interpolation(ir_variable *ir)
494 {
495 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
496 /* Interpolation is always in floating point regs. */
497 reg->type = BRW_REGISTER_TYPE_F;
498 fs_reg attr = *reg;
499
500 unsigned int array_elements;
501 const glsl_type *type;
502
503 if (ir->type->is_array()) {
504 array_elements = ir->type->length;
505 if (array_elements == 0) {
506 this->fail = true;
507 }
508 type = ir->type->fields.array;
509 } else {
510 array_elements = 1;
511 type = ir->type;
512 }
513
514 int location = ir->location;
515 for (unsigned int i = 0; i < array_elements; i++) {
516 for (unsigned int j = 0; j < type->matrix_columns; j++) {
517 if (urb_setup[location] == -1) {
518 /* If there's no incoming setup data for this slot, don't
519 * emit interpolation for it.
520 */
521 attr.reg_offset += type->vector_elements;
522 location++;
523 continue;
524 }
525
526 for (unsigned int c = 0; c < type->vector_elements; c++) {
527 struct brw_reg interp = interp_reg(location, c);
528 emit(fs_inst(FS_OPCODE_LINTERP,
529 attr,
530 this->delta_x,
531 this->delta_y,
532 fs_reg(interp)));
533 attr.reg_offset++;
534 }
535
536 if (intel->gen < 6) {
537 attr.reg_offset -= type->vector_elements;
538 for (unsigned int c = 0; c < type->vector_elements; c++) {
539 emit(fs_inst(BRW_OPCODE_MUL,
540 attr,
541 attr,
542 this->pixel_w));
543 attr.reg_offset++;
544 }
545 }
546 location++;
547 }
548 }
549
550 return reg;
551 }
552
553 fs_reg *
554 fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
555 {
556 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
557
558 /* The frontfacing comes in as a bit in the thread payload. */
559 if (intel->gen >= 6) {
560 emit(fs_inst(BRW_OPCODE_ASR,
561 *reg,
562 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
563 fs_reg(15)));
564 emit(fs_inst(BRW_OPCODE_NOT,
565 *reg,
566 *reg));
567 emit(fs_inst(BRW_OPCODE_AND,
568 *reg,
569 *reg,
570 fs_reg(1)));
571 } else {
572 struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
573 /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
574 * us front face
575 */
576 fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
577 *reg,
578 fs_reg(r1_6ud),
579 fs_reg(1u << 31)));
580 inst->conditional_mod = BRW_CONDITIONAL_L;
581 emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
582 }
583
584 return reg;
585 }
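/* Both paths above normalize the result to a 0/1 boolean: on gen6 the
 * ASR/NOT/AND sequence extracts and inverts what appears to be the
 * back-facing bit of g0, while on gen4/5 the CMP writes all-ones for
 * front-facing pixels and the AND masks that down to 1.
 */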
586
587 fs_inst *
588 fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
589 {
590 switch (opcode) {
591 case FS_OPCODE_RCP:
592 case FS_OPCODE_RSQ:
593 case FS_OPCODE_SQRT:
594 case FS_OPCODE_EXP2:
595 case FS_OPCODE_LOG2:
596 case FS_OPCODE_SIN:
597 case FS_OPCODE_COS:
598 break;
599 default:
600 assert(!"not reached: bad math opcode");
601 return NULL;
602 }
603
604 /* Can't do hstride == 0 args to gen6 math, so expand it out. We
605 * might be able to do better by doing execsize = 1 math and then
606 * expanding that result out, but we would need to be careful with
607 * masking.
608 *
609 * The hardware ignores source modifiers (negate and abs) on math
610 * instructions, so we also move to a temp to set those up.
611 */
612 if (intel->gen >= 6 && (src.file == UNIFORM ||
613 src.abs ||
614 src.negate)) {
615 fs_reg expanded = fs_reg(this, glsl_type::float_type);
616 emit(fs_inst(BRW_OPCODE_MOV, expanded, src));
617 src = expanded;
618 }
619
620 fs_inst *inst = emit(fs_inst(opcode, dst, src));
621
622 if (intel->gen < 6) {
623 inst->base_mrf = 2;
624 inst->mlen = 1;
625 }
626
627 return inst;
628 }
629
630 fs_inst *
631 fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
632 {
633 int base_mrf = 2;
634 fs_inst *inst;
635
636 assert(opcode == FS_OPCODE_POW);
637
638 if (intel->gen >= 6) {
639 /* Can't do hstride == 0 args to gen6 math, so expand it out. */
640 if (src0.file == UNIFORM) {
641 fs_reg expanded = fs_reg(this, glsl_type::float_type);
642 emit(fs_inst(BRW_OPCODE_MOV, expanded, src0));
643 src0 = expanded;
644 }
645
646 if (src1.file == UNIFORM) {
647 fs_reg expanded = fs_reg(this, glsl_type::float_type);
648 emit(fs_inst(BRW_OPCODE_MOV, expanded, src1));
649 src1 = expanded;
650 }
651
652 inst = emit(fs_inst(opcode, dst, src0, src1));
653 } else {
654 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1));
655 inst = emit(fs_inst(opcode, dst, src0, reg_null_f));
656
657 inst->base_mrf = base_mrf;
658 inst->mlen = 2;
659 }
660 return inst;
661 }
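/* The split above mirrors the hardware: on gen6+ math is an ordinary
 * ALU instruction taking both sources directly, while gen4/5 math is a
 * send to the shared math unit, so src1 is staged in m(base_mrf + 1),
 * src0 reaches m(base_mrf) when the send is generated, and mlen = 2
 * covers the pair.
 */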
662
663 void
664 fs_visitor::visit(ir_variable *ir)
665 {
666 fs_reg *reg = NULL;
667
668 if (variable_storage(ir))
669 return;
670
671 if (strcmp(ir->name, "gl_FragColor") == 0) {
672 this->frag_color = ir;
673 } else if (strcmp(ir->name, "gl_FragData") == 0) {
674 this->frag_data = ir;
675 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
676 this->frag_depth = ir;
677 }
678
679 if (ir->mode == ir_var_in) {
680 if (!strcmp(ir->name, "gl_FragCoord")) {
681 reg = emit_fragcoord_interpolation(ir);
682 } else if (!strcmp(ir->name, "gl_FrontFacing")) {
683 reg = emit_frontfacing_interpolation(ir);
684 } else {
685 reg = emit_general_interpolation(ir);
686 }
687 assert(reg);
688 hash_table_insert(this->variable_ht, reg, ir);
689 return;
690 }
691
692 if (ir->mode == ir_var_uniform) {
693 int param_index = c->prog_data.nr_params;
694
695 if (!strncmp(ir->name, "gl_", 3)) {
696 setup_builtin_uniform_values(ir);
697 } else {
698 setup_uniform_values(ir->location, ir->type);
699 }
700
701 reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
702 reg->type = brw_type_for_base_type(ir->type);
703 }
704
705 if (!reg)
706 reg = new(this->mem_ctx) fs_reg(this, ir->type);
707
708 hash_table_insert(this->variable_ht, reg, ir);
709 }
710
711 void
712 fs_visitor::visit(ir_dereference_variable *ir)
713 {
714 fs_reg *reg = variable_storage(ir->var);
715 this->result = *reg;
716 }
717
718 void
719 fs_visitor::visit(ir_dereference_record *ir)
720 {
721 const glsl_type *struct_type = ir->record->type;
722
723 ir->record->accept(this);
724
725 unsigned int offset = 0;
726 for (unsigned int i = 0; i < struct_type->length; i++) {
727 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
728 break;
729 offset += type_size(struct_type->fields.structure[i].type);
730 }
731 this->result.reg_offset += offset;
732 this->result.type = brw_type_for_base_type(ir->type);
733 }
734
735 void
736 fs_visitor::visit(ir_dereference_array *ir)
737 {
738 ir_constant *index;
739 int element_size;
740
741 ir->array->accept(this);
742 index = ir->array_index->as_constant();
743
744 element_size = type_size(ir->type);
745 this->result.type = brw_type_for_base_type(ir->type);
746
747 if (index) {
748 assert(this->result.file == UNIFORM ||
749 (this->result.file == GRF &&
750 this->result.reg != 0));
751 this->result.reg_offset += index->value.i[0] * element_size;
752 } else {
753 assert(!"FINISHME: non-constant array element");
754 }
755 }
756
757 /* Instruction selection: Produce a MOV.sat instead of
758 * MIN(MAX(val, 0), 1) when possible.
759 */
760 bool
761 fs_visitor::try_emit_saturate(ir_expression *ir)
762 {
763 ir_rvalue *sat_val = ir->as_rvalue_to_saturate();
764
765 if (!sat_val)
766 return false;
767
768 sat_val->accept(this);
769 fs_reg src = this->result;
770
771 this->result = fs_reg(this, ir->type);
772 fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, src));
773 inst->saturate = true;
774
775 return true;
776 }
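/* e.g. the GLSL expression clamp(x, 0.0, 1.0), once lowered to a
 * min/max pair, collapses here into one saturating MOV instead of a
 * CMP/SEL sequence per bound.
 */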
777
778 void
779 fs_visitor::visit(ir_expression *ir)
780 {
781 unsigned int operand;
782 fs_reg op[2], temp;
783 fs_inst *inst;
784
785 assert(ir->get_num_operands() <= 2);
786
787 if (try_emit_saturate(ir))
788 return;
789
790 for (operand = 0; operand < ir->get_num_operands(); operand++) {
791 ir->operands[operand]->accept(this);
792 if (this->result.file == BAD_FILE) {
793 ir_print_visitor v;
794 printf("Failed to get tree for expression operand:\n");
795 ir->operands[operand]->accept(&v);
796 this->fail = true;
797 }
798 op[operand] = this->result;
799
800 /* Matrix expression operands should have been broken down to vector
801 * operations already.
802 */
803 assert(!ir->operands[operand]->type->is_matrix());
804 /* And then those vector operands should have been broken down to scalar.
805 */
806 assert(!ir->operands[operand]->type->is_vector());
807 }
808
809 /* Storage for our result. If our result goes into an assignment, it will
810 * just get copy-propagated out, so no worries.
811 */
812 this->result = fs_reg(this, ir->type);
813
814 switch (ir->operation) {
815 case ir_unop_logic_not:
816 /* Note that BRW_OPCODE_NOT is not appropriate here, since it takes
817 * the one's complement of the whole register, not just of bit 0.
818 */
819 emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)));
820 break;
821 case ir_unop_neg:
822 op[0].negate = !op[0].negate;
823 this->result = op[0];
824 break;
825 case ir_unop_abs:
826 op[0].abs = true;
827 this->result = op[0];
828 break;
829 case ir_unop_sign:
830 temp = fs_reg(this, ir->type);
831
832 emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));
833
834 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
835 inst->conditional_mod = BRW_CONDITIONAL_G;
836 inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
837 inst->predicated = true;
838
839 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
840 inst->conditional_mod = BRW_CONDITIONAL_L;
841 inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
842 inst->predicated = true;
843
844 break;
845 case ir_unop_rcp:
846 emit_math(FS_OPCODE_RCP, this->result, op[0]);
847 break;
848
849 case ir_unop_exp2:
850 emit_math(FS_OPCODE_EXP2, this->result, op[0]);
851 break;
852 case ir_unop_log2:
853 emit_math(FS_OPCODE_LOG2, this->result, op[0]);
854 break;
855 case ir_unop_exp:
856 case ir_unop_log:
857 assert(!"not reached: should be handled by ir_explog_to_explog2");
858 break;
859 case ir_unop_sin:
860 case ir_unop_sin_reduced:
861 emit_math(FS_OPCODE_SIN, this->result, op[0]);
862 break;
863 case ir_unop_cos:
864 case ir_unop_cos_reduced:
865 emit_math(FS_OPCODE_COS, this->result, op[0]);
866 break;
867
868 case ir_unop_dFdx:
869 emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
870 break;
871 case ir_unop_dFdy:
872 emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
873 break;
874
875 case ir_binop_add:
876 emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
877 break;
878 case ir_binop_sub:
879 assert(!"not reached: should be handled by ir_sub_to_add_neg");
880 break;
881
882 case ir_binop_mul:
883 emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
884 break;
885 case ir_binop_div:
886 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
887 break;
888 case ir_binop_mod:
889 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
890 break;
891
892 case ir_binop_less:
893 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
894 inst->conditional_mod = BRW_CONDITIONAL_L;
895 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
896 break;
897 case ir_binop_greater:
898 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
899 inst->conditional_mod = BRW_CONDITIONAL_G;
900 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
901 break;
902 case ir_binop_lequal:
903 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
904 inst->conditional_mod = BRW_CONDITIONAL_LE;
905 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
906 break;
907 case ir_binop_gequal:
908 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
909 inst->conditional_mod = BRW_CONDITIONAL_GE;
910 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
911 break;
912 case ir_binop_equal:
913 case ir_binop_all_equal: /* same as equal for scalars */
914 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
915 inst->conditional_mod = BRW_CONDITIONAL_Z;
916 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
917 break;
918 case ir_binop_nequal:
919 case ir_binop_any_nequal: /* same as nequal for scalars */
920 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
921 inst->conditional_mod = BRW_CONDITIONAL_NZ;
922 emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
923 break;
924
925 case ir_binop_logic_xor:
926 emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
927 break;
928
929 case ir_binop_logic_or:
930 emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
931 break;
932
933 case ir_binop_logic_and:
934 emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
935 break;
936
937 case ir_binop_dot:
938 case ir_unop_any:
939 assert(!"not reached: should be handled by brw_fs_channel_expressions");
940 break;
941
942 case ir_unop_noise:
943 assert(!"not reached: should be handled by lower_noise");
944 break;
945
946 case ir_quadop_vector:
947 assert(!"not reached: should be handled by lower_quadop_vector");
948 break;
949
950 case ir_unop_sqrt:
951 emit_math(FS_OPCODE_SQRT, this->result, op[0]);
952 break;
953
954 case ir_unop_rsq:
955 emit_math(FS_OPCODE_RSQ, this->result, op[0]);
956 break;
957
958 case ir_unop_i2f:
959 case ir_unop_b2f:
960 case ir_unop_b2i:
961 case ir_unop_f2i:
962 emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
963 break;
964 case ir_unop_f2b:
965 case ir_unop_i2b:
966 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
967 inst->conditional_mod = BRW_CONDITIONAL_NZ;
968 inst = emit(fs_inst(BRW_OPCODE_AND, this->result,
969 this->result, fs_reg(1)));
970 break;
971
972 case ir_unop_trunc:
973 emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0]));
974 break;
975 case ir_unop_ceil:
976 op[0].negate = !op[0].negate;
977 inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
978 this->result.negate = true;
979 break;
980 case ir_unop_floor:
981 inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
982 break;
983 case ir_unop_fract:
984 inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
985 break;
986 case ir_unop_round_even:
987 emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0]));
988 break;
989
990 case ir_binop_min:
991 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
992 inst->conditional_mod = BRW_CONDITIONAL_L;
993
994 inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
995 inst->predicated = true;
996 break;
997 case ir_binop_max:
998 inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
999 inst->conditional_mod = BRW_CONDITIONAL_G;
1000
1001 inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
1002 inst->predicated = true;
1003 break;
1004
1005 case ir_binop_pow:
1006 emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
1007 break;
1008
1009 case ir_unop_bit_not:
1010 inst = emit(fs_inst(BRW_OPCODE_NOT, this->result, op[0]));
1011 break;
1012 case ir_binop_bit_and:
1013 inst = emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
1014 break;
1015 case ir_binop_bit_xor:
1016 inst = emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
1017 break;
1018 case ir_binop_bit_or:
1019 inst = emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
1020 break;
1021
1022 case ir_unop_u2f:
1023 case ir_binop_lshift:
1024 case ir_binop_rshift:
1025 assert(!"GLSL 1.30 features unsupported");
1026 break;
1027 }
1028 }
1029
1030 void
1031 fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
1032 const glsl_type *type, bool predicated)
1033 {
1034 switch (type->base_type) {
1035 case GLSL_TYPE_FLOAT:
1036 case GLSL_TYPE_UINT:
1037 case GLSL_TYPE_INT:
1038 case GLSL_TYPE_BOOL:
1039 for (unsigned int i = 0; i < type->components(); i++) {
1040 l.type = brw_type_for_base_type(type);
1041 r.type = brw_type_for_base_type(type);
1042
1043 fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
1044 inst->predicated = predicated;
1045
1046 l.reg_offset++;
1047 r.reg_offset++;
1048 }
1049 break;
1050 case GLSL_TYPE_ARRAY:
1051 for (unsigned int i = 0; i < type->length; i++) {
1052 emit_assignment_writes(l, r, type->fields.array, predicated);
1053 }
1054 break;
1055
1056 case GLSL_TYPE_STRUCT:
1057 for (unsigned int i = 0; i < type->length; i++) {
1058 emit_assignment_writes(l, r, type->fields.structure[i].type,
1059 predicated);
1060 }
1061 break;
1062
1063 case GLSL_TYPE_SAMPLER:
1064 break;
1065
1066 default:
1067 assert(!"not reached");
1068 break;
1069 }
1070 }
1071
1072 void
1073 fs_visitor::visit(ir_assignment *ir)
1074 {
1075 struct fs_reg l, r;
1076 fs_inst *inst;
1077
1078 /* FINISHME: arrays on the lhs */
1079 ir->lhs->accept(this);
1080 l = this->result;
1081
1082 ir->rhs->accept(this);
1083 r = this->result;
1084
1085 assert(l.file != BAD_FILE);
1086 assert(r.file != BAD_FILE);
1087
1088 if (ir->condition) {
1089 emit_bool_to_cond_code(ir->condition);
1090 }
1091
1092 if (ir->lhs->type->is_scalar() ||
1093 ir->lhs->type->is_vector()) {
1094 for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
1095 if (ir->write_mask & (1 << i)) {
1096 inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
1097 if (ir->condition)
1098 inst->predicated = true;
1099 r.reg_offset++;
1100 }
1101 l.reg_offset++;
1102 }
1103 } else {
1104 emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
1105 }
1106 }
1107
1108 fs_inst *
1109 fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate)
1110 {
1111 int mlen;
1112 int base_mrf = 1;
1113 bool simd16 = false;
1114 fs_reg orig_dst;
1115
1116 /* g0 header. */
1117 mlen = 1;
1118
1119 if (ir->shadow_comparitor) {
1120 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1121 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
1122 coordinate));
1123 coordinate.reg_offset++;
1124 }
1125 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1126 mlen += 3;
1127
1128 if (ir->op == ir_tex) {
1129 /* There's no plain shadow compare message, so we use shadow
1130 * compare with a bias of 0.0.
1131 */
1132 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1133 fs_reg(0.0f)));
1134 mlen++;
1135 } else if (ir->op == ir_txb) {
1136 ir->lod_info.bias->accept(this);
1137 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1138 this->result));
1139 mlen++;
1140 } else {
1141 assert(ir->op == ir_txl);
1142 ir->lod_info.lod->accept(this);
1143 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1144 this->result));
1145 mlen++;
1146 }
1147
1148 ir->shadow_comparitor->accept(this);
1149 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
1150 mlen++;
1151 } else if (ir->op == ir_tex) {
1152 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1153 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
1154 coordinate));
1155 coordinate.reg_offset++;
1156 }
1157 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1158 mlen += 3;
1159 } else {
1160 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod
1161 * instructions. We'll need to do SIMD16 here.
1162 */
1163 assert(ir->op == ir_txb || ir->op == ir_txl);
1164
1165 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1166 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
1167 coordinate));
1168 coordinate.reg_offset++;
1169 }
1170
1171 /* lod/bias appears after u/v/r. */
1172 mlen += 6;
1173
1174 if (ir->op == ir_txb) {
1175 ir->lod_info.bias->accept(this);
1176 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1177 this->result));
1178 mlen++;
1179 } else {
1180 ir->lod_info.lod->accept(this);
1181 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1182 this->result));
1183 mlen++;
1184 }
1185
1186 /* The unused upper half of the bias/lod's register pair. */
1187 mlen++;
1188
1189 /* Now, since we're doing simd16, the return is 2 interleaved
1190 * vec4s where the odd-indexed ones are junk. We'll need to move
1191 * this weirdness around to the expected layout.
1192 */
1193 simd16 = true;
1194 orig_dst = dst;
1195 dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type,
1196 2));
1197 dst.type = BRW_REGISTER_TYPE_F;
1198 }
1199
1200 fs_inst *inst = NULL;
1201 switch (ir->op) {
1202 case ir_tex:
1203 inst = emit(fs_inst(FS_OPCODE_TEX, dst));
1204 break;
1205 case ir_txb:
1206 inst = emit(fs_inst(FS_OPCODE_TXB, dst));
1207 break;
1208 case ir_txl:
1209 inst = emit(fs_inst(FS_OPCODE_TXL, dst));
1210 break;
1211 case ir_txd:
1212 case ir_txf:
1213 assert(!"GLSL 1.30 features unsupported");
1214 break;
1215 }
1216 inst->base_mrf = base_mrf;
1217 inst->mlen = mlen;
1218
1219 if (simd16) {
1220 for (int i = 0; i < 4; i++) {
1221 emit(fs_inst(BRW_OPCODE_MOV, orig_dst, dst));
1222 orig_dst.reg_offset++;
1223 dst.reg_offset += 2;
1224 }
1225 }
1226
1227 return inst;
1228 }
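/* Sketch of the resulting message for a plain 2D ir_tex on gen4:
 * m1 = g0 header, m2/m3 = u/v, m4 = the always-present (but unwritten)
 * r slot, giving mlen = 4.  The shadow-compare and SIMD16 bias/lod
 * variants grow the message as laid out above.
 */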
1229
1230 fs_inst *
1231 fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate)
1232 {
1233 /* gen5's SIMD8 sampler has slots for u, v, r, array index, then
1234 * optional parameters like the shadow comparitor or LOD bias. If the
1235 * optional parameters aren't present, the unused trailing base slots
1236 * don't need to be included in the message.
1237 *
1238 * Either way, we don't fill in the unused slots, which may
1239 * look surprising in the disassembly.
1240 */
1241 int mlen = 1; /* g0 header always present. */
1242 int base_mrf = 1;
1243
1244 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1245 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
1246 coordinate));
1247 coordinate.reg_offset++;
1248 }
1249 mlen += ir->coordinate->type->vector_elements;
1250
1251 if (ir->shadow_comparitor) {
1252 mlen = MAX2(mlen, 5);
1253
1254 ir->shadow_comparitor->accept(this);
1255 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
1256 mlen++;
1257 }
1258
1259 fs_inst *inst = NULL;
1260 switch (ir->op) {
1261 case ir_tex:
1262 inst = emit(fs_inst(FS_OPCODE_TEX, dst));
1263 break;
1264 case ir_txb:
1265 ir->lod_info.bias->accept(this);
1266 mlen = MAX2(mlen, 5);
1267 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
1268 mlen++;
1269
1270 inst = emit(fs_inst(FS_OPCODE_TXB, dst));
1271 break;
1272 case ir_txl:
1273 ir->lod_info.lod->accept(this);
1274 mlen = MAX2(mlen, 5);
1275 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
1276 mlen++;
1277
1278 inst = emit(fs_inst(FS_OPCODE_TXL, dst));
1279 break;
1280 case ir_txd:
1281 case ir_txf:
1282 assert(!"GLSL 1.30 features unsupported");
1283 break;
1284 }
1285 inst->base_mrf = base_mrf;
1286 inst->mlen = mlen;
1287
1288 return inst;
1289 }
1290
1291 void
1292 fs_visitor::visit(ir_texture *ir)
1293 {
1294 int sampler;
1295 fs_inst *inst = NULL;
1296
1297 ir->coordinate->accept(this);
1298 fs_reg coordinate = this->result;
1299
1300 /* Should be lowered by do_lower_texture_projection */
1301 assert(!ir->projector);
1302
1303 sampler = _mesa_get_sampler_uniform_value(ir->sampler,
1304 ctx->Shader.CurrentFragmentProgram,
1305 &brw->fragment_program->Base);
1306 sampler = c->fp->program.Base.SamplerUnits[sampler];
1307
1308 /* The 965 requires the EU to do the normalization of GL rectangle
1309 * texture coordinates. We use the program parameter state
1310 * tracking to get the scaling factor.
1311 */
1312 if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
1313 struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
1314 int tokens[STATE_LENGTH] = {
1315 STATE_INTERNAL,
1316 STATE_TEXRECT_SCALE,
1317 sampler,
1318 0,
1319 0
1320 };
1321
1322 c->prog_data.param_convert[c->prog_data.nr_params] =
1323 PARAM_NO_CONVERT;
1324 c->prog_data.param_convert[c->prog_data.nr_params + 1] =
1325 PARAM_NO_CONVERT;
1326
1327 fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
1328 fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
1329 GLuint index = _mesa_add_state_reference(params,
1330 (gl_state_index *)tokens);
1331 float *vec_values = this->fp->Base.Parameters->ParameterValues[index];
1332
1333 c->prog_data.param[c->prog_data.nr_params++] = &vec_values[0];
1334 c->prog_data.param[c->prog_data.nr_params++] = &vec_values[1];
1335
1336 fs_reg dst = fs_reg(this, ir->coordinate->type);
1337 fs_reg src = coordinate;
1338 coordinate = dst;
1339
1340 emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x));
1341 dst.reg_offset++;
1342 src.reg_offset++;
1343 emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y));
1344 }
1345
1346 /* Writemasking doesn't eliminate channels on SIMD8 texture
1347 * samples, so don't worry about them.
1348 */
1349 fs_reg dst = fs_reg(this, glsl_type::vec4_type);
1350
1351 if (intel->gen < 5) {
1352 inst = emit_texture_gen4(ir, dst, coordinate);
1353 } else {
1354 inst = emit_texture_gen5(ir, dst, coordinate);
1355 }
1356
1357 inst->sampler = sampler;
1358
1359 this->result = dst;
1360
1361 if (ir->shadow_comparitor)
1362 inst->shadow_compare = true;
1363
1364 if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) {
1365 fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type);
1366
1367 for (int i = 0; i < 4; i++) {
1368 int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
1369 fs_reg l = swizzle_dst;
1370 l.reg_offset += i;
1371
1372 if (swiz == SWIZZLE_ZERO) {
1373 emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(0.0f)));
1374 } else if (swiz == SWIZZLE_ONE) {
1375 emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(1.0f)));
1376 } else {
1377 fs_reg r = dst;
1378 r.reg_offset += GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
1379 emit(fs_inst(BRW_OPCODE_MOV, l, r));
1380 }
1381 }
1382 this->result = swizzle_dst;
1383 }
1384 }
1385
1386 void
1387 fs_visitor::visit(ir_swizzle *ir)
1388 {
1389 ir->val->accept(this);
1390 fs_reg val = this->result;
1391
1392 if (ir->type->vector_elements == 1) {
1393 this->result.reg_offset += ir->mask.x;
1394 return;
1395 }
1396
1397 fs_reg result = fs_reg(this, ir->type);
1398 this->result = result;
1399
1400 for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
1401 fs_reg channel = val;
1402 int swiz = 0;
1403
1404 switch (i) {
1405 case 0:
1406 swiz = ir->mask.x;
1407 break;
1408 case 1:
1409 swiz = ir->mask.y;
1410 break;
1411 case 2:
1412 swiz = ir->mask.z;
1413 break;
1414 case 3:
1415 swiz = ir->mask.w;
1416 break;
1417 }
1418
1419 channel.reg_offset += swiz;
1420 emit(fs_inst(BRW_OPCODE_MOV, result, channel));
1421 result.reg_offset++;
1422 }
1423 }
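/* e.g. for vec3 b = a.zxy, the loop above emits three MOVs that read
 * val.reg_offset + 2, + 0, and + 1 into consecutive channels of the
 * result.
 */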
1424
1425 void
1426 fs_visitor::visit(ir_discard *ir)
1427 {
1428 fs_reg temp = fs_reg(this, glsl_type::uint_type);
1429
1430 assert(ir->condition == NULL); /* FINISHME */
1431
1432 emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d));
1433 emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp));
1434 kill_emitted = true;
1435 }
1436
1437 void
1438 fs_visitor::visit(ir_constant *ir)
1439 {
1440 /* Set this->result to reg at the bottom of the function because some code
1441 * paths will cause this visitor to be applied to other fields. This will
1442 * cause the value stored in this->result to be modified.
1443 *
1444 * Make reg constant so that it doesn't get accidentally modified along the
1445 * way. Yes, I actually had this problem. :(
1446 */
1447 const fs_reg reg(this, ir->type);
1448 fs_reg dst_reg = reg;
1449
1450 if (ir->type->is_array()) {
1451 const unsigned size = type_size(ir->type->fields.array);
1452
1453 for (unsigned i = 0; i < ir->type->length; i++) {
1454 ir->array_elements[i]->accept(this);
1455 fs_reg src_reg = this->result;
1456
1457 dst_reg.type = src_reg.type;
1458 for (unsigned j = 0; j < size; j++) {
1459 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
1460 src_reg.reg_offset++;
1461 dst_reg.reg_offset++;
1462 }
1463 }
1464 } else if (ir->type->is_record()) {
1465 foreach_list(node, &ir->components) {
1466 ir_instruction *const field = (ir_instruction *) node;
1467 const unsigned size = type_size(field->type);
1468
1469 field->accept(this);
1470 fs_reg src_reg = this->result;
1471
1472 dst_reg.type = src_reg.type;
1473 for (unsigned j = 0; j < size; j++) {
1474 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
1475 src_reg.reg_offset++;
1476 dst_reg.reg_offset++;
1477 }
1478 }
1479 } else {
1480 const unsigned size = type_size(ir->type);
1481
1482 for (unsigned i = 0; i < size; i++) {
1483 switch (ir->type->base_type) {
1484 case GLSL_TYPE_FLOAT:
1485 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])));
1486 break;
1487 case GLSL_TYPE_UINT:
1488 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])));
1489 break;
1490 case GLSL_TYPE_INT:
1491 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])));
1492 break;
1493 case GLSL_TYPE_BOOL:
1494 emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])));
1495 break;
1496 default:
1497 assert(!"Non-float/uint/int/bool constant");
1498 }
1499 dst_reg.reg_offset++;
1500 }
1501 }
1502
1503 this->result = reg;
1504 }
1505
1506 void
1507 fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
1508 {
1509 ir_expression *expr = ir->as_expression();
1510
1511 if (expr) {
1512 fs_reg op[2];
1513 fs_inst *inst;
1514
1515 assert(expr->get_num_operands() <= 2);
1516 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1517 assert(expr->operands[i]->type->is_scalar());
1518
1519 expr->operands[i]->accept(this);
1520 op[i] = this->result;
1521 }
1522
1523 switch (expr->operation) {
1524 case ir_unop_logic_not:
1525 inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)));
1526 inst->conditional_mod = BRW_CONDITIONAL_Z;
1527 break;
1528
1529 case ir_binop_logic_xor:
1530 inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]));
1531 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1532 break;
1533
1534 case ir_binop_logic_or:
1535 inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1]));
1536 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1537 break;
1538
1539 case ir_binop_logic_and:
1540 inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1]));
1541 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1542 break;
1543
1544 case ir_unop_f2b:
1545 if (intel->gen >= 6) {
1546 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
1547 op[0], fs_reg(0.0f)));
1548 } else {
1549 inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
1550 }
1551 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1552 break;
1553
1554 case ir_unop_i2b:
1555 if (intel->gen >= 6) {
1556 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)));
1557 } else {
1558 inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
1559 }
1560 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1561 break;
1562
1563 case ir_binop_greater:
1564 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1565 inst->conditional_mod = BRW_CONDITIONAL_G;
1566 break;
1567 case ir_binop_gequal:
1568 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1569 inst->conditional_mod = BRW_CONDITIONAL_GE;
1570 break;
1571 case ir_binop_less:
1572 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1573 inst->conditional_mod = BRW_CONDITIONAL_L;
1574 break;
1575 case ir_binop_lequal:
1576 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1577 inst->conditional_mod = BRW_CONDITIONAL_LE;
1578 break;
1579 case ir_binop_equal:
1580 case ir_binop_all_equal:
1581 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1582 inst->conditional_mod = BRW_CONDITIONAL_Z;
1583 break;
1584 case ir_binop_nequal:
1585 case ir_binop_any_nequal:
1586 inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
1587 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1588 break;
1589 default:
1590 assert(!"not reached");
1591 this->fail = true;
1592 break;
1593 }
1594 return;
1595 }
1596
1597 ir->accept(this);
1598
1599 if (intel->gen >= 6) {
1600 fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d,
1601 this->result, fs_reg(1)));
1602 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1603 } else {
1604 fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result));
1605 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1606 }
1607 }
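/* Every path above ends in an instruction with a conditional_mod whose
 * destination is the null register: the instruction exists purely to
 * set the flag register, which the caller's predicated instruction or
 * IF then consumes.
 */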
1608
1609 /**
1610 * Emit a gen6 IF statement with the comparison folded into the IF
1611 * instruction.
1612 */
1613 void
1614 fs_visitor::emit_if_gen6(ir_if *ir)
1615 {
1616 ir_expression *expr = ir->condition->as_expression();
1617
1618 if (expr) {
1619 fs_reg op[2];
1620 fs_inst *inst;
1621 fs_reg temp;
1622
1623 assert(expr->get_num_operands() <= 2);
1624 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1625 assert(expr->operands[i]->type->is_scalar());
1626
1627 expr->operands[i]->accept(this);
1628 op[i] = this->result;
1629 }
1630
1631 switch (expr->operation) {
1632 case ir_unop_logic_not:
1633 inst = emit(fs_inst(BRW_OPCODE_IF, temp, op[0], fs_reg(0)));
1634 inst->conditional_mod = BRW_CONDITIONAL_Z;
1635 return;
1636
1637 case ir_binop_logic_xor:
1638 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1639 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1640 return;
1641
1642 case ir_binop_logic_or:
1643 temp = fs_reg(this, glsl_type::bool_type);
1644 emit(fs_inst(BRW_OPCODE_OR, temp, op[0], op[1]));
1645 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
1646 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1647 return;
1648
1649 case ir_binop_logic_and:
1650 temp = fs_reg(this, glsl_type::bool_type);
1651 emit(fs_inst(BRW_OPCODE_AND, temp, op[0], op[1]));
1652 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
1653 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1654 return;
1655
1656 case ir_unop_f2b:
1657 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0)));
1658 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1659 return;
1660
1661 case ir_unop_i2b:
1662 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
1663 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1664 return;
1665
1666 case ir_binop_greater:
1667 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1668 inst->conditional_mod = BRW_CONDITIONAL_G;
1669 return;
1670 case ir_binop_gequal:
1671 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1672 inst->conditional_mod = BRW_CONDITIONAL_GE;
1673 return;
1674 case ir_binop_less:
1675 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1676 inst->conditional_mod = BRW_CONDITIONAL_L;
1677 return;
1678 case ir_binop_lequal:
1679 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1680 inst->conditional_mod = BRW_CONDITIONAL_LE;
1681 return;
1682 case ir_binop_equal:
1683 case ir_binop_all_equal:
1684 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1685 inst->conditional_mod = BRW_CONDITIONAL_Z;
1686 return;
1687 case ir_binop_nequal:
1688 case ir_binop_any_nequal:
1689 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
1690 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1691 return;
1692 default:
1693 assert(!"not reached");
1694 inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
1695 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1696 this->fail = true;
1697 return;
1698 }
1699 return;
1700 }
1701
1702 ir->condition->accept(this);
1703
1704 fs_inst *inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)));
1705 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1706 }
1707
1708 void
1709 fs_visitor::visit(ir_if *ir)
1710 {
1711 fs_inst *inst;
1712
1713 /* Don't point the annotation at the if statement, because then it plus
1714 * the then and else blocks get printed.
1715 */
1716 this->base_ir = ir->condition;
1717
1718 if (intel->gen >= 6) {
1719 emit_if_gen6(ir);
1720 } else {
1721 emit_bool_to_cond_code(ir->condition);
1722
1723 inst = emit(fs_inst(BRW_OPCODE_IF));
1724 inst->predicated = true;
1725 }
1726
1727 foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
1728 ir_instruction *ir = (ir_instruction *)iter.get();
1729 this->base_ir = ir;
1730
1731 ir->accept(this);
1732 }
1733
1734 if (!ir->else_instructions.is_empty()) {
1735 emit(fs_inst(BRW_OPCODE_ELSE));
1736
1737 foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
1738 ir_instruction *ir = (ir_instruction *)iter.get();
1739 this->base_ir = ir;
1740
1741 ir->accept(this);
1742 }
1743 }
1744
1745 emit(fs_inst(BRW_OPCODE_ENDIF));
1746 }
1747
1748 void
1749 fs_visitor::visit(ir_loop *ir)
1750 {
1751 fs_reg counter = reg_undef;
1752
1753 if (ir->counter) {
1754 this->base_ir = ir->counter;
1755 ir->counter->accept(this);
1756 counter = *(variable_storage(ir->counter));
1757
1758 if (ir->from) {
1759 this->base_ir = ir->from;
1760 ir->from->accept(this);
1761
1762 emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
1763 }
1764 }
1765
1766 emit(fs_inst(BRW_OPCODE_DO));
1767
1768 if (ir->to) {
1769 this->base_ir = ir->to;
1770 ir->to->accept(this);
1771
1772 fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
1773 counter, this->result));
1774 switch (ir->cmp) {
1775 case ir_binop_equal:
1776 inst->conditional_mod = BRW_CONDITIONAL_Z;
1777 break;
1778 case ir_binop_nequal:
1779 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1780 break;
1781 case ir_binop_gequal:
1782 inst->conditional_mod = BRW_CONDITIONAL_GE;
1783 break;
1784 case ir_binop_lequal:
1785 inst->conditional_mod = BRW_CONDITIONAL_LE;
1786 break;
1787 case ir_binop_greater:
1788 inst->conditional_mod = BRW_CONDITIONAL_G;
1789 break;
1790 case ir_binop_less:
1791 inst->conditional_mod = BRW_CONDITIONAL_L;
1792 break;
1793 default:
1794 assert(!"not reached: unknown loop condition");
1795 this->fail = true;
1796 break;
1797 }
1798
1799 inst = emit(fs_inst(BRW_OPCODE_BREAK));
1800 inst->predicated = true;
1801 }
1802
1803 foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
1804 ir_instruction *ir = (ir_instruction *)iter.get();
1805
1806 this->base_ir = ir;
1807 ir->accept(this);
1808 }
1809
1810 if (ir->increment) {
1811 this->base_ir = ir->increment;
1812 ir->increment->accept(this);
1813 emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
1814 }
1815
1816 emit(fs_inst(BRW_OPCODE_WHILE));
1817 }
1818
1819 void
1820 fs_visitor::visit(ir_loop_jump *ir)
1821 {
1822 switch (ir->mode) {
1823 case ir_loop_jump::jump_break:
1824 emit(fs_inst(BRW_OPCODE_BREAK));
1825 break;
1826 case ir_loop_jump::jump_continue:
1827 emit(fs_inst(BRW_OPCODE_CONTINUE));
1828 break;
1829 }
1830 }
1831
1832 void
1833 fs_visitor::visit(ir_call *ir)
1834 {
1835 assert(!"FINISHME");
1836 }
1837
1838 void
1839 fs_visitor::visit(ir_return *ir)
1840 {
1841 assert(!"FINISHME");
1842 }
1843
1844 void
1845 fs_visitor::visit(ir_function *ir)
1846 {
1847 /* Ignore function bodies other than main() -- we shouldn't see calls to
1848 * them since they should all be inlined before we get to ir_to_mesa.
1849 */
1850 if (strcmp(ir->name, "main") == 0) {
1851 const ir_function_signature *sig;
1852 exec_list empty;
1853
1854 sig = ir->matching_signature(&empty);
1855
1856 assert(sig);
1857
1858 foreach_iter(exec_list_iterator, iter, sig->body) {
1859 ir_instruction *ir = (ir_instruction *)iter.get();
1860 this->base_ir = ir;
1861
1862 ir->accept(this);
1863 }
1864 }
1865 }
1866
1867 void
1868 fs_visitor::visit(ir_function_signature *ir)
1869 {
1870 assert(!"not reached");
1871 (void)ir;
1872 }
1873
1874 fs_inst *
1875 fs_visitor::emit(fs_inst inst)
1876 {
1877 fs_inst *list_inst = new(mem_ctx) fs_inst;
1878 *list_inst = inst;
1879
1880 list_inst->annotation = this->current_annotation;
1881 list_inst->ir = this->base_ir;
1882
1883 this->instructions.push_tail(list_inst);
1884
1885 return list_inst;
1886 }
1887
1888 /** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
1889 void
1890 fs_visitor::emit_dummy_fs()
1891 {
1892 /* Everyone's favorite color. */
1893 emit(fs_inst(BRW_OPCODE_MOV,
1894 fs_reg(MRF, 2),
1895 fs_reg(1.0f)));
1896 emit(fs_inst(BRW_OPCODE_MOV,
1897 fs_reg(MRF, 3),
1898 fs_reg(0.0f)));
1899 emit(fs_inst(BRW_OPCODE_MOV,
1900 fs_reg(MRF, 4),
1901 fs_reg(1.0f)));
1902 emit(fs_inst(BRW_OPCODE_MOV,
1903 fs_reg(MRF, 5),
1904 fs_reg(0.0f)));
1905
1906 fs_inst *write;
1907 write = emit(fs_inst(FS_OPCODE_FB_WRITE,
1908 fs_reg(0),
1909 fs_reg(0)));
1910 write->base_mrf = 0;
1911 }
1912
1913 /* The register location here is relative to the start of the URB
1914 * data. It will get adjusted to a real hardware register location
1915 * by the time generate_code() runs.
1916 */
1917 struct brw_reg
1918 fs_visitor::interp_reg(int location, int channel)
1919 {
1920 int regnr = urb_setup[location] * 2 + channel / 2;
1921 int stride = (channel & 1) * 4;
1922
1923 assert(urb_setup[location] != -1);
1924
1925 return brw_vec1_grf(regnr, stride);
1926 }
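/* Each attribute's setup data occupies two registers, two channels per
 * register.  e.g. with urb_setup[location] == 1 and channel == 2, this
 * is regnr = 1 * 2 + 1 = 3 with subregister offset 0, i.e.
 * brw_vec1_grf(3, 0).
 */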
1927
1928 /** Emits the interpolation for the varying inputs. */
1929 void
1930 fs_visitor::emit_interpolation_setup_gen4()
1931 {
1932 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
1933
1934 this->current_annotation = "compute pixel centers";
1935 this->pixel_x = fs_reg(this, glsl_type::uint_type);
1936 this->pixel_y = fs_reg(this, glsl_type::uint_type);
1937 this->pixel_x.type = BRW_REGISTER_TYPE_UW;
1938 this->pixel_y.type = BRW_REGISTER_TYPE_UW;
1939 emit(fs_inst(BRW_OPCODE_ADD,
1940 this->pixel_x,
1941 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
1942 fs_reg(brw_imm_v(0x10101010))));
1943 emit(fs_inst(BRW_OPCODE_ADD,
1944 this->pixel_y,
1945 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
1946 fs_reg(brw_imm_v(0x11001100))));
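   /* The brw_imm_v immediates above pack eight 4-bit elements, lowest
    * nibble first: 0x10101010 is <0,1,0,1,0,1,0,1> and 0x11001100 is
    * <0,0,1,1,0,0,1,1>, the per-pixel x/y offsets within the two 2x2
    * subspans that get added to the subspan origins read from g1.
    */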
1947
1948 this->current_annotation = "compute pixel deltas from v0";
1949 if (brw->has_pln) {
1950 this->delta_x = fs_reg(this, glsl_type::vec2_type);
1951 this->delta_y = this->delta_x;
1952 this->delta_y.reg_offset++;
1953 } else {
1954 this->delta_x = fs_reg(this, glsl_type::float_type);
1955 this->delta_y = fs_reg(this, glsl_type::float_type);
1956 }
1957 emit(fs_inst(BRW_OPCODE_ADD,
1958 this->delta_x,
1959 this->pixel_x,
1960 fs_reg(negate(brw_vec1_grf(1, 0)))));
1961 emit(fs_inst(BRW_OPCODE_ADD,
1962 this->delta_y,
1963 this->pixel_y,
1964 fs_reg(negate(brw_vec1_grf(1, 1)))));
1965
1966 this->current_annotation = "compute pos.w and 1/pos.w";
1967 /* Compute wpos.w. It's always in our setup, since it's needed to
1968 * interpolate the other attributes.
1969 */
1970 this->wpos_w = fs_reg(this, glsl_type::float_type);
1971 emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
1972 interp_reg(FRAG_ATTRIB_WPOS, 3)));
1973 /* Compute the pixel 1/W value from wpos.w. */
1974 this->pixel_w = fs_reg(this, glsl_type::float_type);
1975 emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
1976 this->current_annotation = NULL;
1977 }
1978
1979 /** Emits the interpolation for the varying inputs. */
1980 void
1981 fs_visitor::emit_interpolation_setup_gen6()
1982 {
1983 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
1984
1985 /* If the pixel centers end up used, the setup is the same as for gen4. */
1986 this->current_annotation = "compute pixel centers";
1987 fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
1988 fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
1989 int_pixel_x.type = BRW_REGISTER_TYPE_UW;
1990 int_pixel_y.type = BRW_REGISTER_TYPE_UW;
1991 emit(fs_inst(BRW_OPCODE_ADD,
1992 int_pixel_x,
1993 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
1994 fs_reg(brw_imm_v(0x10101010))));
1995 emit(fs_inst(BRW_OPCODE_ADD,
1996 int_pixel_y,
1997 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
1998 fs_reg(brw_imm_v(0x11001100))));
1999
2000 /* As of gen6, we can no longer mix float and int sources. We have
2001 * to turn the integer pixel centers into floats for their actual
2002 * use.
2003 */
2004 this->pixel_x = fs_reg(this, glsl_type::float_type);
2005 this->pixel_y = fs_reg(this, glsl_type::float_type);
2006 emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x));
2007 emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y));
2008
2009 this->current_annotation = "compute 1/pos.w";
2010 this->wpos_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
2011 this->pixel_w = fs_reg(this, glsl_type::float_type);
2012 emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
2013
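/* Unlike gen4, the pixel deltas from v0 arrive precomputed in the
 * thread payload; taking them from g2/g3 here assumes the standard
 * non-multisampled payload layout.
 */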
2014 this->delta_x = fs_reg(brw_vec8_grf(2, 0));
2015 this->delta_y = fs_reg(brw_vec8_grf(3, 0));
2016
2017 this->current_annotation = NULL;
2018 }
2019
2020 void
2021 fs_visitor::emit_fb_writes()
2022 {
2023 this->current_annotation = "FB write header";
2024 GLboolean header_present = GL_TRUE;
2025 int nr = 0;
2026
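/* The message header is what carries the discard-modified pixel mask
 * and the render target selection, so it can be skipped on gen6+ when
 * there's a single color region and no kill instructions.
 */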
2027 if (intel->gen >= 6 &&
2028 !this->kill_emitted &&
2029 c->key.nr_color_regions == 1) {
2030 header_present = GL_FALSE;
2031 }
2032
2033 if (header_present) {
2034 /* m0, m1 header */
2035 nr += 2;
2036 }
2037
2038 if (c->aa_dest_stencil_reg) {
2039 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2040 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))));
2041 }
2042
2043 /* Reserve space for color. It'll be filled in per MRT below. */
2044 int color_mrf = nr;
2045 nr += 4;
2046
2047 if (c->source_depth_to_render_target) {
2048 if (c->computes_depth) {
2049 /* Hand over gl_FragDepth. */
2050 assert(this->frag_depth);
2051 fs_reg depth = *(variable_storage(this->frag_depth));
2052
2053 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
2054 } else {
2055 /* Pass through the payload depth. */
2056 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2057 fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
2058 }
2059 }
2060
2061 if (c->dest_depth_reg) {
2062 emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2063 fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))));
2064 }
2065
2066 fs_reg color = reg_undef;
2067 if (this->frag_color)
2068 color = *(variable_storage(this->frag_color));
2069 else if (this->frag_data) {
2070 color = *(variable_storage(this->frag_data));
2071 color.type = BRW_REGISTER_TYPE_F;
2072 }
2073
2074 for (int target = 0; target < c->key.nr_color_regions; target++) {
2075 this->current_annotation = talloc_asprintf(this->mem_ctx,
2076 "FB write target %d",
2077 target);
2078 if (this->frag_color || this->frag_data) {
2079 for (int i = 0; i < 4; i++) {
2080 emit(fs_inst(BRW_OPCODE_MOV,
2081 fs_reg(MRF, color_mrf + i),
2082 color));
2083 color.reg_offset++;
2084 }
2085 }
2086
2087 if (this->frag_color)
2088 color.reg_offset -= 4;
2089
2090 fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
2091 reg_undef, reg_undef));
2092 inst->target = target;
2093 inst->base_mrf = 0;
2094 inst->mlen = nr;
2095 if (target == c->key.nr_color_regions - 1)
2096 inst->eot = true;
2097 inst->header_present = header_present;
2098 }
2099
2100 if (c->key.nr_color_regions == 0) {
2101 fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
2102 reg_undef, reg_undef));
2103 inst->base_mrf = 0;
2104 inst->mlen = nr;
2105 inst->eot = true;
2106 inst->header_present = header_present;
2107 }
2108
2109 this->current_annotation = NULL;
2110 }
2111
2112 void
2113 fs_visitor::generate_fb_write(fs_inst *inst)
2114 {
2115 GLboolean eot = inst->eot;
2116 struct brw_reg implied_header;
2117
2118 /* Header is 2 regs, g0 and g1 are the contents. g0 is moved by an
2119 * implied move in the SEND; we set up g1 here.
2120 */
2121 brw_push_insn_state(p);
2122 brw_set_mask_control(p, BRW_MASK_DISABLE);
2123 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2124
2125 if (inst->header_present) {
2126 if (intel->gen >= 6) {
2127 brw_MOV(p,
2128 brw_message_reg(inst->base_mrf),
2129 brw_vec8_grf(0, 0));
2130
2131 if (inst->target > 0) {
2132 /* Set the render target index for choosing BLEND_STATE. */
2133 brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
2134 BRW_REGISTER_TYPE_UD),
2135 brw_imm_ud(inst->target));
2136 }
2137
2138 /* Clear viewport index, render target array index. */
2139 brw_AND(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 0),
2140 BRW_REGISTER_TYPE_UD),
2141 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2142 brw_imm_ud(0xf7ff));
2143
2144 implied_header = brw_null_reg();
2145 } else {
2146 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
2147 }
2148
2149 brw_MOV(p,
2150 brw_message_reg(inst->base_mrf + 1),
2151 brw_vec8_grf(1, 0));
2152 } else {
2153 implied_header = brw_null_reg();
2154 }
2155
2156 brw_pop_insn_state(p);
2157
2158 brw_fb_WRITE(p,
2159 8, /* dispatch_width */
2160 retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
2161 inst->base_mrf,
2162 implied_header,
2163 inst->target,
2164 inst->mlen,
2165 0,
2166 eot,
2167 inst->header_present);
2168 }
2169
2170 void
2171 fs_visitor::generate_linterp(fs_inst *inst,
2172 struct brw_reg dst, struct brw_reg *src)
2173 {
2174 struct brw_reg delta_x = src[0];
2175 struct brw_reg delta_y = src[1];
2176 struct brw_reg interp = src[2];
2177
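/* PLN evaluates the whole plane equation in one instruction, but needs
 * delta_x/delta_y in a consecutive (and, pre-gen6, even-aligned)
 * register pair; otherwise fall back to LINE+MAC, which accumulates
 * the same a*dx + b*dy + c through the accumulator.
 */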
2178 if (brw->has_pln &&
2179 delta_y.nr == delta_x.nr + 1 &&
2180 (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
2181 brw_PLN(p, dst, interp, delta_x);
2182 } else {
2183 brw_LINE(p, brw_null_reg(), interp, delta_x);
2184 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
2185 }
2186 }
2187
2188 void
2189 fs_visitor::generate_math(fs_inst *inst,
2190 struct brw_reg dst, struct brw_reg *src)
2191 {
2192 int op;
2193
2194 switch (inst->opcode) {
2195 case FS_OPCODE_RCP:
2196 op = BRW_MATH_FUNCTION_INV;
2197 break;
2198 case FS_OPCODE_RSQ:
2199 op = BRW_MATH_FUNCTION_RSQ;
2200 break;
2201 case FS_OPCODE_SQRT:
2202 op = BRW_MATH_FUNCTION_SQRT;
2203 break;
2204 case FS_OPCODE_EXP2:
2205 op = BRW_MATH_FUNCTION_EXP;
2206 break;
2207 case FS_OPCODE_LOG2:
2208 op = BRW_MATH_FUNCTION_LOG;
2209 break;
2210 case FS_OPCODE_POW:
2211 op = BRW_MATH_FUNCTION_POW;
2212 break;
2213 case FS_OPCODE_SIN:
2214 op = BRW_MATH_FUNCTION_SIN;
2215 break;
2216 case FS_OPCODE_COS:
2217 op = BRW_MATH_FUNCTION_COS;
2218 break;
2219 default:
2220 assert(!"not reached: unknown math function");
2221 op = 0;
2222 break;
2223 }
2224
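/* On gen6, math is an ordinary instruction reading its operands
 * straight from GRFs (hence mlen == 0); on earlier gens it's a message
 * to the shared math unit sent through an MRF.
 */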
2225 if (intel->gen >= 6) {
2226 assert(inst->mlen == 0);
2227
2228 if (inst->opcode == FS_OPCODE_POW) {
2229 brw_math2(p, dst, op, src[0], src[1]);
2230 } else {
2231 brw_math(p, dst,
2232 op,
2233 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2234 BRW_MATH_SATURATE_NONE,
2235 0, src[0],
2236 BRW_MATH_DATA_VECTOR,
2237 BRW_MATH_PRECISION_FULL);
2238 }
2239 } else {
2240 assert(inst->mlen >= 1);
2241
2242 brw_math(p, dst,
2243 op,
2244 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2245 BRW_MATH_SATURATE_NONE,
2246 inst->base_mrf, src[0],
2247 BRW_MATH_DATA_VECTOR,
2248 BRW_MATH_PRECISION_FULL);
2249 }
2250 }
2251
2252 void
2253 fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst)
2254 {
2255 int msg_type = -1;
2256 int rlen = 4;
2257 uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
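/* A SIMD8 sample returns one GRF per channel of RGBA, hence rlen 4;
 * SIMD16 doubles that below.
 */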
2258
2259 if (intel->gen >= 5) {
2260 switch (inst->opcode) {
2261 case FS_OPCODE_TEX:
2262 if (inst->shadow_compare) {
2263 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
2264 } else {
2265 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
2266 }
2267 break;
2268 case FS_OPCODE_TXB:
2269 if (inst->shadow_compare) {
2270 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
2271 } else {
2272 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
2273 }
2274 break;
2275 }
2276 } else {
2277 switch (inst->opcode) {
2278 case FS_OPCODE_TEX:
2279 /* Note that G45 and older determine shadow compare and dispatch width
2280 * from message length for most messages.
2281 */
2282 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
2283 if (inst->shadow_compare) {
2284 assert(inst->mlen == 6);
2285 } else {
2286 assert(inst->mlen <= 4);
2287 }
2288 break;
2289 case FS_OPCODE_TXB:
2290 if (inst->shadow_compare) {
2291 assert(inst->mlen == 6);
2292 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
2293 } else {
2294 assert(inst->mlen == 9);
2295 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
2296 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
2297 }
2298 break;
2299 }
2300 }
2301 assert(msg_type != -1);
2302
2303 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
2304 rlen = 8;
2305 dst = vec16(dst);
2306 }
2307
2308 brw_SAMPLE(p,
2309 retype(dst, BRW_REGISTER_TYPE_UW),
2310 inst->base_mrf,
2311 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
2312 SURF_INDEX_TEXTURE(inst->sampler),
2313 inst->sampler,
2314 WRITEMASK_XYZW,
2315 msg_type,
2316 rlen,
2317 inst->mlen,
2318 0,
2319 1,
2320 simd_mode);
2321 }
2322
2323
2324 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
2325 * looking like:
2326 *
2327 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
2328 *
2329 * and we're trying to produce:
2330 *
2331 * DDX DDY
2332 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
2333 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
2334 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
2335 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
2336 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
2337 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
2338 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
2339 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
2340 *
2341 * and add another set of two more subspans if in 16-pixel dispatch mode.
2342 *
2343 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
2344 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
2345 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
2346 * between each other. We could probably do it like ddx and swizzle the right
2347 * order later, but bail for now and just produce
2348 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
2349 */
2350 void
2351 fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2352 {
2353 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
2354 BRW_REGISTER_TYPE_F,
2355 BRW_VERTICAL_STRIDE_2,
2356 BRW_WIDTH_2,
2357 BRW_HORIZONTAL_STRIDE_0,
2358 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2359 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
2360 BRW_REGISTER_TYPE_F,
2361 BRW_VERTICAL_STRIDE_2,
2362 BRW_WIDTH_2,
2363 BRW_HORIZONTAL_STRIDE_0,
2364 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
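/* src0 replicates the right pixel of each horizontal pair (suboffset 1)
 * and src1 the left (suboffset 0), so the ADD computes right - left.
 */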
2365 brw_ADD(p, dst, src0, negate(src1));
2366 }
2367
2368 void
2369 fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2370 {
2371 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
2372 BRW_REGISTER_TYPE_F,
2373 BRW_VERTICAL_STRIDE_4,
2374 BRW_WIDTH_4,
2375 BRW_HORIZONTAL_STRIDE_0,
2376 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2377 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
2378 BRW_REGISTER_TYPE_F,
2379 BRW_VERTICAL_STRIDE_4,
2380 BRW_WIDTH_4,
2381 BRW_HORIZONTAL_STRIDE_0,
2382 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
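/* src0 replicates each subspan's top-left (suboffset 0) and src1 its
 * bottom-left (suboffset 2) across all four pixels, giving the
 * (tl - bl)x4 result described above.
 */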
2383 brw_ADD(p, dst, src0, negate(src1));
2384 }
2385
2386 void
2387 fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
2388 {
2389 if (intel->gen >= 6) {
2390 /* Gen6 no longer has the mask reg for us to just read the
2391 * active channels from. However, cmp updates just the channels
2392 * of the flag reg that are enabled, so we can get at the
2393 * channel enables that way. In this step, make a reg of ones
2394 * we'll compare to.
2395 */
2396 brw_MOV(p, mask, brw_imm_ud(1));
2397 } else {
2398 brw_push_insn_state(p);
2399 brw_set_mask_control(p, BRW_MASK_DISABLE);
2400 brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
2401 brw_pop_insn_state(p);
2402 }
2403 }
2404
2405 void
2406 fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
2407 {
2408 if (intel->gen >= 6) {
2409 struct brw_reg f0 = brw_flag_reg();
2410 struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
2411
2412 brw_push_insn_state(p);
2413 brw_set_mask_control(p, BRW_MASK_DISABLE);
2414 brw_MOV(p, f0, brw_imm_uw(0xffff)); /* inactive channels undiscarded */
2415 brw_pop_insn_state(p);
2416
2417 brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
2418 BRW_CONDITIONAL_Z, mask, brw_imm_ud(0)); /* active channels fail test */
2419 /* Undo CMP's whacking of predication. */
2420 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2421
2422 brw_push_insn_state(p);
2423 brw_set_mask_control(p, BRW_MASK_DISABLE);
2424 brw_AND(p, g1, f0, g1);
2425 brw_pop_insn_state(p);
2426 } else {
2427 struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
2428
2429 mask = brw_uw1_reg(mask.file, mask.nr, 0);
2430
2431 brw_push_insn_state(p);
2432 brw_set_mask_control(p, BRW_MASK_DISABLE);
2433 brw_AND(p, g0, mask, g0);
2434 brw_pop_insn_state(p);
2435 }
2436 }
2437
2438 void
2439 fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
2440 {
2441 assert(inst->mlen != 0);
2442
2443 brw_MOV(p,
2444 retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
2445 retype(src, BRW_REGISTER_TYPE_UD));
2446 brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
2447 inst->offset);
2448 }
2449
2450 void
2451 fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
2452 {
2453 assert(inst->mlen != 0);
2454
2455 /* Clear any post destination dependencies that would be ignored by
2456 * the block read. See the B-Spec for pre-gen5 send instruction.
2457 *
2458 * This could use a better solution, since texture sampling and
2459 * math reads could potentially run into it as well -- anywhere
2460 * that we have a SEND with a destination that is a register that
2461 * was written but not read within the last N instructions (what's
2462 * N? unsure). This is rare because of dead code elimination, but
2463 * not impossible.
2464 */
2465 if (intel->gen == 4 && !intel->is_g4x)
2466 brw_MOV(p, brw_null_reg(), dst);
2467
2468 brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
2469 inst->offset);
2470
2471 if (intel->gen == 4 && !intel->is_g4x) {
2472 /* gen4 errata: destination from a send can't be used as a
2473 * destination until it's been read. Just read it so we don't
2474 * have to worry.
2475 */
2476 brw_MOV(p, brw_null_reg(), dst);
2477 }
2478 }
2479
2480
2481 void
2482 fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
2483 {
2484 assert(inst->mlen != 0);
2485
2486 /* Clear any post destination dependencies that would be ignored by
2487 * the block read. See the B-Spec for pre-gen5 send instruction.
2488 *
2489 * This could use a better solution, since texture sampling and
2490 * math reads could potentially run into it as well -- anywhere
2491 * that we have a SEND with a destination that is a register that
2492 * was written but not read within the last N instructions (what's
2493 * N? unsure). This is rare because of dead code elimination, but
2494 * not impossible.
2495 */
2496 if (intel->gen == 4 && !intel->is_g4x)
2497 brw_MOV(p, brw_null_reg(), dst);
2498
2499 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
2500 inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
2501
2502 if (intel->gen == 4 && !intel->is_g4x) {
2503 /* gen4 errata: destination from a send can't be used as a
2504 * destination until it's been read. Just read it so we don't
2505 * have to worry.
2506 */
2507 brw_MOV(p, brw_null_reg(), dst);
2508 }
2509 }
2510
2511 void
2512 fs_visitor::assign_curb_setup()
2513 {
2514 c->prog_data.first_curbe_grf = c->nr_payload_regs;
2515 c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
2516
2517 /* Map the offsets in the UNIFORM file to fixed HW regs. */
2518 foreach_iter(exec_list_iterator, iter, this->instructions) {
2519 fs_inst *inst = (fs_inst *)iter.get();
2520
2521 for (unsigned int i = 0; i < 3; i++) {
2522 if (inst->src[i].file == UNIFORM) {
2523 int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
2524 struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
2525 constant_nr / 8,
2526 constant_nr % 8);
2527
2528 inst->src[i].file = FIXED_HW_REG;
2529 inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
2530 }
2531 }
2532 }
2533 }
2534
2535 void
2536 fs_visitor::calculate_urb_setup()
2537 {
2538 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2539 urb_setup[i] = -1;
2540 }
2541
2542 int urb_next = 0;
2543 /* Figure out where each of the incoming setup attributes lands. */
2544 if (intel->gen >= 6) {
2545 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2546 if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
2547 urb_setup[i] = urb_next++;
2548 }
2549 }
2550 } else {
2551 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
2552 for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
2553 if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
2554 int fp_index;
2555
2556 if (i >= VERT_RESULT_VAR0)
2557 fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
2558 else if (i <= VERT_RESULT_TEX7)
2559 fp_index = i;
2560 else
2561 fp_index = -1;
2562
2563 if (fp_index >= 0)
2564 urb_setup[fp_index] = urb_next++;
2565 }
2566 }
2567 }
2568
2569 /* Each attribute is 4 setup channels, each of which is half a reg. */
2570 c->prog_data.urb_read_length = urb_next * 2;
2571 }
2572
2573 void
2574 fs_visitor::assign_urb_setup()
2575 {
2576 int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;
2577
2578 /* Offset all the urb_setup[] indices by the actual position of the
2579 * setup regs, now that the location of the constants has been chosen.
2580 */
2581 foreach_iter(exec_list_iterator, iter, this->instructions) {
2582 fs_inst *inst = (fs_inst *)iter.get();
2583
2584 if (inst->opcode != FS_OPCODE_LINTERP)
2585 continue;
2586
2587 assert(inst->src[2].file == FIXED_HW_REG);
2588
2589 inst->src[2].fixed_hw_reg.nr += urb_start;
2590 }
2591
2592 this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
2593 }
2594
2595 /**
2596 * Split large virtual GRFs into separate components if we can.
2597 *
2598 * This is mostly duplicated with what brw_fs_vector_splitting does,
2599 * but that's really conservative because it's afraid of doing
2600 * splitting that doesn't result in real progress after the rest of
2601 * the optimization phases, which would cause infinite looping in
2602 * optimization. We can do it once here, safely. This also has the
2603 * opportunity to split interpolated values, or maybe even uniforms,
2604 * which we don't have at the IR level.
2605 *
2606 * We want to split, because virtual GRFs are what we register
2607 * allocate and spill (due to contiguousness requirements for some
2608 * instructions), and they're what we naturally generate in the
2609 * codegen process, but most virtual GRFs don't actually need to be
2610 * contiguous sets of GRFs. If we split, we'll end up with reduced
2611 * live intervals and better dead code elimination and coalescing.
2612 */
2613 void
2614 fs_visitor::split_virtual_grfs()
2615 {
2616 int num_vars = this->virtual_grf_next;
2617 bool split_grf[num_vars];
2618 int new_virtual_grf[num_vars];
2619
2620 /* Try to split anything > 0 sized. */
2621 for (int i = 0; i < num_vars; i++) {
2622 if (this->virtual_grf_sizes[i] != 1)
2623 split_grf[i] = true;
2624 else
2625 split_grf[i] = false;
2626 }
2627
2628 if (brw->has_pln) {
2629 /* PLN opcodes rely on the delta_xy being contiguous. */
2630 split_grf[this->delta_x.reg] = false;
2631 }
2632
2633 foreach_iter(exec_list_iterator, iter, this->instructions) {
2634 fs_inst *inst = (fs_inst *)iter.get();
2635
2636 /* Texturing produces 4 contiguous registers, so no splitting. */
2637 if ((inst->opcode == FS_OPCODE_TEX ||
2638 inst->opcode == FS_OPCODE_TXB ||
2639 inst->opcode == FS_OPCODE_TXL) &&
2640 inst->dst.file == GRF) {
2641 split_grf[inst->dst.reg] = false;
2642 }
2643 }
2644
2645 /* Allocate new space for split regs. Note that the virtual
2646 * numbers will be contiguous.
2647 */
2648 for (int i = 0; i < num_vars; i++) {
2649 if (split_grf[i]) {
2650 new_virtual_grf[i] = virtual_grf_alloc(1);
2651 for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
2652 int reg = virtual_grf_alloc(1);
2653 assert(reg == new_virtual_grf[i] + j - 1);
2654 (void) reg;
2655 }
2656 this->virtual_grf_sizes[i] = 1;
2657 }
2658 }
2659
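/* Remap: reg_offset 0 keeps the original (now size-1) register;
 * offset k > 0 becomes new_virtual_grf[reg] + k - 1.
 */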
2660 foreach_iter(exec_list_iterator, iter, this->instructions) {
2661 fs_inst *inst = (fs_inst *)iter.get();
2662
2663 if (inst->dst.file == GRF &&
2664 split_grf[inst->dst.reg] &&
2665 inst->dst.reg_offset != 0) {
2666 inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
2667 inst->dst.reg_offset - 1);
2668 inst->dst.reg_offset = 0;
2669 }
2670 for (int i = 0; i < 3; i++) {
2671 if (inst->src[i].file == GRF &&
2672 split_grf[inst->src[i].reg] &&
2673 inst->src[i].reg_offset != 0) {
2674 inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
2675 inst->src[i].reg_offset - 1);
2676 inst->src[i].reg_offset = 0;
2677 }
2678 }
2679 }
2680 }
2681
2682 /**
2683 * Choose accesses from the UNIFORM file to demote to using the pull
2684 * constant buffer.
2685 *
2686 * We allow a fragment shader to use more uniform components than the
2687 * GL-specified minimum maximum (64). If there are too many of these,
2688 * they'd fill up all of the register space. So, this will push some of
2689 * them out to the pull constant buffer and update the program to load
2690 * them from there.
2691 */
2692 void
2693 fs_visitor::setup_pull_constants()
2694 {
2695 /* Only allow 16 registers (128 uniform components) as push constants. */
2696 unsigned int max_uniform_components = 16 * 8;
2697 if (c->prog_data.nr_params <= max_uniform_components)
2698 return;
2699
2700 /* Just demote the end of the list. We could probably do better
2701 * here, demoting things that are rarely used in the program first.
2702 */
2703 int pull_uniform_base = max_uniform_components;
2704 int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;
2705
2706 foreach_iter(exec_list_iterator, iter, this->instructions) {
2707 fs_inst *inst = (fs_inst *)iter.get();
2708
2709 for (int i = 0; i < 3; i++) {
2710 if (inst->src[i].file != UNIFORM)
2711 continue;
2712
2713 int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
2714 if (uniform_nr < pull_uniform_base)
2715 continue;
2716
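/* Each oword block read pulls 16 aligned bytes (four floats); offset
 * picks the oword and smear selects the component within it.
 */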
2717 fs_reg dst = fs_reg(this, glsl_type::float_type);
2718 fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
2719 dst);
2720 pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
2721 pull->ir = inst->ir;
2722 pull->annotation = inst->annotation;
2723 pull->base_mrf = 14;
2724 pull->mlen = 1;
2725
2726 inst->insert_before(pull);
2727
2728 inst->src[i].file = GRF;
2729 inst->src[i].reg = dst.reg;
2730 inst->src[i].reg_offset = 0;
2731 inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
2732 }
2733 }
2734
2735 for (int i = 0; i < pull_uniform_count; i++) {
2736 c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
2737 c->prog_data.pull_param_convert[i] =
2738 c->prog_data.param_convert[pull_uniform_base + i];
2739 }
2740 c->prog_data.nr_params -= pull_uniform_count;
2741 c->prog_data.nr_pull_params = pull_uniform_count;
2742 }
2743
2744 void
2745 fs_visitor::calculate_live_intervals()
2746 {
2747 int num_vars = this->virtual_grf_next;
2748 int *def = talloc_array(mem_ctx, int, num_vars);
2749 int *use = talloc_array(mem_ctx, int, num_vars);
2750 int loop_depth = 0;
2751 int loop_start = 0;
2752 int bb_header_ip = 0;
2753
2754 for (int i = 0; i < num_vars; i++) {
2755 def[i] = 1 << 30;
2756 use[i] = -1;
2757 }
2758
2759 int ip = 0;
2760 foreach_iter(exec_list_iterator, iter, this->instructions) {
2761 fs_inst *inst = (fs_inst *)iter.get();
2762
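/* Values touched inside a loop are conservatively treated as live for
 * the whole loop: defs are pulled back to the loop start and uses are
 * pushed out to the WHILE, unless a single-register def dominates its
 * use within the same basic block.
 */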
2763 if (inst->opcode == BRW_OPCODE_DO) {
2764 if (loop_depth++ == 0)
2765 loop_start = ip;
2766 } else if (inst->opcode == BRW_OPCODE_WHILE) {
2767 loop_depth--;
2768
2769 if (loop_depth == 0) {
2770 /* Patch up the uses of vars marked as live across
2771 * the whole loop.
2772 */
2773 for (int i = 0; i < num_vars; i++) {
2774 if (use[i] == loop_start) {
2775 use[i] = ip;
2776 }
2777 }
2778 }
2779 } else {
2780 for (unsigned int i = 0; i < 3; i++) {
2781 if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
2782 int reg = inst->src[i].reg;
2783
2784 if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
2785 def[reg] >= bb_header_ip)) {
2786 use[reg] = ip;
2787 } else {
2788 def[reg] = MIN2(loop_start, def[reg]);
2789 use[reg] = loop_start;
2790
2791 /* Nothing will now push our start later into the
2792 * loop, because def[reg] points before the bb
2793 * header.
2794 */
2795 }
2796 }
2797 }
2798 if (inst->dst.file == GRF && inst->dst.reg != 0) {
2799 int reg = inst->dst.reg;
2800
2801 if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
2802 !inst->predicated)) {
2803 def[reg] = MIN2(def[reg], ip);
2804 } else {
2805 def[reg] = MIN2(def[reg], loop_start);
2806 }
2807 }
2808 }
2809
2810 ip++;
2811
2812 /* Set the basic block header IP. This is used for determining
2813 * if a complete def of a single-register virtual GRF in a loop
2814 * dominates a use in the same basic block. It's a quick way to
2815 * reduce the live interval range of most registers used in a
2816 * loop.
2817 */
2818 if (inst->opcode == BRW_OPCODE_IF ||
2819 inst->opcode == BRW_OPCODE_ELSE ||
2820 inst->opcode == BRW_OPCODE_ENDIF ||
2821 inst->opcode == BRW_OPCODE_DO ||
2822 inst->opcode == BRW_OPCODE_WHILE ||
2823 inst->opcode == BRW_OPCODE_BREAK ||
2824 inst->opcode == BRW_OPCODE_CONTINUE) {
2825 bb_header_ip = ip;
2826 }
2827 }
2828
2829 talloc_free(this->virtual_grf_def);
2830 talloc_free(this->virtual_grf_use);
2831 this->virtual_grf_def = def;
2832 this->virtual_grf_use = use;
2833 }
2834
2835 /**
2836 * Attempts to move immediate constants into the immediate
2837 * constant slot of following instructions.
2838 *
2839 * Immediate constants are a bit tricky -- they have to be in the last
2840 * operand slot, and you can't apply abs/negate modifiers to them.
2841 */
2842
2843 bool
2844 fs_visitor::propagate_constants()
2845 {
2846 bool progress = false;
2847
2848 foreach_iter(exec_list_iterator, iter, this->instructions) {
2849 fs_inst *inst = (fs_inst *)iter.get();
2850
2851 if (inst->opcode != BRW_OPCODE_MOV ||
2852 inst->predicated ||
2853 inst->dst.file != GRF || inst->src[0].file != IMM ||
2854 inst->dst.type != inst->src[0].type)
2855 continue;
2856
2857 /* Don't bother with cases where we should have had the
2858 * operation on the constant folded in GLSL already.
2859 */
2860 if (inst->saturate)
2861 continue;
2862
2863 /* Found a move of a constant to a GRF. Find anything else using the GRF
2864 * before it's written, and replace it with the constant if we can.
2865 */
2866 exec_list_iterator scan_iter = iter;
2867 scan_iter.next();
2868 for (; scan_iter.has_next(); scan_iter.next()) {
2869 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
2870
2871 if (scan_inst->opcode == BRW_OPCODE_DO ||
2872 scan_inst->opcode == BRW_OPCODE_WHILE ||
2873 scan_inst->opcode == BRW_OPCODE_ELSE ||
2874 scan_inst->opcode == BRW_OPCODE_ENDIF) {
2875 break;
2876 }
2877
2878 for (int i = 2; i >= 0; i--) {
2879 if (scan_inst->src[i].file != GRF ||
2880 scan_inst->src[i].reg != inst->dst.reg ||
2881 scan_inst->src[i].reg_offset != inst->dst.reg_offset)
2882 continue;
2883
2884 /* Don't bother with cases where we should have had the
2885 * operation on the constant folded in GLSL already.
2886 */
2887 if (scan_inst->src[i].negate || scan_inst->src[i].abs)
2888 continue;
2889
2890 switch (scan_inst->opcode) {
2891 case BRW_OPCODE_MOV:
2892 scan_inst->src[i] = inst->src[0];
2893 progress = true;
2894 break;
2895
2896 case BRW_OPCODE_MUL:
2897 case BRW_OPCODE_ADD:
2898 if (i == 1) {
2899 scan_inst->src[i] = inst->src[0];
2900 progress = true;
2901 } else if (i == 0 && scan_inst->src[1].file != IMM) {
2902 /* Fit this constant in by commuting the operands */
2903 scan_inst->src[0] = scan_inst->src[1];
2904 scan_inst->src[1] = inst->src[0];
progress = true;
2905 }
2906 break;
2907 case BRW_OPCODE_CMP:
2908 case BRW_OPCODE_SEL:
2909 if (i == 1) {
2910 scan_inst->src[i] = inst->src[0];
2911 progress = true;
2912 }
2913 }
2914 }
2915
2916 if (scan_inst->dst.file == GRF &&
2917 scan_inst->dst.reg == inst->dst.reg &&
2918 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
2919 scan_inst->opcode == FS_OPCODE_TEX)) {
2920 break;
2921 }
2922 }
2923 }
2924
2925 return progress;
2926 }
2927 /**
2928 * Must be called after calculate_live_intervals() to remove unused
2929 * writes to registers -- register allocation will fail otherwise
2930 * because something defined but not used won't be considered to
2931 * interfere with other regs.
2932 */
2933 bool
2934 fs_visitor::dead_code_eliminate()
2935 {
2936 bool progress = false;
2937 int pc = 0;
2938
2939 foreach_iter(exec_list_iterator, iter, this->instructions) {
2940 fs_inst *inst = (fs_inst *)iter.get();
2941
2942 if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
2943 inst->remove();
2944 progress = true;
2945 }
2946
2947 pc++;
2948 }
2949
2950 return progress;
2951 }
2952
2953 bool
2954 fs_visitor::register_coalesce()
2955 {
2956 bool progress = false;
2957
2958 foreach_iter(exec_list_iterator, iter, this->instructions) {
2959 fs_inst *inst = (fs_inst *)iter.get();
2960
2961 if (inst->opcode != BRW_OPCODE_MOV ||
2962 inst->predicated ||
2963 inst->saturate ||
2964 inst->dst.file != GRF || inst->src[0].file != GRF ||
2965 inst->dst.type != inst->src[0].type)
2966 continue;
2967
2968 /* Found a move of a GRF to a GRF. Let's see if we can coalesce
2969 * them: check for no writes to either one until the exit of the
2970 * program.
2971 */
2972 bool interfered = false;
2973 exec_list_iterator scan_iter = iter;
2974 scan_iter.next();
2975 for (; scan_iter.has_next(); scan_iter.next()) {
2976 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
2977
2978 if (scan_inst->opcode == BRW_OPCODE_DO ||
2979 scan_inst->opcode == BRW_OPCODE_WHILE ||
2980 scan_inst->opcode == BRW_OPCODE_ENDIF) {
2981 interfered = true;
2982 iter = scan_iter;
2983 break;
2984 }
2985
2986 if (scan_inst->dst.file == GRF) {
2987 if (scan_inst->dst.reg == inst->dst.reg &&
2988 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
2989 scan_inst->opcode == FS_OPCODE_TEX)) {
2990 interfered = true;
2991 break;
2992 }
2993 if (scan_inst->dst.reg == inst->src[0].reg &&
2994 (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
2995 scan_inst->opcode == FS_OPCODE_TEX)) {
2996 interfered = true;
2997 break;
2998 }
2999 }
3000 }
3001 if (interfered) {
3002 continue;
3003 }
3004
3005 /* Update live interval so we don't have to recalculate. */
3006 this->virtual_grf_use[inst->src[0].reg] = MAX2(virtual_grf_use[inst->src[0].reg],
3007 virtual_grf_use[inst->dst.reg]);
3008
3009 /* Rewrite the later usage to point at the source of the move to
3010 * be removed.
3011 */
3012 for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
3013 scan_iter.next()) {
3014 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
3015
3016 for (int i = 0; i < 3; i++) {
3017 if (scan_inst->src[i].file == GRF &&
3018 scan_inst->src[i].reg == inst->dst.reg &&
3019 scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
3020 scan_inst->src[i].reg = inst->src[0].reg;
3021 scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
3022 scan_inst->src[i].abs |= inst->src[0].abs;
3023 scan_inst->src[i].negate ^= inst->src[0].negate;
3024 scan_inst->src[i].smear = inst->src[0].smear;
3025 }
3026 }
3027 }
3028
3029 inst->remove();
3030 progress = true;
3031 }
3032
3033 return progress;
3034 }
3035
3036
3037 bool
3038 fs_visitor::compute_to_mrf()
3039 {
3040 bool progress = false;
3041 int next_ip = 0;
3042
3043 foreach_iter(exec_list_iterator, iter, this->instructions) {
3044 fs_inst *inst = (fs_inst *)iter.get();
3045
3046 int ip = next_ip;
3047 next_ip++;
3048
3049 if (inst->opcode != BRW_OPCODE_MOV ||
3050 inst->predicated ||
3051 inst->dst.file != MRF || inst->src[0].file != GRF ||
3052 inst->dst.type != inst->src[0].type ||
3053 inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
3054 continue;
3055
3056 /* Can't compute-to-MRF this GRF if someone else was going to
3057 * read it later.
3058 */
3059 if (this->virtual_grf_use[inst->src[0].reg] > ip)
3060 continue;
3061
3062 /* Found a move of a GRF to a MRF. Let's see if we can go
3063 * rewrite the thing that made this GRF to write into the MRF.
3064 */
3065 fs_inst *scan_inst;
3066 for (scan_inst = (fs_inst *)inst->prev;
3067 scan_inst->prev != NULL;
3068 scan_inst = (fs_inst *)scan_inst->prev) {
3069 if (scan_inst->dst.file == GRF &&
3070 scan_inst->dst.reg == inst->src[0].reg) {
3071 /* Found the last thing to write our reg we want to turn
3072 * into a compute-to-MRF.
3073 */
3074
3075 if (scan_inst->opcode == FS_OPCODE_TEX) {
3076 /* texturing writes several contiguous regs, so we can't
3077 * compute-to-mrf that.
3078 */
3079 break;
3080 }
3081
3082 /* If it's predicated, it (probably) didn't populate all
3083 * the channels.
3084 */
3085 if (scan_inst->predicated)
3086 break;
3087
3088 /* SEND instructions can't have MRF as a destination. */
3089 if (scan_inst->mlen)
3090 break;
3091
3092 if (intel->gen >= 6) {
3093 /* gen6 math instructions must have the destination be
3094 * GRF, so no compute-to-MRF for them.
3095 */
3096 if (scan_inst->opcode == FS_OPCODE_RCP ||
3097 scan_inst->opcode == FS_OPCODE_RSQ ||
3098 scan_inst->opcode == FS_OPCODE_SQRT ||
3099 scan_inst->opcode == FS_OPCODE_EXP2 ||
3100 scan_inst->opcode == FS_OPCODE_LOG2 ||
3101 scan_inst->opcode == FS_OPCODE_SIN ||
3102 scan_inst->opcode == FS_OPCODE_COS ||
3103 scan_inst->opcode == FS_OPCODE_POW) {
3104 break;
3105 }
3106 }
3107
3108 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
3109 /* Found the creator of our MRF's source value. */
3110 scan_inst->dst.file = MRF;
3111 scan_inst->dst.hw_reg = inst->dst.hw_reg;
3112 scan_inst->saturate |= inst->saturate;
3113 inst->remove();
3114 progress = true;
3115 }
3116 break;
3117 }
3118
3119 /* We don't handle flow control here. Most computation of
3120 * values that end up in MRFs are shortly before the MRF
3121 * write anyway.
3122 */
3123 if (scan_inst->opcode == BRW_OPCODE_DO ||
3124 scan_inst->opcode == BRW_OPCODE_WHILE ||
3125 scan_inst->opcode == BRW_OPCODE_ENDIF) {
3126 break;
3127 }
3128
3129 /* You can't read from an MRF, so if someone else reads our
3130 * MRF's source GRF that we wanted to rewrite, that stops us.
3131 */
3132 bool interfered = false;
3133 for (int i = 0; i < 3; i++) {
3134 if (scan_inst->src[i].file == GRF &&
3135 scan_inst->src[i].reg == inst->src[0].reg &&
3136 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
3137 interfered = true;
3138 }
3139 }
3140 if (interfered)
3141 break;
3142
3143 if (scan_inst->dst.file == MRF &&
3144 scan_inst->dst.hw_reg == inst->dst.hw_reg) {
3145 /* Somebody else wrote our MRF here, so we can't
3146 * compute-to-MRF before that.
3147 */
3148 break;
3149 }
3150
3151 if (scan_inst->mlen > 0) {
3152 /* Found a SEND instruction, which means that there are
3153 * live values in MRFs from base_mrf to base_mrf +
3154 * scan_inst->mlen - 1. Don't go pushing our MRF write up
3155 * above it.
3156 */
3157 if (inst->dst.hw_reg >= scan_inst->base_mrf &&
3158 inst->dst.hw_reg < scan_inst->base_mrf + scan_inst->mlen) {
3159 break;
3160 }
3161 }
3162 }
3163 }
3164
3165 return progress;
3166 }
3167
3168 /**
3169 * Walks through basic blocks, looking for repeated MRF writes and
3170 * removing the later ones.
3171 */
3172 bool
3173 fs_visitor::remove_duplicate_mrf_writes()
3174 {
3175 fs_inst *last_mrf_move[16];
3176 bool progress = false;
3177
3178 memset(last_mrf_move, 0, sizeof(last_mrf_move));
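/* last_mrf_move[i] tracks the most recent GRF-to-MRF MOV into MRF i
 * within the current basic block, or NULL if unknown.
 */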
3179
3180 foreach_iter(exec_list_iterator, iter, this->instructions) {
3181 fs_inst *inst = (fs_inst *)iter.get();
3182
3183 switch (inst->opcode) {
3184 case BRW_OPCODE_DO:
3185 case BRW_OPCODE_WHILE:
3186 case BRW_OPCODE_IF:
3187 case BRW_OPCODE_ELSE:
3188 case BRW_OPCODE_ENDIF:
3189 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3190 continue;
3191 default:
3192 break;
3193 }
3194
3195 if (inst->opcode == BRW_OPCODE_MOV &&
3196 inst->dst.file == MRF) {
3197 fs_inst *prev_inst = last_mrf_move[inst->dst.hw_reg];
3198 if (prev_inst && inst->equals(prev_inst)) {
3199 inst->remove();
3200 progress = true;
3201 continue;
3202 }
3203 }
3204
3205 /* Clear out the last-write records for MRFs that were overwritten. */
3206 if (inst->dst.file == MRF) {
3207 last_mrf_move[inst->dst.hw_reg] = NULL;
3208 }
3209
3210 if (inst->mlen > 0) {
3211 /* Found a SEND instruction, which will include two or fewer
3212 * implied MRF writes. We could do better here.
3213 */
3214 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3215 last_mrf_move[inst->base_mrf + i] = NULL;
3216 }
3217 }
3218
3219 /* Clear out any MRF move records whose sources got overwritten. */
3220 if (inst->dst.file == GRF) {
3221 for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
3222 if (last_mrf_move[i] &&
3223 last_mrf_move[i]->src[0].reg == inst->dst.reg) {
3224 last_mrf_move[i] = NULL;
3225 }
3226 }
3227 }
3228
3229 if (inst->opcode == BRW_OPCODE_MOV &&
3230 inst->dst.file == MRF &&
3231 inst->src[0].file == GRF &&
3232 !inst->predicated) {
3233 last_mrf_move[inst->dst.hw_reg] = inst;
3234 }
3235 }
3236
3237 return progress;
3238 }
3239
3240 bool
3241 fs_visitor::virtual_grf_interferes(int a, int b)
3242 {
3243 int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
3244 int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);
3245
3246 /* For dead code, just check if the def interferes with the other range. */
3247 if (this->virtual_grf_use[a] == -1) {
3248 return (this->virtual_grf_def[a] >= this->virtual_grf_def[b] &&
3249 this->virtual_grf_def[a] < this->virtual_grf_use[b]);
3250 }
3251 if (this->virtual_grf_use[b] == -1) {
3252 return (this->virtual_grf_def[b] >= this->virtual_grf_def[a] &&
3253 this->virtual_grf_def[b] < this->virtual_grf_use[a]);
3254 }
3255
3256 return start < end;
3257 }
3258
3259 static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg)
3260 {
3261 struct brw_reg brw_reg;
3262
3263 switch (reg->file) {
3264 case GRF:
3265 case ARF:
3266 case MRF:
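/* smear == -1 means the whole vec8 register; otherwise pick out a
 * single component as a vec1.
 */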
3267 if (reg->smear == -1) {
3268 brw_reg = brw_vec8_reg(reg->file,
3269 reg->hw_reg, 0);
3270 } else {
3271 brw_reg = brw_vec1_reg(reg->file,
3272 reg->hw_reg, reg->smear);
3273 }
3274 brw_reg = retype(brw_reg, reg->type);
3275 break;
3276 case IMM:
3277 switch (reg->type) {
3278 case BRW_REGISTER_TYPE_F:
3279 brw_reg = brw_imm_f(reg->imm.f);
3280 break;
3281 case BRW_REGISTER_TYPE_D:
3282 brw_reg = brw_imm_d(reg->imm.i);
3283 break;
3284 case BRW_REGISTER_TYPE_UD:
3285 brw_reg = brw_imm_ud(reg->imm.u);
3286 break;
3287 default:
3288 assert(!"not reached");
3289 brw_reg = brw_null_reg();
3290 break;
3291 }
3292 break;
3293 case FIXED_HW_REG:
3294 brw_reg = reg->fixed_hw_reg;
3295 break;
3296 case BAD_FILE:
3297 /* Probably unused. */
3298 brw_reg = brw_null_reg();
3299 break;
3300 case UNIFORM:
3301 assert(!"not reached");
3302 brw_reg = brw_null_reg();
3303 break;
3304 default:
3305 assert(!"not reached");
3306 brw_reg = brw_null_reg();
3307 break;
3308 }
3309 if (reg->abs)
3310 brw_reg = brw_abs(brw_reg);
3311 if (reg->negate)
3312 brw_reg = negate(brw_reg);
3313
3314 return brw_reg;
3315 }
3316
3317 void
3318 fs_visitor::generate_code()
3319 {
3320 int last_native_inst = 0;
3321 struct brw_instruction *if_stack[16], *loop_stack[16];
3322 int if_stack_depth = 0, loop_stack_depth = 0;
3323 int if_depth_in_loop[16];
3324 const char *last_annotation_string = NULL;
3325 ir_instruction *last_annotation_ir = NULL;
3326
3327 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3328 printf("Native code for fragment shader %d:\n",
3329 ctx->Shader.CurrentFragmentProgram->Name);
3330 }
3331
3332 if_depth_in_loop[loop_stack_depth] = 0;
3333
3334 memset(&if_stack, 0, sizeof(if_stack));
3335 foreach_iter(exec_list_iterator, iter, this->instructions) {
3336 fs_inst *inst = (fs_inst *)iter.get();
3337 struct brw_reg src[3], dst;
3338
3339 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3340 if (last_annotation_ir != inst->ir) {
3341 last_annotation_ir = inst->ir;
3342 if (last_annotation_ir) {
3343 printf(" ");
3344 last_annotation_ir->print();
3345 printf("\n");
3346 }
3347 }
3348 if (last_annotation_string != inst->annotation) {
3349 last_annotation_string = inst->annotation;
3350 if (last_annotation_string)
3351 printf(" %s\n", last_annotation_string);
3352 }
3353 }
3354
3355 for (unsigned int i = 0; i < 3; i++) {
3356 src[i] = brw_reg_from_fs_reg(&inst->src[i]);
3357 }
3358 dst = brw_reg_from_fs_reg(&inst->dst);
3359
3360 brw_set_conditionalmod(p, inst->conditional_mod);
3361 brw_set_predicate_control(p, inst->predicated);
3362 brw_set_saturate(p, inst->saturate);
3363
3364 switch (inst->opcode) {
3365 case BRW_OPCODE_MOV:
3366 brw_MOV(p, dst, src[0]);
3367 break;
3368 case BRW_OPCODE_ADD:
3369 brw_ADD(p, dst, src[0], src[1]);
3370 break;
3371 case BRW_OPCODE_MUL:
3372 brw_MUL(p, dst, src[0], src[1]);
3373 break;
3374
3375 case BRW_OPCODE_FRC:
3376 brw_FRC(p, dst, src[0]);
3377 break;
3378 case BRW_OPCODE_RNDD:
3379 brw_RNDD(p, dst, src[0]);
3380 break;
3381 case BRW_OPCODE_RNDE:
3382 brw_RNDE(p, dst, src[0]);
3383 break;
3384 case BRW_OPCODE_RNDZ:
3385 brw_RNDZ(p, dst, src[0]);
3386 break;
3387
3388 case BRW_OPCODE_AND:
3389 brw_AND(p, dst, src[0], src[1]);
3390 break;
3391 case BRW_OPCODE_OR:
3392 brw_OR(p, dst, src[0], src[1]);
3393 break;
3394 case BRW_OPCODE_XOR:
3395 brw_XOR(p, dst, src[0], src[1]);
3396 break;
3397 case BRW_OPCODE_NOT:
3398 brw_NOT(p, dst, src[0]);
3399 break;
3400 case BRW_OPCODE_ASR:
3401 brw_ASR(p, dst, src[0], src[1]);
3402 break;
3403 case BRW_OPCODE_SHR:
3404 brw_SHR(p, dst, src[0], src[1]);
3405 break;
3406 case BRW_OPCODE_SHL:
3407 brw_SHL(p, dst, src[0], src[1]);
3408 break;
3409
3410 case BRW_OPCODE_CMP:
3411 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
3412 break;
3413 case BRW_OPCODE_SEL:
3414 brw_SEL(p, dst, src[0], src[1]);
3415 break;
3416
3417 case BRW_OPCODE_IF:
3418 assert(if_stack_depth < 16);
3419 if (inst->src[0].file != BAD_FILE) {
3420 assert(intel->gen >= 6);
3421 if_stack[if_stack_depth] = brw_IF_gen6(p, inst->conditional_mod, src[0], src[1]);
3422 } else {
3423 if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
3424 }
3425 if_depth_in_loop[loop_stack_depth]++;
3426 if_stack_depth++;
3427 break;
3428
3429 case BRW_OPCODE_ELSE:
3430 if_stack[if_stack_depth - 1] =
3431 brw_ELSE(p, if_stack[if_stack_depth - 1]);
3432 break;
3433 case BRW_OPCODE_ENDIF:
3434 if_stack_depth--;
3435 brw_ENDIF(p, if_stack[if_stack_depth]);
3436 if_depth_in_loop[loop_stack_depth]--;
3437 break;
3438
3439 case BRW_OPCODE_DO:
3440 loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
3441 if_depth_in_loop[loop_stack_depth] = 0;
3442 break;
3443
3444 case BRW_OPCODE_BREAK:
3445 brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
3446 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3447 break;
3448 case BRW_OPCODE_CONTINUE:
3449 /* FINISHME: We need to write the loop instruction support still. */
3450 if (intel->gen >= 6)
3451 brw_CONT_gen6(p, loop_stack[loop_stack_depth - 1]);
3452 else
3453 brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
3454 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3455 break;
3456
3457 case BRW_OPCODE_WHILE: {
3458 struct brw_instruction *inst0, *inst1;
3459 GLuint br = 1;
3460
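/* Jump counts are encoded in 64-bit halves on gen5+ (two per 128-bit
 * instruction), and in whole instructions before that.
 */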
3461 if (intel->gen >= 5)
3462 br = 2;
3463
3464 assert(loop_stack_depth > 0);
3465 loop_stack_depth--;
3466 inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
3467 if (intel->gen < 6) {
3468 /* patch all the BREAK/CONT instructions from last BGNLOOP */
3469 while (inst0 > loop_stack[loop_stack_depth]) {
3470 inst0--;
3471 if (inst0->header.opcode == BRW_OPCODE_BREAK &&
3472 inst0->bits3.if_else.jump_count == 0) {
3473 inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
3474 }
3475 else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
3476 inst0->bits3.if_else.jump_count == 0) {
3477 inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
3478 }
3479 }
3480 }
3481 }
3482 break;
3483
3484 case FS_OPCODE_RCP:
3485 case FS_OPCODE_RSQ:
3486 case FS_OPCODE_SQRT:
3487 case FS_OPCODE_EXP2:
3488 case FS_OPCODE_LOG2:
3489 case FS_OPCODE_POW:
3490 case FS_OPCODE_SIN:
3491 case FS_OPCODE_COS:
3492 generate_math(inst, dst, src);
3493 break;
3494 case FS_OPCODE_LINTERP:
3495 generate_linterp(inst, dst, src);
3496 break;
3497 case FS_OPCODE_TEX:
3498 case FS_OPCODE_TXB:
3499 case FS_OPCODE_TXL:
3500 generate_tex(inst, dst);
3501 break;
3502 case FS_OPCODE_DISCARD_NOT:
3503 generate_discard_not(inst, dst);
3504 break;
3505 case FS_OPCODE_DISCARD_AND:
3506 generate_discard_and(inst, src[0]);
3507 break;
3508 case FS_OPCODE_DDX:
3509 generate_ddx(inst, dst, src[0]);
3510 break;
3511 case FS_OPCODE_DDY:
3512 generate_ddy(inst, dst, src[0]);
3513 break;
3514
3515 case FS_OPCODE_SPILL:
3516 generate_spill(inst, src[0]);
3517 break;
3518
3519 case FS_OPCODE_UNSPILL:
3520 generate_unspill(inst, dst);
3521 break;
3522
3523 case FS_OPCODE_PULL_CONSTANT_LOAD:
3524 generate_pull_constant_load(inst, dst);
3525 break;
3526
3527 case FS_OPCODE_FB_WRITE:
3528 generate_fb_write(inst);
3529 break;
3530 default:
3531 if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
3532 _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
3533 brw_opcodes[inst->opcode].name);
3534 } else {
3535 _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
3536 }
3537 this->fail = true;
3538 }
3539
3540 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3541 for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
3542 if (0) {
3543 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
3544 ((uint32_t *)&p->store[i])[3],
3545 ((uint32_t *)&p->store[i])[2],
3546 ((uint32_t *)&p->store[i])[1],
3547 ((uint32_t *)&p->store[i])[0]);
3548 }
3549 brw_disasm(stdout, &p->store[i], intel->gen);
3550 }
3551 }
3552
3553 last_native_inst = p->nr_insn;
3554 }
3555
3556 brw_set_uip_jip(p);
3557
3558 /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
3559 * emit issues, it doesn't get the jump distances into the output,
3560 * which is often something we want to debug. So this is here in
3561 * case you're doing that.
3562 */
3563 if (0) {
3564 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3565 for (unsigned int i = 0; i < p->nr_insn; i++) {
3566 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
3567 ((uint32_t *)&p->store[i])[3],
3568 ((uint32_t *)&p->store[i])[2],
3569 ((uint32_t *)&p->store[i])[1],
3570 ((uint32_t *)&p->store[i])[0]);
3571 brw_disasm(stdout, &p->store[i], intel->gen);
3572 }
3573 }
3574 }
3575 }
3576
3577 GLboolean
3578 brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
3579 {
3580 struct intel_context *intel = &brw->intel;
3581 struct gl_context *ctx = &intel->ctx;
3582 struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;
3583
3584 if (!prog)
3585 return GL_FALSE;
3586
3587 struct brw_shader *shader =
3588 (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
3589 if (!shader)
3590 return GL_FALSE;
3591
3592 /* We always use 8-wide mode, at least for now. For one, flow
3593 * control only works in 8-wide. Also, when we're fragment shader
3594 * bound, we're almost always under register pressure as well, so
3595 * 8-wide would save us from the performance cliff of spilling
3596 * regs.
3597 */
3598 c->dispatch_width = 8;
3599
3600 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3601 printf("GLSL IR for native fragment shader %d:\n", prog->Name);
3602 _mesa_print_ir(shader->ir, NULL);
3603 printf("\n");
3604 }
3605
3606 /* Now the main event: Visit the shader IR and generate our FS IR for it.
3607 */
3608 fs_visitor v(c, shader);
3609
3610 if (0) {
3611 v.emit_dummy_fs();
3612 } else {
3613 v.calculate_urb_setup();
3614 if (intel->gen < 6)
3615 v.emit_interpolation_setup_gen4();
3616 else
3617 v.emit_interpolation_setup_gen6();
3618
3619 /* Generate FS IR for main(). (The visitor only descends into
3620 * functions called "main".)
3621 */
3622 foreach_iter(exec_list_iterator, iter, *shader->ir) {
3623 ir_instruction *ir = (ir_instruction *)iter.get();
3624 v.base_ir = ir;
3625 ir->accept(&v);
3626 }
3627
3628 v.emit_fb_writes();
3629
3630 v.split_virtual_grfs();
3631 v.setup_pull_constants();
3632
3633 v.assign_curb_setup();
3634 v.assign_urb_setup();
3635
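/* Iterate the cheap local optimizations to a fixed point; each pass
 * can expose new opportunities for the others.
 */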
3636 bool progress;
3637 do {
3638 progress = false;
3639
3640 progress = v.remove_duplicate_mrf_writes() || progress;
3641
3642 v.calculate_live_intervals();
3643 progress = v.propagate_constants() || progress;
3644 progress = v.register_coalesce() || progress;
3645 progress = v.compute_to_mrf() || progress;
3646 progress = v.dead_code_eliminate() || progress;
3647 } while (progress);
3648
3649 if (0) {
3650 /* Debug of register spilling: Go spill everything. */
3651 int virtual_grf_count = v.virtual_grf_next;
3652 for (int i = 1; i < virtual_grf_count; i++) {
3653 v.spill_reg(i);
3654 }
3655 v.calculate_live_intervals();
3656 }
3657
3658 if (0)
3659 v.assign_regs_trivial();
3660 else {
3661 while (!v.assign_regs()) {
3662 if (v.fail)
3663 break;
3664
3665 v.calculate_live_intervals();
3666 }
3667 }
3668 }
3669
3670 if (!v.fail)
3671 v.generate_code();
3672
3673 assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */
3674
3675 if (v.fail)
3676 return GL_FALSE;
3677
3678 c->prog_data.total_grf = v.grf_used;
3679
3680 return GL_TRUE;
3681 }