src/mesa/drivers/dri/i965/brw_fs.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 extern "C" {
29
30 #include <sys/types.h>
31
32 #include "main/macros.h"
33 #include "main/shaderobj.h"
34 #include "main/uniforms.h"
35 #include "program/prog_parameter.h"
36 #include "program/prog_print.h"
37 #include "program/prog_optimize.h"
38 #include "program/register_allocate.h"
39 #include "program/sampler.h"
40 #include "program/hash_table.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_wm.h"
44 }
45 #include "brw_fs.h"
46 #include "../glsl/glsl_types.h"
47 #include "../glsl/ir_optimization.h"
48 #include "../glsl/ir_print_visitor.h"
49
50 #define MAX_INSTRUCTION (1 << 30)
51 static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
52
53 struct gl_shader *
54 brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
55 {
56 struct brw_shader *shader;
57
58 shader = rzalloc(NULL, struct brw_shader);
59 if (shader) {
60 shader->base.Type = type;
61 shader->base.Name = name;
62 _mesa_init_shader(ctx, &shader->base);
63 }
64
65 return &shader->base;
66 }
67
68 struct gl_shader_program *
69 brw_new_shader_program(struct gl_context *ctx, GLuint name)
70 {
71 struct brw_shader_program *prog;
72 prog = rzalloc(NULL, struct brw_shader_program);
73 if (prog) {
74 prog->base.Name = name;
75 _mesa_init_shader_program(ctx, &prog->base);
76 }
77 return &prog->base;
78 }
79
80 GLboolean
81 brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
82 {
83 struct brw_context *brw = brw_context(ctx);
84 struct intel_context *intel = &brw->intel;
85
86 struct brw_shader *shader =
87 (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
88 if (shader != NULL) {
89 void *mem_ctx = ralloc_context(NULL);
90 bool progress;
91
92 if (shader->ir)
93 ralloc_free(shader->ir);
94 shader->ir = new(shader) exec_list;
95 clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
96
97 do_mat_op_to_vec(shader->ir);
98 lower_instructions(shader->ir,
99 MOD_TO_FRACT |
100 DIV_TO_MUL_RCP |
101 SUB_TO_ADD_NEG |
102 EXP_TO_EXP2 |
103 LOG_TO_LOG2);
104
105 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
106 * if-statements need to be flattened.
107 */
108 if (intel->gen < 6)
109 lower_if_to_cond_assign(shader->ir, 16);
110
111 do_lower_texture_projection(shader->ir);
112 do_vec_index_to_cond_assign(shader->ir);
113 brw_do_cubemap_normalize(shader->ir);
114 lower_noise(shader->ir);
115 lower_quadop_vector(shader->ir, false);
116 lower_variable_index_to_cond_assign(shader->ir,
117 GL_TRUE, /* input */
118 GL_TRUE, /* output */
119 GL_TRUE, /* temp */
120 GL_TRUE /* uniform */
121 );
122
123 do {
124 progress = false;
125
126 brw_do_channel_expressions(shader->ir);
127 brw_do_vector_splitting(shader->ir);
128
129 progress = do_lower_jumps(shader->ir, true, true,
130 true, /* main return */
131 false, /* continue */
132 false /* loops */
133 ) || progress;
134
135 progress = do_common_optimization(shader->ir, true, 32) || progress;
136 } while (progress);
137
138 validate_ir_tree(shader->ir);
139
140 reparent_ir(shader->ir, shader->ir);
141 ralloc_free(mem_ctx);
142 }
143
144 if (!_mesa_ir_link_shader(ctx, prog))
145 return GL_FALSE;
146
147 return GL_TRUE;
148 }
149
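/* A few illustrative values for the recursion below (a sketch, not from
 * the original source): type_size(float) == 1, type_size(vec4) == 4,
 * type_size(mat3) == 9 (a matrix's base_type is FLOAT, so components()
 * covers every column), type_size(vec3[4]) == 12, and samplers == 0.
 */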
150 static int
151 type_size(const struct glsl_type *type)
152 {
153 unsigned int size, i;
154
155 switch (type->base_type) {
156 case GLSL_TYPE_UINT:
157 case GLSL_TYPE_INT:
158 case GLSL_TYPE_FLOAT:
159 case GLSL_TYPE_BOOL:
160 return type->components();
161 case GLSL_TYPE_ARRAY:
162 return type_size(type->fields.array) * type->length;
163 case GLSL_TYPE_STRUCT:
164 size = 0;
165 for (i = 0; i < type->length; i++) {
166 size += type_size(type->fields.structure[i].type);
167 }
168 return size;
169 case GLSL_TYPE_SAMPLER:
170 /* Samplers take up no register space, since they're baked in at
171 * link time.
172 */
173 return 0;
174 default:
175 assert(!"not reached");
176 return 0;
177 }
178 }
179
180 void
181 fs_visitor::fail(const char *format, ...)
182 {
183 if (!failed) {
184 failed = true;
185
186 if (INTEL_DEBUG & DEBUG_WM) {
187 fprintf(stderr, "FS compile failed: ");
188
189 va_list va;
190 va_start(va, format);
191 vfprintf(stderr, format, va);
192 va_end(va);
193 }
194 }
195 }
196
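/* These counters bracket instruction emission: while the stack depth is
 * nonzero, fs_visitor::emit() below tags each newly emitted instruction
 * as force_uncompressed (or force_sechalf), restricting it to one half
 * of a 16-wide dispatch.
 */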
197 void
198 fs_visitor::push_force_uncompressed()
199 {
200 force_uncompressed_stack++;
201 }
202
203 void
204 fs_visitor::pop_force_uncompressed()
205 {
206 force_uncompressed_stack--;
207 assert(force_uncompressed_stack >= 0);
208 }
209
210 void
211 fs_visitor::push_force_sechalf()
212 {
213 force_sechalf_stack++;
214 }
215
216 void
217 fs_visitor::pop_force_sechalf()
218 {
219 force_sechalf_stack--;
220 assert(force_sechalf_stack >= 0);
221 }
222
223 /**
224 * Returns how many MRFs an FS opcode will write over.
225 *
226 * Note that this is not the same as the 0 or 1 MRF writes implied by the
227 * actual hardware instruction -- the FS opcodes often generate extra MOVs in addition.
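*
* For example, with the arithmetic below: a SIMD16 FS_OPCODE_POW counts
* 2 * 16 / 8 = 4 MRF writes, while a SIMD8 FS_OPCODE_RCP counts just 1.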
228 */
229 int
230 fs_visitor::implied_mrf_writes(fs_inst *inst)
231 {
232 if (inst->mlen == 0)
233 return 0;
234
235 switch (inst->opcode) {
236 case FS_OPCODE_RCP:
237 case FS_OPCODE_RSQ:
238 case FS_OPCODE_SQRT:
239 case FS_OPCODE_EXP2:
240 case FS_OPCODE_LOG2:
241 case FS_OPCODE_SIN:
242 case FS_OPCODE_COS:
243 return 1 * c->dispatch_width / 8;
244 case FS_OPCODE_POW:
245 return 2 * c->dispatch_width / 8;
246 case FS_OPCODE_TEX:
247 case FS_OPCODE_TXB:
248 case FS_OPCODE_TXD:
249 case FS_OPCODE_TXL:
250 return 1;
251 case FS_OPCODE_FB_WRITE:
252 return 2;
253 case FS_OPCODE_PULL_CONSTANT_LOAD:
254 case FS_OPCODE_UNSPILL:
255 return 1;
256 case FS_OPCODE_SPILL:
257 return 2;
258 default:
259 assert(!"not reached");
260 return inst->mlen;
261 }
262 }
263
264 int
265 fs_visitor::virtual_grf_alloc(int size)
266 {
267 if (virtual_grf_array_size <= virtual_grf_next) {
268 if (virtual_grf_array_size == 0)
269 virtual_grf_array_size = 16;
270 else
271 virtual_grf_array_size *= 2;
272 virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
273 virtual_grf_array_size);
274
275 /* This slot is always unused. */
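/* (Presumably because GRF number 0 doubles as a "no register" sentinel --
 * note the assert in visit(ir_dereference_array) that a GRF result has
 * reg != 0.)
 */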
276 virtual_grf_sizes[0] = 0;
277 }
278 virtual_grf_sizes[virtual_grf_next] = size;
279 return virtual_grf_next++;
280 }
281
282 /** Fixed HW reg constructor. */
283 fs_reg::fs_reg(enum register_file file, int hw_reg)
284 {
285 init();
286 this->file = file;
287 this->hw_reg = hw_reg;
288 this->type = BRW_REGISTER_TYPE_F;
289 }
290
291 /** Fixed HW reg constructor. */
292 fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
293 {
294 init();
295 this->file = file;
296 this->hw_reg = hw_reg;
297 this->type = type;
298 }
299
300 int
301 brw_type_for_base_type(const struct glsl_type *type)
302 {
303 switch (type->base_type) {
304 case GLSL_TYPE_FLOAT:
305 return BRW_REGISTER_TYPE_F;
306 case GLSL_TYPE_INT:
307 case GLSL_TYPE_BOOL:
308 return BRW_REGISTER_TYPE_D;
309 case GLSL_TYPE_UINT:
310 return BRW_REGISTER_TYPE_UD;
311 case GLSL_TYPE_ARRAY:
312 case GLSL_TYPE_STRUCT:
313 case GLSL_TYPE_SAMPLER:
314 /* These should be overridden with the type of the member when
315 * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
316 * way to trip up if we don't.
317 */
318 return BRW_REGISTER_TYPE_UD;
319 default:
320 assert(!"not reached");
321 return BRW_REGISTER_TYPE_F;
322 }
323 }
324
325 /** Automatic reg constructor. */
326 fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
327 {
328 init();
329
330 this->file = GRF;
331 this->reg = v->virtual_grf_alloc(type_size(type));
332 this->reg_offset = 0;
333 this->type = brw_type_for_base_type(type);
334 }
335
336 fs_reg *
337 fs_visitor::variable_storage(ir_variable *var)
338 {
339 return (fs_reg *)hash_table_find(this->variable_ht, var);
340 }
341
342 void
343 import_uniforms_callback(const void *key,
344 void *data,
345 void *closure)
346 {
347 struct hash_table *dst_ht = (struct hash_table *)closure;
348 const fs_reg *reg = (const fs_reg *)data;
349
350 if (reg->file != UNIFORM)
351 return;
352
353 hash_table_insert(dst_ht, data, key);
354 }
355
356 /* For 16-wide, we reuse the uniform setup from the 8-wide dispatch.
357 * This brings in those uniform definitions.
358 */
359 void
360 fs_visitor::import_uniforms(struct hash_table *src_variable_ht)
361 {
362 hash_table_call_foreach(src_variable_ht,
363 import_uniforms_callback,
364 variable_ht);
365 }
366
367 /* Our support for uniforms is piggy-backed on the struct
368 * gl_fragment_program, because that's where the values actually
369 * get stored, rather than in some global gl_shader_program uniform
370 * store.
371 */
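/* Layout sketch, inferred from the recursion below: each scalar or vector
 * leaf consumes one uniform location and pushes vector_elements entries
 * into c->prog_data.param*, so e.g. a mat2 visits two vec2 columns and
 * ends up as 4 params spread over 2 consecutive locations.
 */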
372 int
373 fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
374 {
375 unsigned int offset = 0;
376
377 if (type->is_matrix()) {
378 const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
379 type->vector_elements,
380 1);
381
382 for (unsigned int i = 0; i < type->matrix_columns; i++) {
383 offset += setup_uniform_values(loc + offset, column);
384 }
385
386 return offset;
387 }
388
389 switch (type->base_type) {
390 case GLSL_TYPE_FLOAT:
391 case GLSL_TYPE_UINT:
392 case GLSL_TYPE_INT:
393 case GLSL_TYPE_BOOL:
394 for (unsigned int i = 0; i < type->vector_elements; i++) {
395 unsigned int param = c->prog_data.nr_params++;
396
397 assert(param < ARRAY_SIZE(c->prog_data.param));
398
399 switch (type->base_type) {
400 case GLSL_TYPE_FLOAT:
401 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
402 break;
403 case GLSL_TYPE_UINT:
404 c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
405 break;
406 case GLSL_TYPE_INT:
407 c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
408 break;
409 case GLSL_TYPE_BOOL:
410 c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
411 break;
412 default:
413 assert(!"not reached");
414 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
415 break;
416 }
417 this->param_index[param] = loc;
418 this->param_offset[param] = i;
419 }
420 return 1;
421
422 case GLSL_TYPE_STRUCT:
423 for (unsigned int i = 0; i < type->length; i++) {
424 offset += setup_uniform_values(loc + offset,
425 type->fields.structure[i].type);
426 }
427 return offset;
428
429 case GLSL_TYPE_ARRAY:
430 for (unsigned int i = 0; i < type->length; i++) {
431 offset += setup_uniform_values(loc + offset, type->fields.array);
432 }
433 return offset;
434
435 case GLSL_TYPE_SAMPLER:
436 /* The sampler takes up a slot, but we don't use any values from it. */
437 return 1;
438
439 default:
440 assert(!"not reached");
441 return 0;
442 }
443 }
444
445
446 /* Our support for builtin uniforms is even scarier than non-builtin.
447 * It sits on top of the PROG_STATE_VAR parameters that are
448 * automatically updated from GL context state.
449 */
450 void
451 fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
452 {
453 const ir_state_slot *const slots = ir->state_slots;
454 assert(ir->state_slots != NULL);
455
456 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
457 /* This state reference has already been setup by ir_to_mesa, but we'll
458 * get the same index back here.
459 */
460 int index = _mesa_add_state_reference(this->fp->Base.Parameters,
461 (gl_state_index *)slots[i].tokens);
462
463 /* Add each of the unique swizzles of the element as a parameter.
464 * This'll end up matching the expected layout of the
465 * array/matrix/structure we're trying to fill in.
466 */
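/* A sketch of the dedup: a state var swizzled .xyzw contributes four
 * params with offsets 0..3, while one splatted as .xxxx stops after a
 * single channel, since a consecutive repeated swizzle component
 * terminates the loop.
 */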
467 int last_swiz = -1;
468 for (unsigned int j = 0; j < 4; j++) {
469 int swiz = GET_SWZ(slots[i].swizzle, j);
470 if (swiz == last_swiz)
471 break;
472 last_swiz = swiz;
473
474 c->prog_data.param_convert[c->prog_data.nr_params] =
475 PARAM_NO_CONVERT;
476 this->param_index[c->prog_data.nr_params] = index;
477 this->param_offset[c->prog_data.nr_params] = swiz;
478 c->prog_data.nr_params++;
479 }
480 }
481 }
482
483 fs_reg *
484 fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
485 {
486 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
487 fs_reg wpos = *reg;
488 bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;
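/* I.e. flip Y when exactly one of "the shader declared origin_lower_left"
 * and "we're rendering to an FBO" holds -- presumably the cases where the
 * declared origin disagrees with the buffer's storage orientation.
 */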
489
490 /* gl_FragCoord.x */
491 if (ir->pixel_center_integer) {
492 emit(BRW_OPCODE_MOV, wpos, this->pixel_x);
493 } else {
494 emit(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f));
495 }
496 wpos.reg_offset++;
497
498 /* gl_FragCoord.y */
499 if (!flip && ir->pixel_center_integer) {
500 emit(BRW_OPCODE_MOV, wpos, this->pixel_y);
501 } else {
502 fs_reg pixel_y = this->pixel_y;
503 float offset = (ir->pixel_center_integer ? 0.0 : 0.5);
504
505 if (flip) {
506 pixel_y.negate = true;
507 offset += c->key.drawable_height - 1.0;
508 }
509
510 emit(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset));
511 }
512 wpos.reg_offset++;
513
514 /* gl_FragCoord.z */
515 if (intel->gen >= 6) {
516 emit(BRW_OPCODE_MOV, wpos,
517 fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
518 } else {
519 emit(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
520 interp_reg(FRAG_ATTRIB_WPOS, 2));
521 }
522 wpos.reg_offset++;
523
524 /* gl_FragCoord.w: Already set up in emit_interpolation */
525 emit(BRW_OPCODE_MOV, wpos, this->wpos_w);
526
527 return reg;
528 }
529
530 fs_reg *
531 fs_visitor::emit_general_interpolation(ir_variable *ir)
532 {
533 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
534 /* Interpolation is always in floating point regs. */
535 reg->type = BRW_REGISTER_TYPE_F;
536 fs_reg attr = *reg;
537
538 unsigned int array_elements;
539 const glsl_type *type;
540
541 if (ir->type->is_array()) {
542 array_elements = ir->type->length;
543 if (array_elements == 0) {
544 fail("dereferenced array '%s' has length 0\n", ir->name);
545 }
546 type = ir->type->fields.array;
547 } else {
548 array_elements = 1;
549 type = ir->type;
550 }
551
552 int location = ir->location;
553 for (unsigned int i = 0; i < array_elements; i++) {
554 for (unsigned int j = 0; j < type->matrix_columns; j++) {
555 if (urb_setup[location] == -1) {
556 /* If there's no incoming setup data for this slot, don't
557 * emit interpolation for it.
558 */
559 attr.reg_offset += type->vector_elements;
560 location++;
561 continue;
562 }
563
564 bool is_gl_Color =
565 location == FRAG_ATTRIB_COL0 || location == FRAG_ATTRIB_COL1;
566
567 if (c->key.flat_shade && is_gl_Color) {
568 /* Constant interpolation (flat shading) case. The SF has
569 * handed us defined values in only the constant offset
570 * field of the setup reg.
571 */
572 for (unsigned int k = 0; k < type->vector_elements; k++) {
573 struct brw_reg interp = interp_reg(location, k);
574 interp = suboffset(interp, 3);
575 emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
576 attr.reg_offset++;
577 }
578 } else {
579 /* Perspective interpolation case. */
580 for (unsigned int k = 0; k < type->vector_elements; k++) {
581 struct brw_reg interp = interp_reg(location, k);
582 emit(FS_OPCODE_LINTERP, attr,
583 this->delta_x, this->delta_y, fs_reg(interp));
584 attr.reg_offset++;
585 }
586
587 if (intel->gen < 6 && !(is_gl_Color && c->key.linear_color)) {
588 attr.reg_offset -= type->vector_elements;
589 for (unsigned int k = 0; k < type->vector_elements; k++) {
590 emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
591 attr.reg_offset++;
592 }
593 }
594 }
595 location++;
596 }
597 }
598
599 return reg;
600 }
601
602 fs_reg *
603 fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
604 {
605 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
606
607 /* The frontfacing comes in as a bit in the thread payload. */
608 if (intel->gen >= 6) {
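/* A sketch of the bit math, assuming the back-facing flag sits in bit 15
 * of g0.0 on gen6: ASR 15 shifts that flag into bit 0, NOT inverts it,
 * and AND 1 masks off the rest, leaving 1 for front faces and 0 for back.
 */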
609 emit(BRW_OPCODE_ASR, *reg,
610 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
611 fs_reg(15));
612 emit(BRW_OPCODE_NOT, *reg, *reg);
613 emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1));
614 } else {
615 struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
616 /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
617 * us the front face.
618 */
619 fs_inst *inst = emit(BRW_OPCODE_CMP, *reg,
620 fs_reg(r1_6ud),
621 fs_reg(1u << 31));
622 inst->conditional_mod = BRW_CONDITIONAL_L;
623 emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u));
624 }
625
626 return reg;
627 }
628
629 fs_inst *
630 fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
631 {
632 switch (opcode) {
633 case FS_OPCODE_RCP:
634 case FS_OPCODE_RSQ:
635 case FS_OPCODE_SQRT:
636 case FS_OPCODE_EXP2:
637 case FS_OPCODE_LOG2:
638 case FS_OPCODE_SIN:
639 case FS_OPCODE_COS:
640 break;
641 default:
642 assert(!"not reached: bad math opcode");
643 return NULL;
644 }
645
646 /* Can't do hstride == 0 args to gen6 math, so expand it out. We
647 * might be able to do better by doing execsize = 1 math and then
648 * expanding that result out, but we would need to be careful with
649 * masking.
650 *
651 * The hardware ignores source modifiers (negate and abs) on math
652 * instructions, so we also move to a temp to set those up.
653 */
654 if (intel->gen >= 6 && (src.file == UNIFORM ||
655 src.abs ||
656 src.negate)) {
657 fs_reg expanded = fs_reg(this, glsl_type::float_type);
658 emit(BRW_OPCODE_MOV, expanded, src);
659 src = expanded;
660 }
661
662 fs_inst *inst = emit(opcode, dst, src);
663
664 if (intel->gen < 6) {
665 inst->base_mrf = 2;
666 inst->mlen = c->dispatch_width / 8;
667 }
668
669 return inst;
670 }
671
672 fs_inst *
673 fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
674 {
675 int base_mrf = 2;
676 fs_inst *inst;
677
678 assert(opcode == FS_OPCODE_POW);
679
680 if (intel->gen >= 6) {
681 /* Can't do hstride == 0 args to gen6 math, so expand it out.
682 *
683 * The hardware ignores source modifiers (negate and abs) on math
684 * instructions, so we also move to a temp to set those up.
685 */
686 if (src0.file == UNIFORM || src0.abs || src0.negate) {
687 fs_reg expanded = fs_reg(this, glsl_type::float_type);
688 emit(BRW_OPCODE_MOV, expanded, src0);
689 src0 = expanded;
690 }
691
692 if (src1.file == UNIFORM || src1.abs || src1.negate) {
693 fs_reg expanded = fs_reg(this, glsl_type::float_type);
694 emit(BRW_OPCODE_MOV, expanded, src1);
695 src1 = expanded;
696 }
697
698 inst = emit(opcode, dst, src0, src1);
699 } else {
700 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1);
701 inst = emit(opcode, dst, src0, reg_null_f);
702
703 inst->base_mrf = base_mrf;
704 inst->mlen = 2 * c->dispatch_width / 8;
705 }
706 return inst;
707 }
708
709 void
710 fs_visitor::visit(ir_variable *ir)
711 {
712 fs_reg *reg = NULL;
713
714 if (variable_storage(ir))
715 return;
716
717 if (strcmp(ir->name, "gl_FragColor") == 0) {
718 this->frag_color = ir;
719 } else if (strcmp(ir->name, "gl_FragData") == 0) {
720 this->frag_data = ir;
721 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
722 this->frag_depth = ir;
723 }
724
725 if (ir->mode == ir_var_in) {
726 if (!strcmp(ir->name, "gl_FragCoord")) {
727 reg = emit_fragcoord_interpolation(ir);
728 } else if (!strcmp(ir->name, "gl_FrontFacing")) {
729 reg = emit_frontfacing_interpolation(ir);
730 } else {
731 reg = emit_general_interpolation(ir);
732 }
733 assert(reg);
734 hash_table_insert(this->variable_ht, reg, ir);
735 return;
736 }
737
738 if (ir->mode == ir_var_uniform) {
739 int param_index = c->prog_data.nr_params;
740
741 if (c->dispatch_width == 16) {
742 if (!variable_storage(ir)) {
743 fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
744 }
745 return;
746 }
747
748 if (!strncmp(ir->name, "gl_", 3)) {
749 setup_builtin_uniform_values(ir);
750 } else {
751 setup_uniform_values(ir->location, ir->type);
752 }
753
754 reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
755 reg->type = brw_type_for_base_type(ir->type);
756 }
757
758 if (!reg)
759 reg = new(this->mem_ctx) fs_reg(this, ir->type);
760
761 hash_table_insert(this->variable_ht, reg, ir);
762 }
763
764 void
765 fs_visitor::visit(ir_dereference_variable *ir)
766 {
767 fs_reg *reg = variable_storage(ir->var);
768 this->result = *reg;
769 }
770
771 void
772 fs_visitor::visit(ir_dereference_record *ir)
773 {
774 const glsl_type *struct_type = ir->record->type;
775
776 ir->record->accept(this);
777
778 unsigned int offset = 0;
779 for (unsigned int i = 0; i < struct_type->length; i++) {
780 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
781 break;
782 offset += type_size(struct_type->fields.structure[i].type);
783 }
784 this->result.reg_offset += offset;
785 this->result.type = brw_type_for_base_type(ir->type);
786 }
787
788 void
789 fs_visitor::visit(ir_dereference_array *ir)
790 {
791 ir_constant *index;
792 int element_size;
793
794 ir->array->accept(this);
795 index = ir->array_index->as_constant();
796
797 element_size = type_size(ir->type);
798 this->result.type = brw_type_for_base_type(ir->type);
799
800 if (index) {
801 assert(this->result.file == UNIFORM ||
802 (this->result.file == GRF &&
803 this->result.reg != 0));
804 this->result.reg_offset += index->value.i[0] * element_size;
805 } else {
806 assert(!"FINISHME: non-constant array element");
807 }
808 }
809
810 /* Instruction selection: Produce a MOV.sat instead of
811 * MIN(MAX(val, 0), 1) when possible.
812 */
813 bool
814 fs_visitor::try_emit_saturate(ir_expression *ir)
815 {
816 ir_rvalue *sat_val = ir->as_rvalue_to_saturate();
817
818 if (!sat_val)
819 return false;
820
821 sat_val->accept(this);
822 fs_reg src = this->result;
823
824 this->result = fs_reg(this, ir->type);
825 fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
826 inst->saturate = true;
827
828 return true;
829 }
830
831 static uint32_t
832 brw_conditional_for_comparison(unsigned int op)
833 {
834 switch (op) {
835 case ir_binop_less:
836 return BRW_CONDITIONAL_L;
837 case ir_binop_greater:
838 return BRW_CONDITIONAL_G;
839 case ir_binop_lequal:
840 return BRW_CONDITIONAL_LE;
841 case ir_binop_gequal:
842 return BRW_CONDITIONAL_GE;
843 case ir_binop_equal:
844 case ir_binop_all_equal: /* same as equal for scalars */
845 return BRW_CONDITIONAL_Z;
846 case ir_binop_nequal:
847 case ir_binop_any_nequal: /* same as nequal for scalars */
848 return BRW_CONDITIONAL_NZ;
849 default:
850 assert(!"not reached: bad operation for comparison");
851 return BRW_CONDITIONAL_NZ;
852 }
853 }
854
855 void
856 fs_visitor::visit(ir_expression *ir)
857 {
858 unsigned int operand;
859 fs_reg op[2], temp;
860 fs_inst *inst;
861
862 assert(ir->get_num_operands() <= 2);
863
864 if (try_emit_saturate(ir))
865 return;
866
867 for (operand = 0; operand < ir->get_num_operands(); operand++) {
868 ir->operands[operand]->accept(this);
869 if (this->result.file == BAD_FILE) {
870 ir_print_visitor v;
871 fail("Failed to get tree for expression operand:\n");
872 ir->operands[operand]->accept(&v);
873 }
874 op[operand] = this->result;
875
876 /* Matrix expression operands should have been broken down to vector
877 * operations already.
878 */
879 assert(!ir->operands[operand]->type->is_matrix());
880 /* And then those vector operands should have been broken down to scalar.
881 */
882 assert(!ir->operands[operand]->type->is_vector());
883 }
884
885 /* Storage for our result. If our result goes into an assignment, it will
886 * just get copy-propagated out, so no worries.
887 */
888 this->result = fs_reg(this, ir->type);
889
890 switch (ir->operation) {
891 case ir_unop_logic_not:
892 /* Note that BRW_OPCODE_NOT is not appropriate here, since it takes the
893 * ones' complement of the whole register, not just bit 0.
894 */
895 emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
896 break;
897 case ir_unop_neg:
898 op[0].negate = !op[0].negate;
899 this->result = op[0];
900 break;
901 case ir_unop_abs:
902 op[0].abs = true;
903 op[0].negate = false;
904 this->result = op[0];
905 break;
906 case ir_unop_sign:
907 temp = fs_reg(this, ir->type);
908
909 emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));
910
911 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
912 inst->conditional_mod = BRW_CONDITIONAL_G;
913 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
914 inst->predicated = true;
915
916 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
917 inst->conditional_mod = BRW_CONDITIONAL_L;
918 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
919 inst->predicated = true;
920
921 break;
922 case ir_unop_rcp:
923 emit_math(FS_OPCODE_RCP, this->result, op[0]);
924 break;
925
926 case ir_unop_exp2:
927 emit_math(FS_OPCODE_EXP2, this->result, op[0]);
928 break;
929 case ir_unop_log2:
930 emit_math(FS_OPCODE_LOG2, this->result, op[0]);
931 break;
932 case ir_unop_exp:
933 case ir_unop_log:
934 assert(!"not reached: should be handled by ir_explog_to_explog2");
935 break;
936 case ir_unop_sin:
937 case ir_unop_sin_reduced:
938 emit_math(FS_OPCODE_SIN, this->result, op[0]);
939 break;
940 case ir_unop_cos:
941 case ir_unop_cos_reduced:
942 emit_math(FS_OPCODE_COS, this->result, op[0]);
943 break;
944
945 case ir_unop_dFdx:
946 emit(FS_OPCODE_DDX, this->result, op[0]);
947 break;
948 case ir_unop_dFdy:
949 emit(FS_OPCODE_DDY, this->result, op[0]);
950 break;
951
952 case ir_binop_add:
953 emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
954 break;
955 case ir_binop_sub:
956 assert(!"not reached: should be handled by ir_sub_to_add_neg");
957 break;
958
959 case ir_binop_mul:
960 emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
961 break;
962 case ir_binop_div:
963 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
964 break;
965 case ir_binop_mod:
966 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
967 break;
968
969 case ir_binop_less:
970 case ir_binop_greater:
971 case ir_binop_lequal:
972 case ir_binop_gequal:
973 case ir_binop_equal:
974 case ir_binop_all_equal:
975 case ir_binop_nequal:
976 case ir_binop_any_nequal:
977 temp = this->result;
978 /* original gen4 does implicit conversion before comparison. */
979 if (intel->gen < 5)
980 temp.type = op[0].type;
981
982 inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
983 inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
984 emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1));
985 break;
986
987 case ir_binop_logic_xor:
988 emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
989 break;
990
991 case ir_binop_logic_or:
992 emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
993 break;
994
995 case ir_binop_logic_and:
996 emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
997 break;
998
999 case ir_binop_dot:
1000 case ir_unop_any:
1001 assert(!"not reached: should be handled by brw_fs_channel_expressions");
1002 break;
1003
1004 case ir_unop_noise:
1005 assert(!"not reached: should be handled by lower_noise");
1006 break;
1007
1008 case ir_quadop_vector:
1009 assert(!"not reached: should be handled by lower_quadop_vector");
1010 break;
1011
1012 case ir_unop_sqrt:
1013 emit_math(FS_OPCODE_SQRT, this->result, op[0]);
1014 break;
1015
1016 case ir_unop_rsq:
1017 emit_math(FS_OPCODE_RSQ, this->result, op[0]);
1018 break;
1019
1020 case ir_unop_i2f:
1021 case ir_unop_b2f:
1022 case ir_unop_b2i:
1023 case ir_unop_f2i:
1024 emit(BRW_OPCODE_MOV, this->result, op[0]);
1025 break;
1026 case ir_unop_f2b:
1027 case ir_unop_i2b:
1028 temp = this->result;
1029 /* original gen4 does implicit conversion before comparison. */
1030 if (intel->gen < 5)
1031 temp.type = op[0].type;
1032
1033 inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
1034 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1035 inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1));
1036 break;
1037
1038 case ir_unop_trunc:
1039 emit(BRW_OPCODE_RNDZ, this->result, op[0]);
1040 break;
1041 case ir_unop_ceil:
1042 op[0].negate = !op[0].negate;
1043 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
1044 this->result.negate = true;
1045 break;
1046 case ir_unop_floor:
1047 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
1048 break;
1049 case ir_unop_fract:
1050 inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
1051 break;
1052 case ir_unop_round_even:
1053 emit(BRW_OPCODE_RNDE, this->result, op[0]);
1054 break;
1055
1056 case ir_binop_min:
1057 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
1058 inst->conditional_mod = BRW_CONDITIONAL_L;
1059
1060 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
1061 inst->predicated = true;
1062 break;
1063 case ir_binop_max:
1064 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
1065 inst->conditional_mod = BRW_CONDITIONAL_G;
1066
1067 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
1068 inst->predicated = true;
1069 break;
1070
1071 case ir_binop_pow:
1072 emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
1073 break;
1074
1075 case ir_unop_bit_not:
1076 inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
1077 break;
1078 case ir_binop_bit_and:
1079 inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
1080 break;
1081 case ir_binop_bit_xor:
1082 inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
1083 break;
1084 case ir_binop_bit_or:
1085 inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
1086 break;
1087
1088 case ir_unop_u2f:
1089 case ir_binop_lshift:
1090 case ir_binop_rshift:
1091 assert(!"GLSL 1.30 features unsupported");
1092 break;
1093 }
1094 }
1095
1096 void
1097 fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
1098 const glsl_type *type, bool predicated)
1099 {
1100 switch (type->base_type) {
1101 case GLSL_TYPE_FLOAT:
1102 case GLSL_TYPE_UINT:
1103 case GLSL_TYPE_INT:
1104 case GLSL_TYPE_BOOL:
1105 for (unsigned int i = 0; i < type->components(); i++) {
1106 l.type = brw_type_for_base_type(type);
1107 r.type = brw_type_for_base_type(type);
1108
1109 fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
1110 inst->predicated = predicated;
1111
1112 l.reg_offset++;
1113 r.reg_offset++;
1114 }
1115 break;
1116 case GLSL_TYPE_ARRAY:
1117 for (unsigned int i = 0; i < type->length; i++) {
1118 emit_assignment_writes(l, r, type->fields.array, predicated);
1119 }
1120 break;
1121
1122 case GLSL_TYPE_STRUCT:
1123 for (unsigned int i = 0; i < type->length; i++) {
1124 emit_assignment_writes(l, r, type->fields.structure[i].type,
1125 predicated);
1126 }
1127 break;
1128
1129 case GLSL_TYPE_SAMPLER:
1130 break;
1131
1132 default:
1133 assert(!"not reached");
1134 break;
1135 }
1136 }
1137
1138 void
1139 fs_visitor::visit(ir_assignment *ir)
1140 {
1141 struct fs_reg l, r;
1142 fs_inst *inst;
1143
1144 /* FINISHME: arrays on the lhs */
1145 ir->lhs->accept(this);
1146 l = this->result;
1147
1148 ir->rhs->accept(this);
1149 r = this->result;
1150
1151 assert(l.file != BAD_FILE);
1152 assert(r.file != BAD_FILE);
1153
1154 if (ir->condition) {
1155 emit_bool_to_cond_code(ir->condition);
1156 }
1157
1158 if (ir->lhs->type->is_scalar() ||
1159 ir->lhs->type->is_vector()) {
1160 for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
1161 if (ir->write_mask & (1 << i)) {
1162 inst = emit(BRW_OPCODE_MOV, l, r);
1163 if (ir->condition)
1164 inst->predicated = true;
1165 r.reg_offset++;
1166 }
1167 l.reg_offset++;
1168 }
1169 } else {
1170 emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
1171 }
1172 }
1173
1174 fs_inst *
1175 fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
1176 int sampler)
1177 {
1178 int mlen;
1179 int base_mrf = 1;
1180 bool simd16 = false;
1181 fs_reg orig_dst;
1182
1183 /* g0 header. */
1184 mlen = 1;
1185
1186 if (ir->shadow_comparitor) {
1187 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1188 fs_inst *inst = emit(BRW_OPCODE_MOV,
1189 fs_reg(MRF, base_mrf + mlen + i), coordinate);
1190 if (i < 3 && c->key.gl_clamp_mask[i] & (1 << sampler))
1191 inst->saturate = true;
1192
1193 coordinate.reg_offset++;
1194 }
1195 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1196 mlen += 3;
1197
1198 if (ir->op == ir_tex) {
1199 /* There's no plain shadow compare message, so we use shadow
1200 * compare with a bias of 0.0.
1201 */
1202 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
1203 mlen++;
1204 } else if (ir->op == ir_txb) {
1205 ir->lod_info.bias->accept(this);
1206 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1207 mlen++;
1208 } else {
1209 assert(ir->op == ir_txl);
1210 ir->lod_info.lod->accept(this);
1211 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1212 mlen++;
1213 }
1214
1215 ir->shadow_comparitor->accept(this);
1216 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1217 mlen++;
1218 } else if (ir->op == ir_tex) {
1219 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1220 fs_inst *inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
1221 coordinate);
1222 if (i < 3 && c->key.gl_clamp_mask[i] & (1 << sampler))
1223 inst->saturate = true;
1224 coordinate.reg_offset++;
1225 }
1226 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1227 mlen += 3;
1228 } else if (ir->op == ir_txd) {
1229 assert(!"TXD isn't supported on gen4 yet.");
1230 } else {
1231 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod
1232 * instructions. We'll need to do SIMD16 here.
1233 */
1234 assert(ir->op == ir_txb || ir->op == ir_txl);
1235
1236 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1237 fs_inst *inst = emit(BRW_OPCODE_MOV, fs_reg(MRF,
1238 base_mrf + mlen + i * 2),
1239 coordinate);
1240 if (i < 3 && c->key.gl_clamp_mask[i] & (1 << sampler))
1241 inst->saturate = true;
1242 coordinate.reg_offset++;
1243 }
1244
1245 /* lod/bias appears after u/v/r. */
1246 mlen += 6;
1247
1248 if (ir->op == ir_txb) {
1249 ir->lod_info.bias->accept(this);
1250 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1251 mlen++;
1252 } else {
1253 ir->lod_info.lod->accept(this);
1254 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1255 mlen++;
1256 }
1257
1258 /* The unused upper half. */
1259 mlen++;
1260
1261 /* Now, since we're doing simd16, the return is 2 interleaved
1262 * vec4s where the odd-indexed ones are junk. We'll need to move
1263 * this weirdness around to the expected layout.
1264 */
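/* A sketch of that layout: the send returns R,junk,G,junk,B,junk,A,junk,
 * and the fixup loop after the send copies every even-indexed register
 * back into the real destination.
 */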
1265 simd16 = true;
1266 orig_dst = dst;
1267 dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type,
1268 2));
1269 dst.type = BRW_REGISTER_TYPE_F;
1270 }
1271
1272 fs_inst *inst = NULL;
1273 switch (ir->op) {
1274 case ir_tex:
1275 inst = emit(FS_OPCODE_TEX, dst);
1276 break;
1277 case ir_txb:
1278 inst = emit(FS_OPCODE_TXB, dst);
1279 break;
1280 case ir_txl:
1281 inst = emit(FS_OPCODE_TXL, dst);
1282 break;
1283 case ir_txd:
1284 inst = emit(FS_OPCODE_TXD, dst);
1285 break;
1286 case ir_txf:
1287 assert(!"GLSL 1.30 features unsupported");
1288 break;
1289 }
1290 inst->base_mrf = base_mrf;
1291 inst->mlen = mlen;
1292
1293 if (simd16) {
1294 for (int i = 0; i < 4; i++) {
1295 emit(BRW_OPCODE_MOV, orig_dst, dst);
1296 orig_dst.reg_offset++;
1297 dst.reg_offset += 2;
1298 }
1299 }
1300
1301 return inst;
1302 }
1303
1304 /* gen5's sampler has slots for u, v, r, array index, then optional
1305 * parameters like shadow comparitor or LOD bias. If optional
1306 * parameters aren't present, those base slots are optional and don't
1307 * need to be included in the message.
1308 *
1309 * We don't fill in the unnecessary slots regardless, which may look
1310 * surprising in the disassembly.
1311 */
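/* E.g., a sketch for SIMD8: a shadow-compared 2D sample puts the header
 * in m1 and u/v in m2/m3, then the MAX2() below skips the unwritten
 * r/array-index slots so the comparitor lands in m6, giving mlen == 6.
 */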
1312 fs_inst *
1313 fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
1314 int sampler)
1315 {
1316 int mlen = 1; /* g0 header always present. */
1317 int base_mrf = 1;
1318 int reg_width = c->dispatch_width / 8;
1319
1320 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1321 fs_inst *inst = emit(BRW_OPCODE_MOV,
1322 fs_reg(MRF, base_mrf + mlen + i * reg_width),
1323 coordinate);
1324 if (i < 3 && c->key.gl_clamp_mask[i] & (1 << sampler))
1325 inst->saturate = true;
1326 coordinate.reg_offset++;
1327 }
1328 mlen += ir->coordinate->type->vector_elements * reg_width;
1329
1330 if (ir->shadow_comparitor) {
1331 mlen = MAX2(mlen, 1 + 4 * reg_width);
1332
1333 ir->shadow_comparitor->accept(this);
1334 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1335 mlen += reg_width;
1336 }
1337
1338 fs_inst *inst = NULL;
1339 switch (ir->op) {
1340 case ir_tex:
1341 inst = emit(FS_OPCODE_TEX, dst);
1342 break;
1343 case ir_txb:
1344 ir->lod_info.bias->accept(this);
1345 mlen = MAX2(mlen, 1 + 4 * reg_width);
1346 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1347 mlen += reg_width;
1348
1349 inst = emit(FS_OPCODE_TXB, dst);
1350
1351 break;
1352 case ir_txl:
1353 ir->lod_info.lod->accept(this);
1354 mlen = MAX2(mlen, 1 + 4 * reg_width);
1355 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1356 mlen += reg_width;
1357
1358 inst = emit(FS_OPCODE_TXL, dst);
1359 break;
1360 case ir_txd:
1361 case ir_txf:
1362 assert(!"GLSL 1.30 features unsupported");
1363 break;
1364 }
1365 inst->base_mrf = base_mrf;
1366 inst->mlen = mlen;
1367
1368 if (mlen > 11) {
1369 fail("Message length >11 disallowed by hardware\n");
1370 }
1371
1372 return inst;
1373 }
1374
1375 fs_inst *
1376 fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
1377 int sampler)
1378 {
1379 int mlen = 1; /* g0 header always present. */
1380 int base_mrf = 1;
1381 int reg_width = c->dispatch_width / 8;
1382
1383 if (ir->shadow_comparitor) {
1384 ir->shadow_comparitor->accept(this);
1385 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1386 mlen += reg_width;
1387 }
1388
1389 /* Set up the LOD info */
1390 switch (ir->op) {
1391 case ir_tex:
1392 break;
1393 case ir_txb:
1394 ir->lod_info.bias->accept(this);
1395 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1396 mlen += reg_width;
1397 break;
1398 case ir_txl:
1399 ir->lod_info.lod->accept(this);
1400 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
1401 mlen += reg_width;
1402 break;
1403 case ir_txd:
1404 case ir_txf:
1405 assert(!"GLSL 1.30 features unsupported");
1406 break;
1407 }
1408
1409 /* Set up the coordinate */
1410 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
1411 fs_inst *inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
1412 coordinate);
1413 if (i < 3 && c->key.gl_clamp_mask[i] & (1 << sampler))
1414 inst->saturate = true;
1415 coordinate.reg_offset++;
1416 mlen += reg_width;
1417 }
1418
1419 /* Generate the SEND */
1420 fs_inst *inst = NULL;
1421 switch (ir->op) {
1422 case ir_tex: inst = emit(FS_OPCODE_TEX, dst); break;
1423 case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break;
1424 case ir_txl: inst = emit(FS_OPCODE_TXL, dst); break;
1425 case ir_txd: inst = emit(FS_OPCODE_TXD, dst); break;
1426 case ir_txf: assert(!"TXF unsupported.");
1427 }
1428 inst->base_mrf = base_mrf;
1429 inst->mlen = mlen;
1430
1431 if (mlen > 11) {
1432 fail("Message length >11 disallowed by hardware\n");
1433 }
1434
1435 return inst;
1436 }
1437
1438 void
1439 fs_visitor::visit(ir_texture *ir)
1440 {
1441 int sampler;
1442 fs_inst *inst = NULL;
1443
1444 ir->coordinate->accept(this);
1445 fs_reg coordinate = this->result;
1446
1447 if (ir->offset != NULL) {
1448 ir_constant *offset = ir->offset->as_constant();
1449 assert(offset != NULL);
1450
1451 signed char offsets[3];
1452 for (unsigned i = 0; i < ir->offset->type->vector_elements; i++)
1453 offsets[i] = (signed char) offset->value.i[i];
1454
1455 /* Combine all three offsets into a single unsigned dword:
1456 *
1457 * bits 11:8 - U Offset (X component)
1458 * bits 7:4 - V Offset (Y component)
1459 * bits 3:0 - R Offset (Z component)
1460 */
1461 unsigned offset_bits = 0;
1462 for (unsigned i = 0; i < ir->offset->type->vector_elements; i++) {
1463 const unsigned shift = 4 * (2 - i);
1464 offset_bits |= (offsets[i] << shift) & (0xF << shift);
1465 }
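/* Worked example: offsets (1, -2, 3) pack as 0x1E3 -- u = 0x1 in bits
 * 11:8, v = 0xE (the 4-bit two's complement of -2) in bits 7:4, and
 * r = 0x3 in bits 3:0.
 */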
1466
1467 /* Explicitly set up the message header by copying g0 to msg reg m1. */
1468 emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
1469 fs_reg(GRF, 0, BRW_REGISTER_TYPE_UD));
1470
1471 /* Then set the offset bits in DWord 2 of the message header. */
1472 emit(BRW_OPCODE_MOV,
1473 fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
1474 BRW_REGISTER_TYPE_UD)),
1475 fs_reg(brw_imm_uw(offset_bits)));
1476 }
1477
1478 /* Should be lowered by do_lower_texture_projection */
1479 assert(!ir->projector);
1480
1481 sampler = _mesa_get_sampler_uniform_value(ir->sampler,
1482 ctx->Shader.CurrentFragmentProgram,
1483 &brw->fragment_program->Base);
1484 sampler = c->fp->program.Base.SamplerUnits[sampler];
1485
1486 /* The 965 requires the EU to do the normalization of GL rectangle
1487 * texture coordinates. We use the program parameter state
1488 * tracking to get the scaling factor.
1489 */
1490 if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
1491 struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
1492 int tokens[STATE_LENGTH] = {
1493 STATE_INTERNAL,
1494 STATE_TEXRECT_SCALE,
1495 sampler,
1496 0,
1497 0
1498 };
1499
1500 if (c->dispatch_width == 16) {
1501 fail("rectangle scale uniform setup not supported on 16-wide\n");
1502 this->result = fs_reg(this, ir->type);
1503 return;
1504 }
1505
1506 c->prog_data.param_convert[c->prog_data.nr_params] =
1507 PARAM_NO_CONVERT;
1508 c->prog_data.param_convert[c->prog_data.nr_params + 1] =
1509 PARAM_NO_CONVERT;
1510
1511 fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
1512 fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
1513 GLuint index = _mesa_add_state_reference(params,
1514 (gl_state_index *)tokens);
1515
1516 this->param_index[c->prog_data.nr_params] = index;
1517 this->param_offset[c->prog_data.nr_params] = 0;
1518 c->prog_data.nr_params++;
1519 this->param_index[c->prog_data.nr_params] = index;
1520 this->param_offset[c->prog_data.nr_params] = 1;
1521 c->prog_data.nr_params++;
1522
1523 fs_reg dst = fs_reg(this, ir->coordinate->type);
1524 fs_reg src = coordinate;
1525 coordinate = dst;
1526
1527 emit(BRW_OPCODE_MUL, dst, src, scale_x);
1528 dst.reg_offset++;
1529 src.reg_offset++;
1530 emit(BRW_OPCODE_MUL, dst, src, scale_y);
1531 }
1532
1533 /* Writemasking doesn't eliminate channels on SIMD8 texture
1534 * samples, so don't worry about them.
1535 */
1536 fs_reg dst = fs_reg(this, glsl_type::vec4_type);
1537
1538 if (intel->gen >= 7) {
1539 inst = emit_texture_gen7(ir, dst, coordinate, sampler);
1540 } else if (intel->gen >= 5) {
1541 inst = emit_texture_gen5(ir, dst, coordinate, sampler);
1542 } else {
1543 inst = emit_texture_gen4(ir, dst, coordinate, sampler);
1544 }
1545
1546 /* If there's an offset, we already set up m1. To avoid the implied move,
1547 * use the null register. Otherwise, we want an implied move from g0.
1548 */
1549 if (ir->offset != NULL)
1550 inst->src[0] = fs_reg(brw_null_reg());
1551 else
1552 inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));
1553
1554 inst->sampler = sampler;
1555
1556 this->result = dst;
1557
1558 if (ir->shadow_comparitor)
1559 inst->shadow_compare = true;
1560
1561 if (ir->type == glsl_type::float_type) {
1562 /* Ignore DEPTH_TEXTURE_MODE swizzling. */
1563 assert(ir->sampler->type->sampler_shadow);
1564 } else if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) {
1565 fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type);
1566
1567 for (int i = 0; i < 4; i++) {
1568 int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
1569 fs_reg l = swizzle_dst;
1570 l.reg_offset += i;
1571
1572 if (swiz == SWIZZLE_ZERO) {
1573 emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
1574 } else if (swiz == SWIZZLE_ONE) {
1575 emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
1576 } else {
1577 fs_reg r = dst;
1578 r.reg_offset += GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
1579 emit(BRW_OPCODE_MOV, l, r);
1580 }
1581 }
1582 this->result = swizzle_dst;
1583 }
1584 }
1585
1586 void
1587 fs_visitor::visit(ir_swizzle *ir)
1588 {
1589 ir->val->accept(this);
1590 fs_reg val = this->result;
1591
1592 if (ir->type->vector_elements == 1) {
1593 this->result.reg_offset += ir->mask.x;
1594 return;
1595 }
1596
1597 fs_reg result = fs_reg(this, ir->type);
1598 this->result = result;
1599
1600 for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
1601 fs_reg channel = val;
1602 int swiz = 0;
1603
1604 switch (i) {
1605 case 0:
1606 swiz = ir->mask.x;
1607 break;
1608 case 1:
1609 swiz = ir->mask.y;
1610 break;
1611 case 2:
1612 swiz = ir->mask.z;
1613 break;
1614 case 3:
1615 swiz = ir->mask.w;
1616 break;
1617 }
1618
1619 channel.reg_offset += swiz;
1620 emit(BRW_OPCODE_MOV, result, channel);
1621 result.reg_offset++;
1622 }
1623 }
1624
1625 void
1626 fs_visitor::visit(ir_discard *ir)
1627 {
1628 assert(ir->condition == NULL); /* FINISHME */
1629
1630 emit(FS_OPCODE_DISCARD);
1631 kill_emitted = true;
1632 }
1633
1634 void
1635 fs_visitor::visit(ir_constant *ir)
1636 {
1637 /* Set this->result to reg at the bottom of the function because some code
1638 * paths will cause this visitor to be applied to other fields. This will
1639 * cause the value stored in this->result to be modified.
1640 *
1641 * Make reg constant so that it doesn't get accidentally modified along the
1642 * way. Yes, I actually had this problem. :(
1643 */
1644 const fs_reg reg(this, ir->type);
1645 fs_reg dst_reg = reg;
1646
1647 if (ir->type->is_array()) {
1648 const unsigned size = type_size(ir->type->fields.array);
1649
1650 for (unsigned i = 0; i < ir->type->length; i++) {
1651 ir->array_elements[i]->accept(this);
1652 fs_reg src_reg = this->result;
1653
1654 dst_reg.type = src_reg.type;
1655 for (unsigned j = 0; j < size; j++) {
1656 emit(BRW_OPCODE_MOV, dst_reg, src_reg);
1657 src_reg.reg_offset++;
1658 dst_reg.reg_offset++;
1659 }
1660 }
1661 } else if (ir->type->is_record()) {
1662 foreach_list(node, &ir->components) {
1663 ir_instruction *const field = (ir_instruction *) node;
1664 const unsigned size = type_size(field->type);
1665
1666 field->accept(this);
1667 fs_reg src_reg = this->result;
1668
1669 dst_reg.type = src_reg.type;
1670 for (unsigned j = 0; j < size; j++) {
1671 emit(BRW_OPCODE_MOV, dst_reg, src_reg);
1672 src_reg.reg_offset++;
1673 dst_reg.reg_offset++;
1674 }
1675 }
1676 } else {
1677 const unsigned size = type_size(ir->type);
1678
1679 for (unsigned i = 0; i < size; i++) {
1680 switch (ir->type->base_type) {
1681 case GLSL_TYPE_FLOAT:
1682 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
1683 break;
1684 case GLSL_TYPE_UINT:
1685 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
1686 break;
1687 case GLSL_TYPE_INT:
1688 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
1689 break;
1690 case GLSL_TYPE_BOOL:
1691 emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
1692 break;
1693 default:
1694 assert(!"Non-float/uint/int/bool constant");
1695 }
1696 dst_reg.reg_offset++;
1697 }
1698 }
1699
1700 this->result = reg;
1701 }
1702
1703 void
1704 fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
1705 {
1706 ir_expression *expr = ir->as_expression();
1707
1708 if (expr) {
1709 fs_reg op[2];
1710 fs_inst *inst;
1711
1712 assert(expr->get_num_operands() <= 2);
1713 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1714 assert(expr->operands[i]->type->is_scalar());
1715
1716 expr->operands[i]->accept(this);
1717 op[i] = this->result;
1718 }
1719
1720 switch (expr->operation) {
1721 case ir_unop_logic_not:
1722 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
1723 inst->conditional_mod = BRW_CONDITIONAL_Z;
1724 break;
1725
1726 case ir_binop_logic_xor:
1727 inst = emit(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]);
1728 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1729 break;
1730
1731 case ir_binop_logic_or:
1732 inst = emit(BRW_OPCODE_OR, reg_null_d, op[0], op[1]);
1733 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1734 break;
1735
1736 case ir_binop_logic_and:
1737 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], op[1]);
1738 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1739 break;
1740
1741 case ir_unop_f2b:
1742 if (intel->gen >= 6) {
1743 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
1744 } else {
1745 inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
1746 }
1747 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1748 break;
1749
1750 case ir_unop_i2b:
1751 if (intel->gen >= 6) {
1752 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
1753 } else {
1754 inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
1755 }
1756 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1757 break;
1758
1759 case ir_binop_greater:
1760 case ir_binop_gequal:
1761 case ir_binop_less:
1762 case ir_binop_lequal:
1763 case ir_binop_equal:
1764 case ir_binop_all_equal:
1765 case ir_binop_nequal:
1766 case ir_binop_any_nequal:
1767 inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
1768 inst->conditional_mod =
1769 brw_conditional_for_comparison(expr->operation);
1770 break;
1771
1772 default:
1773 assert(!"not reached");
1774 fail("bad cond code\n");
1775 break;
1776 }
1777 return;
1778 }
1779
1780 ir->accept(this);
1781
1782 if (intel->gen >= 6) {
1783 fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
1784 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1785 } else {
1786 fs_inst *inst = emit(BRW_OPCODE_MOV, reg_null_d, this->result);
1787 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1788 }
1789 }
1790
1791 /**
1792 * Emit a gen6 IF statement with the comparison folded into the IF
1793 * instruction.
1794 */
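/* E.g., a sketch: "if (a < b)" becomes a single "IF.l a, b" here, rather
 * than the pre-gen6 sequence of a CMP to set the flag register followed
 * by a predicated IF.
 */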
1795 void
1796 fs_visitor::emit_if_gen6(ir_if *ir)
1797 {
1798 ir_expression *expr = ir->condition->as_expression();
1799
1800 if (expr) {
1801 fs_reg op[2];
1802 fs_inst *inst;
1803 fs_reg temp;
1804
1805 assert(expr->get_num_operands() <= 2);
1806 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1807 assert(expr->operands[i]->type->is_scalar());
1808
1809 expr->operands[i]->accept(this);
1810 op[i] = this->result;
1811 }
1812
1813 switch (expr->operation) {
1814 case ir_unop_logic_not:
1815 inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
1816 inst->conditional_mod = BRW_CONDITIONAL_Z;
1817 return;
1818
1819 case ir_binop_logic_xor:
1820 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
1821 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1822 return;
1823
1824 case ir_binop_logic_or:
1825 temp = fs_reg(this, glsl_type::bool_type);
1826 emit(BRW_OPCODE_OR, temp, op[0], op[1]);
1827 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
1828 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1829 return;
1830
1831 case ir_binop_logic_and:
1832 temp = fs_reg(this, glsl_type::bool_type);
1833 emit(BRW_OPCODE_AND, temp, op[0], op[1]);
1834 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
1835 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1836 return;
1837
1838 case ir_unop_f2b:
1839 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
1840 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1841 return;
1842
1843 case ir_unop_i2b:
1844 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
1845 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1846 return;
1847
1848 case ir_binop_greater:
1849 case ir_binop_gequal:
1850 case ir_binop_less:
1851 case ir_binop_lequal:
1852 case ir_binop_equal:
1853 case ir_binop_all_equal:
1854 case ir_binop_nequal:
1855 case ir_binop_any_nequal:
1856 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
1857 inst->conditional_mod =
1858 brw_conditional_for_comparison(expr->operation);
1859 return;
1860 default:
1861 assert(!"not reached");
1862 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
1863 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1864 fail("bad condition\n");
1865 return;
1866 }
1867 return;
1868 }
1869
1870 ir->condition->accept(this);
1871
1872 fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
1873 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1874 }
1875
1876 void
1877 fs_visitor::visit(ir_if *ir)
1878 {
1879 fs_inst *inst;
1880
1881 if (intel->gen != 6 && c->dispatch_width == 16) {
1882 fail("Can't support (non-uniform) control flow on 16-wide\n");
1883 }
1884
1885 /* Don't point the annotation at the if statement, because then it plus
1886 * the then and else blocks get printed.
1887 */
1888 this->base_ir = ir->condition;
1889
1890 if (intel->gen == 6) {
1891 emit_if_gen6(ir);
1892 } else {
1893 emit_bool_to_cond_code(ir->condition);
1894
1895 inst = emit(BRW_OPCODE_IF);
1896 inst->predicated = true;
1897 }
1898
1899 foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
1900 ir_instruction *ir = (ir_instruction *)iter.get();
1901 this->base_ir = ir;
1902
1903 ir->accept(this);
1904 }
1905
1906 if (!ir->else_instructions.is_empty()) {
1907 emit(BRW_OPCODE_ELSE);
1908
1909 foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
1910 ir_instruction *ir = (ir_instruction *)iter.get();
1911 this->base_ir = ir;
1912
1913 ir->accept(this);
1914 }
1915 }
1916
1917 emit(BRW_OPCODE_ENDIF);
1918 }
1919
1920 void
1921 fs_visitor::visit(ir_loop *ir)
1922 {
1923 fs_reg counter = reg_undef;
1924
1925 if (c->dispatch_width == 16) {
1926 fail("Can't support (non-uniform) control flow on 16-wide\n");
1927 }
1928
1929 if (ir->counter) {
1930 this->base_ir = ir->counter;
1931 ir->counter->accept(this);
1932 counter = *(variable_storage(ir->counter));
1933
1934 if (ir->from) {
1935 this->base_ir = ir->from;
1936 ir->from->accept(this);
1937
1938 emit(BRW_OPCODE_MOV, counter, this->result);
1939 }
1940 }
1941
1942 emit(BRW_OPCODE_DO);
1943
1944 if (ir->to) {
1945 this->base_ir = ir->to;
1946 ir->to->accept(this);
1947
1948 fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
1949 inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);
1950
1951 inst = emit(BRW_OPCODE_BREAK);
1952 inst->predicated = true;
1953 }
1954
1955 foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
1956 ir_instruction *ir = (ir_instruction *)iter.get();
1957
1958 this->base_ir = ir;
1959 ir->accept(this);
1960 }
1961
1962 if (ir->increment) {
1963 this->base_ir = ir->increment;
1964 ir->increment->accept(this);
1965 emit(BRW_OPCODE_ADD, counter, counter, this->result);
1966 }
1967
1968 emit(BRW_OPCODE_WHILE);
1969 }
1970
1971 void
1972 fs_visitor::visit(ir_loop_jump *ir)
1973 {
1974 switch (ir->mode) {
1975 case ir_loop_jump::jump_break:
1976 emit(BRW_OPCODE_BREAK);
1977 break;
1978 case ir_loop_jump::jump_continue:
1979 emit(BRW_OPCODE_CONTINUE);
1980 break;
1981 }
1982 }
1983
1984 void
1985 fs_visitor::visit(ir_call *ir)
1986 {
1987 assert(!"FINISHME");
1988 }
1989
1990 void
1991 fs_visitor::visit(ir_return *ir)
1992 {
1993 assert(!"FINISHME");
1994 }
1995
1996 void
1997 fs_visitor::visit(ir_function *ir)
1998 {
1999 /* Ignore function bodies other than main() -- we shouldn't see calls to
2000 * them since they should all be inlined before we get to ir_to_mesa.
2001 */
2002 if (strcmp(ir->name, "main") == 0) {
2003 const ir_function_signature *sig;
2004 exec_list empty;
2005
2006 sig = ir->matching_signature(&empty);
2007
2008 assert(sig);
2009
2010 foreach_iter(exec_list_iterator, iter, sig->body) {
2011 ir_instruction *ir = (ir_instruction *)iter.get();
2012 this->base_ir = ir;
2013
2014 ir->accept(this);
2015 }
2016 }
2017 }
2018
2019 void
2020 fs_visitor::visit(ir_function_signature *ir)
2021 {
2022 assert(!"not reached");
2023 (void)ir;
2024 }
2025
2026 fs_inst *
2027 fs_visitor::emit(fs_inst inst)
2028 {
2029 fs_inst *list_inst = new(mem_ctx) fs_inst;
2030 *list_inst = inst;
2031
2032 if (force_uncompressed_stack > 0)
2033 list_inst->force_uncompressed = true;
2034 else if (force_sechalf_stack > 0)
2035 list_inst->force_sechalf = true;
2036
2037 list_inst->annotation = this->current_annotation;
2038 list_inst->ir = this->base_ir;
2039
2040 this->instructions.push_tail(list_inst);
2041
2042 return list_inst;
2043 }
2044
2045 /** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
2046 void
2047 fs_visitor::emit_dummy_fs()
2048 {
2049 /* Everyone's favorite color. */
2050 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2), fs_reg(1.0f));
2051 emit(BRW_OPCODE_MOV, fs_reg(MRF, 3), fs_reg(0.0f));
2052 emit(BRW_OPCODE_MOV, fs_reg(MRF, 4), fs_reg(1.0f));
2053 emit(BRW_OPCODE_MOV, fs_reg(MRF, 5), fs_reg(0.0f));
2054
2055 fs_inst *write;
2056 write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
2057 write->base_mrf = 0;
2058 }
2059
2060 /* The register location here is relative to the start of the URB
2061 * data. It will get adjusted to be a real location before
2062 * generate_code() time.
2063 */
2064 struct brw_reg
2065 fs_visitor::interp_reg(int location, int channel)
2066 {
2067 int regnr = urb_setup[location] * 2 + channel / 2;
2068 int stride = (channel & 1) * 4;
2069
2070 assert(urb_setup[location] != -1);
2071
2072 return brw_vec1_grf(regnr, stride);
2073 }
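
/* A worked example with assumed values: if urb_setup[location] == 1 and
 * channel == 2 (the .z setup data), then regnr = 1 * 2 + 2 / 2 = 3 and the
 * suboffset is (2 & 1) * 4 = 0, so the .z plane-equation coefficients start
 * at g3.0 (before assign_urb_setup() adds in the real URB start offset).
 */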
2074
2075 /** Emits the interpolation for the varying inputs. */
2076 void
2077 fs_visitor::emit_interpolation_setup_gen4()
2078 {
2079 this->current_annotation = "compute pixel centers";
2080 this->pixel_x = fs_reg(this, glsl_type::uint_type);
2081 this->pixel_y = fs_reg(this, glsl_type::uint_type);
2082 this->pixel_x.type = BRW_REGISTER_TYPE_UW;
2083 this->pixel_y.type = BRW_REGISTER_TYPE_UW;
2084
2085 emit(FS_OPCODE_PIXEL_X, this->pixel_x);
2086 emit(FS_OPCODE_PIXEL_Y, this->pixel_y);
2087
2088 this->current_annotation = "compute pixel deltas from v0";
2089 if (brw->has_pln) {
2090 this->delta_x = fs_reg(this, glsl_type::vec2_type);
2091 this->delta_y = this->delta_x;
2092 this->delta_y.reg_offset++;
2093 } else {
2094 this->delta_x = fs_reg(this, glsl_type::float_type);
2095 this->delta_y = fs_reg(this, glsl_type::float_type);
2096 }
2097 emit(BRW_OPCODE_ADD, this->delta_x,
2098 this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
2099 emit(BRW_OPCODE_ADD, this->delta_y,
2100 this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));
2101
2102 this->current_annotation = "compute pos.w and 1/pos.w";
2103 /* Compute wpos.w. It's always in our setup, since it's needed to
2104 * interpolate the other attributes.
2105 */
2106 this->wpos_w = fs_reg(this, glsl_type::float_type);
2107 emit(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
2108 interp_reg(FRAG_ATTRIB_WPOS, 3));
2109 /* Compute the pixel 1/W value from wpos.w. */
2110 this->pixel_w = fs_reg(this, glsl_type::float_type);
2111 emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
2112 this->current_annotation = NULL;
2113 }
2114
2115 /** Emits the interpolation for the varying inputs. */
2116 void
2117 fs_visitor::emit_interpolation_setup_gen6()
2118 {
2119 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
2120
2121 /* If the pixel centers end up used, the setup is the same as for gen4. */
2122 this->current_annotation = "compute pixel centers";
2123 fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
2124 fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
2125 int_pixel_x.type = BRW_REGISTER_TYPE_UW;
2126 int_pixel_y.type = BRW_REGISTER_TYPE_UW;
2127 emit(BRW_OPCODE_ADD,
2128 int_pixel_x,
2129 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
2130 fs_reg(brw_imm_v(0x10101010)));
2131 emit(BRW_OPCODE_ADD,
2132 int_pixel_y,
2133 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
2134 fs_reg(brw_imm_v(0x11001100)));
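
/* A note on the immediates above: brw_imm_v() packs eight signed 4-bit
 * values, lowest nibble first, so 0x10101010 is the per-pixel X offset
 * vector {0, 1, 0, 1, 0, 1, 0, 1} and 0x11001100 is the Y offset vector
 * {0, 0, 1, 1, 0, 0, 1, 1} -- each pixel's position within its 2x2
 * subspan, added to the subspan origins read from g1.
 */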
2135
2136 /* As of gen6, we can no longer mix float and int sources. We have
2137 * to turn the integer pixel centers into floats for their actual
2138 * use.
2139 */
2140 this->pixel_x = fs_reg(this, glsl_type::float_type);
2141 this->pixel_y = fs_reg(this, glsl_type::float_type);
2142 emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
2143 emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);
2144
2145 this->current_annotation = "compute pos.w";
2146 this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
2147 this->wpos_w = fs_reg(this, glsl_type::float_type);
2148 emit_math(FS_OPCODE_RCP, this->wpos_w, this->pixel_w);
2149
2150 this->delta_x = fs_reg(brw_vec8_grf(2, 0));
2151 this->delta_y = fs_reg(brw_vec8_grf(3, 0));
2152
2153 this->current_annotation = NULL;
2154 }
2155
2156 void
2157 fs_visitor::emit_color_write(int index, int first_color_mrf, fs_reg color)
2158 {
2159 int reg_width = c->dispatch_width / 8;
2160
2161 if (c->dispatch_width == 8 || intel->gen == 6) {
2162 /* SIMD8 write looks like:
2163 * m + 0: r0
2164 * m + 1: r1
2165 * m + 2: g0
2166 * m + 3: g1
2167 *
2168 * gen6 SIMD16 DP write looks like:
2169 * m + 0: r0
2170 * m + 1: r1
2171 * m + 2: g0
2172 * m + 3: g1
2173 * m + 4: b0
2174 * m + 5: b1
2175 * m + 6: a0
2176 * m + 7: a1
2177 */
2178 emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index * reg_width),
2179 color);
2180 } else {
2181 /* pre-gen6 SIMD16 single source DP write looks like:
2182 * m + 0: r0
2183 * m + 1: g0
2184 * m + 2: b0
2185 * m + 3: a0
2186 * m + 4: r1
2187 * m + 5: g1
2188 * m + 6: b1
2189 * m + 7: a1
2190 */
2191 if (brw->has_compr4) {
2192 /* By setting the high bit of the MRF register number, we
2193 * indicate that we want COMPR4 mode - instead of doing the
2194 * usual destination + 1 for the second half we get
2195 * destination + 4.
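 *
 * A worked example with assumed values: with first_color_mrf = 2 and
 * index = 1, this single compressed MOV writes the first half of the
 * green channel into m3 and the second half into m7 (the g0/g1 slots
 * in the layout above).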
2196 */
2197 emit(BRW_OPCODE_MOV,
2198 fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index), color);
2199 } else {
2200 push_force_uncompressed();
2201 emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index), color);
2202 pop_force_uncompressed();
2203
2204 push_force_sechalf();
2205 color.sechalf = true;
2206 emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4), color);
2207 pop_force_sechalf();
2208 color.sechalf = false;
2209 }
2210 }
2211 }
2212
2213 void
2214 fs_visitor::emit_fb_writes()
2215 {
2216 this->current_annotation = "FB write header";
2217 GLboolean header_present = GL_TRUE;
2218 int nr = 0;
2219 int reg_width = c->dispatch_width / 8;
2220
2221 if (intel->gen >= 6 &&
2222 !this->kill_emitted &&
2223 c->key.nr_color_regions == 1) {
2224 header_present = false;
2225 }
2226
2227 if (header_present) {
2228 /* m0, m1 header */
2229 nr += 2;
2230 }
2231
2232 if (c->aa_dest_stencil_reg) {
2233 push_force_uncompressed();
2234 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2235 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
2236 pop_force_uncompressed();
2237 }
2238
2239 /* Reserve space for color. It'll be filled in per MRT below. */
2240 int color_mrf = nr;
2241 nr += 4 * reg_width;
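/* As an illustration: a SIMD8 shader with a header and no depth/stencil
 * sources ends up with mlen = 2 (header) + 4 (one reg per color channel)
 * = 6 for each FB write emitted below.
 */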
2242
2243 if (c->source_depth_to_render_target) {
2244 if (intel->gen == 6 && c->dispatch_width == 16) {
2245 /* For outputting oDepth on gen6, SIMD8 writes have to be
2246 * used. This would require 8-wide moves of each half to
2247 * message regs, kind of like pre-gen5 SIMD16 FB writes.
2248 * Just bail on doing so for now.
2249 */
2250 fail("Missing support for simd16 depth writes on gen6\n");
2251 }
2252
2253 if (c->computes_depth) {
2254 /* Hand over gl_FragDepth. */
2255 assert(this->frag_depth);
2256 fs_reg depth = *(variable_storage(this->frag_depth));
2257
2258 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
2259 } else {
2260 /* Pass through the payload depth. */
2261 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
2262 fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
2263 }
2264 nr += reg_width;
2265 }
2266
2267 if (c->dest_depth_reg) {
2268 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
2269 fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
2270 nr += reg_width;
2271 }
2272
2273 fs_reg color = reg_undef;
2274 if (this->frag_color)
2275 color = *(variable_storage(this->frag_color));
2276 else if (this->frag_data) {
2277 color = *(variable_storage(this->frag_data));
2278 color.type = BRW_REGISTER_TYPE_F;
2279 }
2280
2281 for (int target = 0; target < c->key.nr_color_regions; target++) {
2282 this->current_annotation = ralloc_asprintf(this->mem_ctx,
2283 "FB write target %d",
2284 target);
2285 if (this->frag_color || this->frag_data) {
2286 for (int i = 0; i < 4; i++) {
2287 emit_color_write(i, color_mrf, color);
2288 color.reg_offset++;
2289 }
2290 }
2291
2292 if (this->frag_color)
2293 color.reg_offset -= 4;
2294
2295 fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
2296 inst->target = target;
2297 inst->base_mrf = 0;
2298 inst->mlen = nr;
2299 if (target == c->key.nr_color_regions - 1)
2300 inst->eot = true;
2301 inst->header_present = header_present;
2302 }
2303
2304 if (c->key.nr_color_regions == 0) {
2305 if (c->key.alpha_test && (this->frag_color || this->frag_data)) {
2306 /* If the alpha test is enabled but there's no color buffer,
2307 * we still need to send alpha out the pipeline to our null
2308 * renderbuffer.
2309 */
2310 color.reg_offset += 3;
2311 emit_color_write(3, color_mrf, color);
2312 }
2313
2314 fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
2315 inst->base_mrf = 0;
2316 inst->mlen = nr;
2317 inst->eot = true;
2318 inst->header_present = header_present;
2319 }
2320
2321 this->current_annotation = NULL;
2322 }
2323
2324 void
2325 fs_visitor::generate_fb_write(fs_inst *inst)
2326 {
2327 GLboolean eot = inst->eot;
2328 struct brw_reg implied_header;
2329
2330 /* The header is 2 regs, with g0 and g1 as the contents. g0 is copied
2331 * by the implied move; here we set up g1.
2332 */
2333 brw_push_insn_state(p);
2334 brw_set_mask_control(p, BRW_MASK_DISABLE);
2335 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2336
2337 if (inst->header_present) {
2338 if (intel->gen >= 6) {
2339 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
2340 brw_MOV(p,
2341 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
2342 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2343 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2344
2345 if (inst->target > 0) {
2346 /* Set the render target index for choosing BLEND_STATE. */
2347 brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
2348 BRW_REGISTER_TYPE_UD),
2349 brw_imm_ud(inst->target));
2350 }
2351
2352 implied_header = brw_null_reg();
2353 } else {
2354 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
2355
2356 brw_MOV(p,
2357 brw_message_reg(inst->base_mrf + 1),
2358 brw_vec8_grf(1, 0));
2359 }
2360 } else {
2361 implied_header = brw_null_reg();
2362 }
2363
2364 brw_pop_insn_state(p);
2365
2366 brw_fb_WRITE(p,
2367 c->dispatch_width,
2368 inst->base_mrf,
2369 implied_header,
2370 inst->target,
2371 inst->mlen,
2372 0,
2373 eot,
2374 inst->header_present);
2375 }
2376
2377 /* Computes the integer pixel x,y values from the origin.
2378 *
2379 * This is the basis of gl_FragCoord computation, but is also used
2380 * pre-gen6 for computing the deltas from v0 for computing
2381 * interpolation.
2382 */
2383 void
2384 fs_visitor::generate_pixel_xy(struct brw_reg dst, bool is_x)
2385 {
2386 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
2387 struct brw_reg src;
2388 struct brw_reg deltas;
2389
2390 if (is_x) {
2391 src = stride(suboffset(g1_uw, 4), 2, 4, 0);
2392 deltas = brw_imm_v(0x10101010);
2393 } else {
2394 src = stride(suboffset(g1_uw, 5), 2, 4, 0);
2395 deltas = brw_imm_v(0x11001100);
2396 }
2397
2398 if (c->dispatch_width == 16) {
2399 dst = vec16(dst);
2400 }
2401
2402 /* We do this 8- or 16-wide, but since the destination is UW we
2403 * don't do compression in the 16-wide case.
2404 */
2405 brw_push_insn_state(p);
2406 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2407 brw_ADD(p, dst, src, deltas);
2408 brw_pop_insn_state(p);
2409 }
2410
2411 void
2412 fs_visitor::generate_linterp(fs_inst *inst,
2413 struct brw_reg dst, struct brw_reg *src)
2414 {
2415 struct brw_reg delta_x = src[0];
2416 struct brw_reg delta_y = src[1];
2417 struct brw_reg interp = src[2];
2418
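/* PLN evaluates the plane equation with delta_x and delta_y as one
 * register pair, so it requires delta_y to live in the register right
 * after delta_x and, before gen6, the pair to start on an even register
 * number; otherwise fall back to the two-instruction LINE + MAC
 * sequence.
 */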
2419 if (brw->has_pln &&
2420 delta_y.nr == delta_x.nr + 1 &&
2421 (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
2422 brw_PLN(p, dst, interp, delta_x);
2423 } else {
2424 brw_LINE(p, brw_null_reg(), interp, delta_x);
2425 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
2426 }
2427 }
2428
2429 void
2430 fs_visitor::generate_math(fs_inst *inst,
2431 struct brw_reg dst, struct brw_reg *src)
2432 {
2433 int op;
2434
2435 switch (inst->opcode) {
2436 case FS_OPCODE_RCP:
2437 op = BRW_MATH_FUNCTION_INV;
2438 break;
2439 case FS_OPCODE_RSQ:
2440 op = BRW_MATH_FUNCTION_RSQ;
2441 break;
2442 case FS_OPCODE_SQRT:
2443 op = BRW_MATH_FUNCTION_SQRT;
2444 break;
2445 case FS_OPCODE_EXP2:
2446 op = BRW_MATH_FUNCTION_EXP;
2447 break;
2448 case FS_OPCODE_LOG2:
2449 op = BRW_MATH_FUNCTION_LOG;
2450 break;
2451 case FS_OPCODE_POW:
2452 op = BRW_MATH_FUNCTION_POW;
2453 break;
2454 case FS_OPCODE_SIN:
2455 op = BRW_MATH_FUNCTION_SIN;
2456 break;
2457 case FS_OPCODE_COS:
2458 op = BRW_MATH_FUNCTION_COS;
2459 break;
2460 default:
2461 assert(!"not reached: unknown math function");
2462 op = 0;
2463 break;
2464 }
2465
2466 if (intel->gen >= 6) {
2467 assert(inst->mlen == 0);
2468
2469 if (inst->opcode == FS_OPCODE_POW) {
2470 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2471 brw_math2(p, dst, op, src[0], src[1]);
2472
2473 if (c->dispatch_width == 16) {
2474 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
2475 brw_math2(p, sechalf(dst), op, sechalf(src[0]), sechalf(src[1]));
2476 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
2477 }
2478 } else {
2479 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2480 brw_math(p, dst,
2481 op,
2482 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2483 BRW_MATH_SATURATE_NONE,
2484 0, src[0],
2485 BRW_MATH_DATA_VECTOR,
2486 BRW_MATH_PRECISION_FULL);
2487
2488 if (c->dispatch_width == 16) {
2489 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
2490 brw_math(p, sechalf(dst),
2491 op,
2492 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2493 BRW_MATH_SATURATE_NONE,
2494 0, sechalf(src[0]),
2495 BRW_MATH_DATA_VECTOR,
2496 BRW_MATH_PRECISION_FULL);
2497 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
2498 }
2499 }
2500 } else /* gen <= 5 */ {
2501 assert(inst->mlen >= 1);
2502
2503 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2504 brw_math(p, dst,
2505 op,
2506 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2507 BRW_MATH_SATURATE_NONE,
2508 inst->base_mrf, src[0],
2509 BRW_MATH_DATA_VECTOR,
2510 BRW_MATH_PRECISION_FULL);
2511
2512 if (c->dispatch_width == 16) {
2513 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
2514 brw_math(p, sechalf(dst),
2515 op,
2516 inst->saturate ? BRW_MATH_SATURATE_SATURATE :
2517 BRW_MATH_SATURATE_NONE,
2518 inst->base_mrf + 1, sechalf(src[0]),
2519 BRW_MATH_DATA_VECTOR,
2520 BRW_MATH_PRECISION_FULL);
2521
2522 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
2523 }
2524 }
2525 }
2526
2527 void
2528 fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2529 {
2530 int msg_type = -1;
2531 int rlen = 4;
2532 uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
2533
2534 if (c->dispatch_width == 16) {
2535 rlen = 8;
2536 dst = vec16(dst);
2537 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
2538 }
2539
2540 if (intel->gen >= 5) {
2541 switch (inst->opcode) {
2542 case FS_OPCODE_TEX:
2543 if (inst->shadow_compare) {
2544 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
2545 } else {
2546 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
2547 }
2548 break;
2549 case FS_OPCODE_TXB:
2550 if (inst->shadow_compare) {
2551 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
2552 } else {
2553 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
2554 }
2555 break;
2556 case FS_OPCODE_TXL:
2557 if (inst->shadow_compare) {
2558 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
2559 } else {
2560 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
2561 }
2562 break;
2563 case FS_OPCODE_TXD:
2564 assert(!"TXD isn't supported on gen5+ yet.");
2565 break;
2566 }
2567 } else {
2568 switch (inst->opcode) {
2569 case FS_OPCODE_TEX:
2570 /* Note that G45 and older determine shadow compare and dispatch width
2571 * from message length for most messages.
2572 */
2573 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
2574 if (inst->shadow_compare) {
2575 assert(inst->mlen == 6);
2576 } else {
2577 assert(inst->mlen <= 4);
2578 }
2579 break;
2580 case FS_OPCODE_TXB:
2581 if (inst->shadow_compare) {
2582 assert(inst->mlen == 6);
2583 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
2584 } else {
2585 assert(inst->mlen == 9);
2586 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
2587 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
2588 }
2589 break;
2590 case FS_OPCODE_TXL:
2591 if (inst->shadow_compare) {
2592 assert(inst->mlen == 6);
2593 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
2594 } else {
2595 assert(inst->mlen == 9);
2596 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
2597 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
2598 }
2599 break;
2600 case FS_OPCODE_TXD:
2601 assert(!"TXD isn't supported on gen4 yet.");
2602 break;
2603 }
2604 }
2605 assert(msg_type != -1);
2606
2607 brw_SAMPLE(p,
2608 retype(dst, BRW_REGISTER_TYPE_UW),
2609 inst->base_mrf,
2610 src,
2611 SURF_INDEX_TEXTURE(inst->sampler),
2612 inst->sampler,
2613 WRITEMASK_XYZW,
2614 msg_type,
2615 rlen,
2616 inst->mlen,
2617 0,
2618 1,
2619 simd_mode);
2620 }
2621
2622
2623 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
2624 * looking like:
2625 *
2626 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
2627 *
2628 * and we're trying to produce:
2629 *
2630 * DDX DDY
2631 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
2632 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
2633 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
2634 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
2635 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
2636 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
2637 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
2638 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
2639 *
2640 * and add another set of two more subspans if in 16-pixel dispatch mode.
2641 *
2642 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
2643 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
2644 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
2645 * between each other. We could probably do it like ddx and swizzle the right
2646 * order later, but bail for now and just produce
2647 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
2648 */
2649 void
2650 fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2651 {
2652 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
2653 BRW_REGISTER_TYPE_F,
2654 BRW_VERTICAL_STRIDE_2,
2655 BRW_WIDTH_2,
2656 BRW_HORIZONTAL_STRIDE_0,
2657 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2658 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
2659 BRW_REGISTER_TYPE_F,
2660 BRW_VERTICAL_STRIDE_2,
2661 BRW_WIDTH_2,
2662 BRW_HORIZONTAL_STRIDE_0,
2663 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2664 brw_ADD(p, dst, src0, negate(src1));
2665 }
2666
2667 void
2668 fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
2669 {
2670 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
2671 BRW_REGISTER_TYPE_F,
2672 BRW_VERTICAL_STRIDE_4,
2673 BRW_WIDTH_4,
2674 BRW_HORIZONTAL_STRIDE_0,
2675 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2676 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
2677 BRW_REGISTER_TYPE_F,
2678 BRW_VERTICAL_STRIDE_4,
2679 BRW_WIDTH_4,
2680 BRW_HORIZONTAL_STRIDE_0,
2681 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2682 brw_ADD(p, dst, src0, negate(src1));
2683 }
2684
2685 void
2686 fs_visitor::generate_discard(fs_inst *inst)
2687 {
2688 struct brw_reg f0 = brw_flag_reg();
2689
2690 if (intel->gen >= 6) {
2691 struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
2692 struct brw_reg some_register;
2693
2694 /* As of gen6, we no longer have the mask register to look at,
2695 * so life gets a bit more complicated.
2696 */
2697
2698 /* Load the flag register with all ones. */
2699 brw_push_insn_state(p);
2700 brw_set_mask_control(p, BRW_MASK_DISABLE);
2701 brw_MOV(p, f0, brw_imm_uw(0xffff));
2702 brw_pop_insn_state(p);
2703
2704 /* Do a comparison that should always fail, to produce 0s in the flag
2705 * reg where we have active channels.
2706 */
2707 some_register = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
2708 brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
2709 BRW_CONDITIONAL_NZ, some_register, some_register);
2710
2711 /* Undo CMP's whacking of predication */
2712 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2713
2714 brw_push_insn_state(p);
2715 brw_set_mask_control(p, BRW_MASK_DISABLE);
2716 brw_AND(p, g1, f0, g1);
2717 brw_pop_insn_state(p);
2718 } else {
2719 struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
2721
2722 brw_push_insn_state(p);
2723 brw_set_mask_control(p, BRW_MASK_DISABLE);
2724 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
2725
2726 /* Unlike the 965, we have the mask reg, so we just need
2727 * somewhere to invert that (containing channels to be disabled)
2728 * so it can be ANDed with the mask of pixels still to be
2729 * written. Use the flag reg for consistency with gen6+.
2730 */
2731 brw_NOT(p, f0, brw_mask_reg(1)); /* IMASK */
2732 brw_AND(p, g0, f0, g0);
2733
2734 brw_pop_insn_state(p);
2735 }
2736 }
2737
2738 void
2739 fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
2740 {
2741 assert(inst->mlen != 0);
2742
2743 brw_MOV(p,
2744 retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
2745 retype(src, BRW_REGISTER_TYPE_UD));
2746 brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
2747 inst->offset);
2748 }
2749
2750 void
2751 fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
2752 {
2753 assert(inst->mlen != 0);
2754
2755 /* Clear any post destination dependencies that would be ignored by
2756 * the block read. See the B-Spec for pre-gen5 send instruction.
2757 *
2758 * This could use a better solution, since texture sampling and
2759 * math reads could potentially run into it as well -- anywhere
2760 * that we have a SEND with a destination that is a register that
2761 * was written but not read within the last N instructions (what's
2762 * N? unsure). This is rare because of dead code elimination, but
2763 * not impossible.
2764 */
2765 if (intel->gen == 4 && !intel->is_g4x)
2766 brw_MOV(p, brw_null_reg(), dst);
2767
2768 brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
2769 inst->offset);
2770
2771 if (intel->gen == 4 && !intel->is_g4x) {
2772 /* gen4 errata: destination from a send can't be used as a
2773 * destination until it's been read. Just read it so we don't
2774 * have to worry.
2775 */
2776 brw_MOV(p, brw_null_reg(), dst);
2777 }
2778 }
2779
2780
2781 void
2782 fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
2783 {
2784 assert(inst->mlen != 0);
2785
2786 /* Clear any post destination dependencies that would be ignored by
2787 * the block read. See the B-Spec for pre-gen5 send instruction.
2788 *
2789 * This could use a better solution, since texture sampling and
2790 * math reads could potentially run into it as well -- anywhere
2791 * that we have a SEND with a destination that is a register that
2792 * was written but not read within the last N instructions (what's
2793 * N? unsure). This is rare because of dead code elimination, but
2794 * not impossible.
2795 */
2796 if (intel->gen == 4 && !intel->is_g4x)
2797 brw_MOV(p, brw_null_reg(), dst);
2798
2799 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
2800 inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
2801
2802 if (intel->gen == 4 && !intel->is_g4x) {
2803 /* gen4 errata: destination from a send can't be used as a
2804 * destination until it's been read. Just read it so we don't
2805 * have to worry.
2806 */
2807 brw_MOV(p, brw_null_reg(), dst);
2808 }
2809 }
2810
2811 /**
2812 * To be called after the last _mesa_add_state_reference() call, to
2813 * set up prog_data.param[] for assign_curb_setup() and
2814 * setup_pull_constants().
2815 */
2816 void
2817 fs_visitor::setup_paramvalues_refs()
2818 {
2819 if (c->dispatch_width != 8)
2820 return;
2821
2822 /* Set up the pointers to ParamValues now that that array is finalized. */
2823 for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
2824 c->prog_data.param[i] =
2825 fp->Base.Parameters->ParameterValues[this->param_index[i]] +
2826 this->param_offset[i];
2827 }
2828 }
2829
2830 void
2831 fs_visitor::assign_curb_setup()
2832 {
2833 c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
2834 if (c->dispatch_width == 8) {
2835 c->prog_data.first_curbe_grf = c->nr_payload_regs;
2836 } else {
2837 c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
2838 }
2839
2840 /* Map the offsets in the UNIFORM file to fixed HW regs. */
2841 foreach_iter(exec_list_iterator, iter, this->instructions) {
2842 fs_inst *inst = (fs_inst *)iter.get();
2843
2844 for (unsigned int i = 0; i < 3; i++) {
2845 if (inst->src[i].file == UNIFORM) {
2846 int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
2847 struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
2848 constant_nr / 8,
2849 constant_nr % 8);
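
/* For instance, with nr_payload_regs == 2 (an assumed value), uniform
 * 10 lands at g3.2 -- the CURB is uploaded as 8 floats per register
 * immediately after the payload.
 */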
2850
2851 inst->src[i].file = FIXED_HW_REG;
2852 inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
2853 }
2854 }
2855 }
2856 }
2857
2858 void
2859 fs_visitor::calculate_urb_setup()
2860 {
2861 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2862 urb_setup[i] = -1;
2863 }
2864
2865 int urb_next = 0;
2866 /* Figure out where each of the incoming setup attributes lands. */
2867 if (intel->gen >= 6) {
2868 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
2869 if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
2870 urb_setup[i] = urb_next++;
2871 }
2872 }
2873 } else {
2874 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
2875 for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
2876 if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
2877 int fp_index;
2878
2879 if (i >= VERT_RESULT_VAR0)
2880 fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
2881 else if (i <= VERT_RESULT_TEX7)
2882 fp_index = i;
2883 else
2884 fp_index = -1;
2885
2886 if (fp_index >= 0)
2887 urb_setup[fp_index] = urb_next++;
2888 }
2889 }
2890 }
2891
2892 /* Each attribute is 4 setup channels, each of which is half a reg. */
2893 c->prog_data.urb_read_length = urb_next * 2;
2894 }
2895
2896 void
2897 fs_visitor::assign_urb_setup()
2898 {
2899 int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;
2900
2901 /* Offset all the urb_setup[] indices by the actual position of the
2902 * setup regs, now that the location of the constants has been chosen.
2903 */
2904 foreach_iter(exec_list_iterator, iter, this->instructions) {
2905 fs_inst *inst = (fs_inst *)iter.get();
2906
2907 if (inst->opcode == FS_OPCODE_LINTERP) {
2908 assert(inst->src[2].file == FIXED_HW_REG);
2909 inst->src[2].fixed_hw_reg.nr += urb_start;
2910 }
2911
2912 if (inst->opcode == FS_OPCODE_CINTERP) {
2913 assert(inst->src[0].file == FIXED_HW_REG);
2914 inst->src[0].fixed_hw_reg.nr += urb_start;
2915 }
2916 }
2917
2918 this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
2919 }
2920
2921 /**
2922 * Split large virtual GRFs into separate components if we can.
2923 *
2924 * This is mostly duplicated with what brw_fs_vector_splitting does,
2925 * but that's really conservative because it's afraid of doing
2926 * splitting that doesn't result in real progress after the rest of
2927 * the optimization phases, which would cause infinite looping in
2928 * optimization. We can do it once here, safely. This also has the
2929 * opportunity to split interpolated values, or maybe even uniforms,
2930 * which we don't have at the IR level.
2931 *
2932 * We want to split, because virtual GRFs are what we register
2933 * allocate and spill (due to contiguousness requirements for some
2934 * instructions), and they're what we naturally generate in the
2935 * codegen process, but most virtual GRFs don't actually need to be
2936 * contiguous sets of GRFs. If we split, we'll end up with reduced
2937 * live intervals and better dead code elimination and coalescing.
2938 */
2939 void
2940 fs_visitor::split_virtual_grfs()
2941 {
2942 int num_vars = this->virtual_grf_next;
2943 bool split_grf[num_vars];
2944 int new_virtual_grf[num_vars];
2945
2946 /* Try to split anything > 1 reg in size. */
2947 for (int i = 0; i < num_vars; i++) {
2948 if (this->virtual_grf_sizes[i] != 1)
2949 split_grf[i] = true;
2950 else
2951 split_grf[i] = false;
2952 }
2953
2954 if (brw->has_pln) {
2955 /* PLN opcodes rely on the delta_xy being contiguous. */
2956 split_grf[this->delta_x.reg] = false;
2957 }
2958
2959 foreach_iter(exec_list_iterator, iter, this->instructions) {
2960 fs_inst *inst = (fs_inst *)iter.get();
2961
2962 /* Texturing produces 4 contiguous registers, so no splitting. */
2963 if (inst->is_tex()) {
2964 split_grf[inst->dst.reg] = false;
2965 }
2966 }
2967
2968 /* Allocate new space for split regs. Note that the virtual
2969 * numbers will be contiguous.
2970 */
2971 for (int i = 0; i < num_vars; i++) {
2972 if (split_grf[i]) {
2973 new_virtual_grf[i] = virtual_grf_alloc(1);
2974 for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
2975 int reg = virtual_grf_alloc(1);
2976 assert(reg == new_virtual_grf[i] + j - 1);
2977 (void) reg;
2978 }
2979 this->virtual_grf_sizes[i] = 1;
2980 }
2981 }
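
/* A sketch of the remapping: for a size-3 virtual GRF v, reg_offset 0
 * stays in v, reg_offset 1 becomes new_virtual_grf[v], and reg_offset 2
 * becomes new_virtual_grf[v] + 1, so every access below ends up in a
 * size-1 register with reg_offset 0.
 */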
2982
2983 foreach_iter(exec_list_iterator, iter, this->instructions) {
2984 fs_inst *inst = (fs_inst *)iter.get();
2985
2986 if (inst->dst.file == GRF &&
2987 split_grf[inst->dst.reg] &&
2988 inst->dst.reg_offset != 0) {
2989 inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
2990 inst->dst.reg_offset - 1);
2991 inst->dst.reg_offset = 0;
2992 }
2993 for (int i = 0; i < 3; i++) {
2994 if (inst->src[i].file == GRF &&
2995 split_grf[inst->src[i].reg] &&
2996 inst->src[i].reg_offset != 0) {
2997 inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
2998 inst->src[i].reg_offset - 1);
2999 inst->src[i].reg_offset = 0;
3000 }
3001 }
3002 }
3003 this->live_intervals_valid = false;
3004 }
3005
3006 /**
3007 * Choose accesses from the UNIFORM file to demote to using the pull
3008 * constant buffer.
3009 *
3010 * We allow a fragment shader to have more than the GLSL spec's required
3011 * minimum for the maximum number of fragment shader uniform components
3012 * (64). If there are too many, they'd fill up all of the register space.
3013 * So, this will push some of them out to the pull constant buffer and
3014 * update the program to load them.
3015 */
3016 void
3017 fs_visitor::setup_pull_constants()
3018 {
3019 /* Only allow 16 registers (128 uniform components) as push constants. */
3020 unsigned int max_uniform_components = 16 * 8;
3021 if (c->prog_data.nr_params <= max_uniform_components)
3022 return;
3023
3024 if (c->dispatch_width == 16) {
3025 fail("Pull constants not supported in 16-wide\n");
3026 return;
3027 }
3028
3029 /* Just demote the end of the list. We could probably do better
3030 * here, demoting things that are rarely used in the program first.
3031 */
3032 int pull_uniform_base = max_uniform_components;
3033 int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;
3034
3035 foreach_iter(exec_list_iterator, iter, this->instructions) {
3036 fs_inst *inst = (fs_inst *)iter.get();
3037
3038 for (int i = 0; i < 3; i++) {
3039 if (inst->src[i].file != UNIFORM)
3040 continue;
3041
3042 int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
3043 if (uniform_nr < pull_uniform_base)
3044 continue;
3045
3046 fs_reg dst = fs_reg(this, glsl_type::float_type);
3047 fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
3048 dst);
3049 pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
3050 pull->ir = inst->ir;
3051 pull->annotation = inst->annotation;
3052 pull->base_mrf = 14;
3053 pull->mlen = 1;
3054
3055 inst->insert_before(pull);
3056
3057 inst->src[i].file = GRF;
3058 inst->src[i].reg = dst.reg;
3059 inst->src[i].reg_offset = 0;
3060 inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
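
/* For example, pull uniform 5 (an assumed index): the oword block read
 * fetches the 16-byte-aligned block at offset (5 * 4) & ~15 == 16, and
 * smear == (5 & 3) == 1 selects the second float within that block.
 */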
3061 }
3062 }
3063
3064 for (int i = 0; i < pull_uniform_count; i++) {
3065 c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
3066 c->prog_data.pull_param_convert[i] =
3067 c->prog_data.param_convert[pull_uniform_base + i];
3068 }
3069 c->prog_data.nr_params -= pull_uniform_count;
3070 c->prog_data.nr_pull_params = pull_uniform_count;
3071 }
3072
3073 void
3074 fs_visitor::calculate_live_intervals()
3075 {
3076 int num_vars = this->virtual_grf_next;
3077 int *def = ralloc_array(mem_ctx, int, num_vars);
3078 int *use = ralloc_array(mem_ctx, int, num_vars);
3079 int loop_depth = 0;
3080 int loop_start = 0;
3081
3082 if (this->live_intervals_valid)
3083 return;
3084
3085 for (int i = 0; i < num_vars; i++) {
3086 def[i] = MAX_INSTRUCTION;
3087 use[i] = -1;
3088 }
3089
3090 int ip = 0;
3091 foreach_iter(exec_list_iterator, iter, this->instructions) {
3092 fs_inst *inst = (fs_inst *)iter.get();
3093
3094 if (inst->opcode == BRW_OPCODE_DO) {
3095 if (loop_depth++ == 0)
3096 loop_start = ip;
3097 } else if (inst->opcode == BRW_OPCODE_WHILE) {
3098 loop_depth--;
3099
3100 if (loop_depth == 0) {
3101 /* Patches up the use of vars marked for being live across
3102 * the whole loop.
3103 */
3104 for (int i = 0; i < num_vars; i++) {
3105 if (use[i] == loop_start) {
3106 use[i] = ip;
3107 }
3108 }
3109 }
3110 } else {
3111 for (unsigned int i = 0; i < 3; i++) {
3112 if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
3113 int reg = inst->src[i].reg;
3114
3115 if (!loop_depth) {
3116 use[reg] = ip;
3117 } else {
3118 def[reg] = MIN2(loop_start, def[reg]);
3119 use[reg] = loop_start;
3120
3121 /* Nobody else is going to push our start to later
3122 * in the loop now, because def[reg] now points
3123 * before the loop header.
3124 */
3125 }
3126 }
3127 }
3128 if (inst->dst.file == GRF && inst->dst.reg != 0) {
3129 int reg = inst->dst.reg;
3130
3131 if (!loop_depth) {
3132 def[reg] = MIN2(def[reg], ip);
3133 } else {
3134 def[reg] = MIN2(def[reg], loop_start);
3135 }
3136 }
3137 }
3138
3139 ip++;
3140 }
3141
3142 ralloc_free(this->virtual_grf_def);
3143 ralloc_free(this->virtual_grf_use);
3144 this->virtual_grf_def = def;
3145 this->virtual_grf_use = use;
3146
3147 this->live_intervals_valid = true;
3148 }
3149
3150 /**
3151 * Attempts to move immediate constants into the immediate
3152 * constant slot of following instructions.
3153 *
3154 * Immediate constants are a bit tricky -- they have to be in the last
3155 * operand slot, and you can't do abs/negate on them.
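 *
 * As a sketch: "ADD dst, imm, a" isn't encodable, so this pass commutes
 * it to "ADD dst, a, imm"; CMP additionally swaps its conditional mod
 * (via brw_swap_cmod) and SEL inverts its predicate sense, so the
 * result is unchanged.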
3156 */
3157
3158 bool
3159 fs_visitor::propagate_constants()
3160 {
3161 bool progress = false;
3162
3163 calculate_live_intervals();
3164
3165 foreach_iter(exec_list_iterator, iter, this->instructions) {
3166 fs_inst *inst = (fs_inst *)iter.get();
3167
3168 if (inst->opcode != BRW_OPCODE_MOV ||
3169 inst->predicated ||
3170 inst->dst.file != GRF || inst->src[0].file != IMM ||
3171 inst->dst.type != inst->src[0].type ||
3172 (c->dispatch_width == 16 &&
3173 (inst->force_uncompressed || inst->force_sechalf)))
3174 continue;
3175
3176 /* Don't bother with cases where we should have had the
3177 * operation on the constant folded in GLSL already.
3178 */
3179 if (inst->saturate)
3180 continue;
3181
3182 /* Found a move of a constant to a GRF. Find anything else using the GRF
3183 * before it's written, and replace it with the constant if we can.
3184 */
3185 exec_list_iterator scan_iter = iter;
3186 scan_iter.next();
3187 for (; scan_iter.has_next(); scan_iter.next()) {
3188 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
3189
3190 if (scan_inst->opcode == BRW_OPCODE_DO ||
3191 scan_inst->opcode == BRW_OPCODE_WHILE ||
3192 scan_inst->opcode == BRW_OPCODE_ELSE ||
3193 scan_inst->opcode == BRW_OPCODE_ENDIF) {
3194 break;
3195 }
3196
3197 for (int i = 2; i >= 0; i--) {
3198 if (scan_inst->src[i].file != GRF ||
3199 scan_inst->src[i].reg != inst->dst.reg ||
3200 scan_inst->src[i].reg_offset != inst->dst.reg_offset)
3201 continue;
3202
3203 /* Don't bother with cases where we should have had the
3204 * operation on the constant folded in GLSL already.
3205 */
3206 if (scan_inst->src[i].negate || scan_inst->src[i].abs)
3207 continue;
3208
3209 switch (scan_inst->opcode) {
3210 case BRW_OPCODE_MOV:
3211 scan_inst->src[i] = inst->src[0];
3212 progress = true;
3213 break;
3214
3215 case BRW_OPCODE_MUL:
3216 case BRW_OPCODE_ADD:
3217 if (i == 1) {
3218 scan_inst->src[i] = inst->src[0];
3219 progress = true;
3220 } else if (i == 0 && scan_inst->src[1].file != IMM) {
3221 /* Fit this constant in by commuting the operands */
3222 scan_inst->src[0] = scan_inst->src[1];
3223 scan_inst->src[1] = inst->src[0];
3224 progress = true;
3225 }
3226 break;
3227
3228 case BRW_OPCODE_CMP:
3229 if (i == 1) {
3230 scan_inst->src[i] = inst->src[0];
3231 progress = true;
3232 } else if (i == 0 && scan_inst->src[1].file != IMM) {
3233 uint32_t new_cmod;
3234
3235 new_cmod = brw_swap_cmod(scan_inst->conditional_mod);
3236 if (new_cmod != ~0u) {
3237 /* Fit this constant in by swapping the operands and
3238 * flipping the test
3239 */
3240 scan_inst->src[0] = scan_inst->src[1];
3241 scan_inst->src[1] = inst->src[0];
3242 scan_inst->conditional_mod = new_cmod;
3243 progress = true;
3244 }
3245 }
3246 break;
3247
3248 case BRW_OPCODE_SEL:
3249 if (i == 1) {
3250 scan_inst->src[i] = inst->src[0];
3251 progress = true;
3252 } else if (i == 0 && scan_inst->src[1].file != IMM) {
3253 /* Fit this constant in by swapping the operands and
3254 * flipping the predicate
3255 */
3256 scan_inst->src[0] = scan_inst->src[1];
3257 scan_inst->src[1] = inst->src[0];
3258 scan_inst->predicate_inverse = !scan_inst->predicate_inverse;
3259 progress = true;
3260 }
3261 break;
3262 }
3263 }
3264
3265 if (scan_inst->dst.file == GRF &&
3266 scan_inst->dst.reg == inst->dst.reg &&
3267 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
3268 scan_inst->is_tex())) {
3269 break;
3270 }
3271 }
3272 }
3273
3274 if (progress)
3275 this->live_intervals_valid = false;
3276
3277 return progress;
3278 }
3279 /**
3280 * Must be called after calculate_live_intervals() to remove unused
3281 * writes to registers -- register allocation will fail otherwise
3282 * because something def'd but not used won't be considered to
3283 * interfere with other regs.
3284 */
3285 bool
3286 fs_visitor::dead_code_eliminate()
3287 {
3288 bool progress = false;
3289 int pc = 0;
3290
3291 calculate_live_intervals();
3292
3293 foreach_iter(exec_list_iterator, iter, this->instructions) {
3294 fs_inst *inst = (fs_inst *)iter.get();
3295
3296 if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
3297 inst->remove();
3298 progress = true;
3299 }
3300
3301 pc++;
3302 }
3303
3304 if (progress)
3305 live_intervals_valid = false;
3306
3307 return progress;
3308 }
3309
3310 bool
3311 fs_visitor::register_coalesce()
3312 {
3313 bool progress = false;
3314 int if_depth = 0;
3315 int loop_depth = 0;
3316
3317 foreach_iter(exec_list_iterator, iter, this->instructions) {
3318 fs_inst *inst = (fs_inst *)iter.get();
3319
3320 /* Make sure that we dominate the instructions we're going to
3321 * scan for interfering with our coalescing, or we won't have
3322 * scanned enough to see if anything interferes with our
3323 * coalescing. We don't dominate the following instructions if
3324 * we're in a loop or an if block.
3325 */
3326 switch (inst->opcode) {
3327 case BRW_OPCODE_DO:
3328 loop_depth++;
3329 break;
3330 case BRW_OPCODE_WHILE:
3331 loop_depth--;
3332 break;
3333 case BRW_OPCODE_IF:
3334 if_depth++;
3335 break;
3336 case BRW_OPCODE_ENDIF:
3337 if_depth--;
3338 break;
3339 }
3340 if (loop_depth || if_depth)
3341 continue;
3342
3343 if (inst->opcode != BRW_OPCODE_MOV ||
3344 inst->predicated ||
3345 inst->saturate ||
3346 inst->dst.file != GRF || inst->src[0].file != GRF ||
3347 inst->dst.type != inst->src[0].type)
3348 continue;
3349
3350 bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;
3351
3352 /* Found a move of a GRF to a GRF. Let's see if we can coalesce
3353 * them: check for no writes to either one until the exit of the
3354 * program.
3355 */
3356 bool interfered = false;
3357 exec_list_iterator scan_iter = iter;
3358 scan_iter.next();
3359 for (; scan_iter.has_next(); scan_iter.next()) {
3360 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
3361
3362 if (scan_inst->dst.file == GRF) {
3363 if (scan_inst->dst.reg == inst->dst.reg &&
3364 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
3365 scan_inst->is_tex())) {
3366 interfered = true;
3367 break;
3368 }
3369 if (scan_inst->dst.reg == inst->src[0].reg &&
3370 (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
3371 scan_inst->is_tex())) {
3372 interfered = true;
3373 break;
3374 }
3375 }
3376
3377 /* The gen6 MATH instruction can't handle source modifiers, so avoid
3378 * coalescing those for now. We should do something more specific.
3379 */
3380 if (intel->gen >= 6 && scan_inst->is_math() && has_source_modifiers) {
3381 interfered = true;
3382 break;
3383 }
3384 }
3385 if (interfered) {
3386 continue;
3387 }
3388
3389 /* Rewrite the later usage to point at the source of the move to
3390 * be removed.
3391 */
3392 for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
3393 scan_iter.next()) {
3394 fs_inst *scan_inst = (fs_inst *)scan_iter.get();
3395
3396 for (int i = 0; i < 3; i++) {
3397 if (scan_inst->src[i].file == GRF &&
3398 scan_inst->src[i].reg == inst->dst.reg &&
3399 scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
3400 scan_inst->src[i].reg = inst->src[0].reg;
3401 scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
3402 scan_inst->src[i].abs |= inst->src[0].abs;
3403 scan_inst->src[i].negate ^= inst->src[0].negate;
3404 scan_inst->src[i].smear = inst->src[0].smear;
3405 }
3406 }
3407 }
3408
3409 inst->remove();
3410 progress = true;
3411 }
3412
3413 if (progress)
3414 live_intervals_valid = false;
3415
3416 return progress;
3417 }
3418
3419
3420 bool
3421 fs_visitor::compute_to_mrf()
3422 {
3423 bool progress = false;
3424 int next_ip = 0;
3425
3426 calculate_live_intervals();
3427
3428 foreach_iter(exec_list_iterator, iter, this->instructions) {
3429 fs_inst *inst = (fs_inst *)iter.get();
3430
3431 int ip = next_ip;
3432 next_ip++;
3433
3434 if (inst->opcode != BRW_OPCODE_MOV ||
3435 inst->predicated ||
3436 inst->dst.file != MRF || inst->src[0].file != GRF ||
3437 inst->dst.type != inst->src[0].type ||
3438 inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
3439 continue;
3440
3441 /* Work out which hardware MRF registers are written by this
3442 * instruction.
3443 */
3444 int mrf_low = inst->dst.hw_reg & ~BRW_MRF_COMPR4;
3445 int mrf_high;
3446 if (inst->dst.hw_reg & BRW_MRF_COMPR4) {
3447 mrf_high = mrf_low + 4;
3448 } else if (c->dispatch_width == 16 &&
3449 (!inst->force_uncompressed && !inst->force_sechalf)) {
3450 mrf_high = mrf_low + 1;
3451 } else {
3452 mrf_high = mrf_low;
3453 }
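
/* For example: a compressed SIMD16 MOV to m2 covers m2..m3 (mrf_high =
 * mrf_low + 1), while a COMPR4 write to m2 touches m2 and m6 (mrf_high =
 * mrf_low + 4).
 */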
3454
3455 /* Can't compute-to-MRF this GRF if someone else was going to
3456 * read it later.
3457 */
3458 if (this->virtual_grf_use[inst->src[0].reg] > ip)
3459 continue;
3460
3461 /* Found a move of a GRF to a MRF. Let's see if we can go
3462 * rewrite the thing that made this GRF to write into the MRF.
3463 */
3464 fs_inst *scan_inst;
3465 for (scan_inst = (fs_inst *)inst->prev;
3466 scan_inst->prev != NULL;
3467 scan_inst = (fs_inst *)scan_inst->prev) {
3468 if (scan_inst->dst.file == GRF &&
3469 scan_inst->dst.reg == inst->src[0].reg) {
3470 /* Found the last instruction that wrote the reg we want to
3471 * turn into a compute-to-MRF.
3472 */
3473
3474 if (scan_inst->is_tex()) {
3475 /* texturing writes several contiguous regs, so we can't
3476 * compute-to-mrf that.
3477 */
3478 break;
3479 }
3480
3481 /* If it's predicated, it (probably) didn't populate all
3482 * the channels. We might be able to rewrite everything
3483 * that writes that reg, but it would require smarter
3484 * tracking to delay the rewriting until complete success.
3485 */
3486 if (scan_inst->predicated)
3487 break;
3488
3489 /* If it only writes half of the register, and it's not the
3490 * same half as the MOV we're trying to remove, bail for now.
3491 */
3492 if (scan_inst->force_uncompressed != inst->force_uncompressed ||
3493 scan_inst->force_sechalf != inst->force_sechalf) {
3494 break;
3495 }
3496
3497 /* SEND instructions can't have MRF as a destination. */
3498 if (scan_inst->mlen)
3499 break;
3500
3501 if (intel->gen >= 6) {
3502 /* gen6 math instructions must have the destination be
3503 * GRF, so no compute-to-MRF for them.
3504 */
3505 if (scan_inst->is_math()) {
3506 break;
3507 }
3508 }
3509
3510 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
3511 /* Found the creator of our MRF's source value. */
3512 scan_inst->dst.file = MRF;
3513 scan_inst->dst.hw_reg = inst->dst.hw_reg;
3514 scan_inst->saturate |= inst->saturate;
3515 inst->remove();
3516 progress = true;
3517 }
3518 break;
3519 }
3520
3521 /* We don't handle flow control here. Most values that end
3522 * up in MRFs are computed shortly before the MRF write
3523 * anyway.
3524 */
3525 if (scan_inst->opcode == BRW_OPCODE_DO ||
3526 scan_inst->opcode == BRW_OPCODE_WHILE ||
3527 scan_inst->opcode == BRW_OPCODE_ELSE ||
3528 scan_inst->opcode == BRW_OPCODE_ENDIF) {
3529 break;
3530 }
3531
3532 /* You can't read from an MRF, so if someone else reads our
3533 * MRF's source GRF that we wanted to rewrite, that stops us.
3534 */
3535 bool interfered = false;
3536 for (int i = 0; i < 3; i++) {
3537 if (scan_inst->src[i].file == GRF &&
3538 scan_inst->src[i].reg == inst->src[0].reg &&
3539 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
3540 interfered = true;
3541 }
3542 }
3543 if (interfered)
3544 break;
3545
3546 if (scan_inst->dst.file == MRF) {
3547 /* If somebody else writes our MRF here, we can't
3548 * compute-to-MRF before that.
3549 */
3550 int scan_mrf_low = scan_inst->dst.hw_reg & ~BRW_MRF_COMPR4;
3551 int scan_mrf_high;
3552
3553 if (scan_inst->dst.hw_reg & BRW_MRF_COMPR4) {
3554 scan_mrf_high = scan_mrf_low + 4;
3555 } else if (c->dispatch_width == 16 &&
3556 (!scan_inst->force_uncompressed &&
3557 !scan_inst->force_sechalf)) {
3558 scan_mrf_high = scan_mrf_low + 1;
3559 } else {
3560 scan_mrf_high = scan_mrf_low;
3561 }
3562
3563 if (mrf_low == scan_mrf_low ||
3564 mrf_low == scan_mrf_high ||
3565 mrf_high == scan_mrf_low ||
3566 mrf_high == scan_mrf_high) {
3567 break;
3568 }
3569 }
3570
3571 if (scan_inst->mlen > 0) {
3572 /* Found a SEND instruction, which means that there are
3573 * live values in MRFs from base_mrf to base_mrf +
3574 * scan_inst->mlen - 1. Don't go pushing our MRF write up
3575 * above it.
3576 */
3577 if (mrf_low >= scan_inst->base_mrf &&
3578 mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
3579 break;
3580 }
3581 if (mrf_high >= scan_inst->base_mrf &&
3582 mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
3583 break;
3584 }
3585 }
3586 }
3587 }
3588
3589 return progress;
3590 }
3591
3592 /**
3593 * Walks through basic blocks, looking for repeated MRF writes and
3594 * removing the later ones.
3595 */
3596 bool
3597 fs_visitor::remove_duplicate_mrf_writes()
3598 {
3599 fs_inst *last_mrf_move[16];
3600 bool progress = false;
3601
3602 /* Need to update the MRF tracking for compressed instructions. */
3603 if (c->dispatch_width == 16)
3604 return false;
3605
3606 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3607
3608 foreach_iter(exec_list_iterator, iter, this->instructions) {
3609 fs_inst *inst = (fs_inst *)iter.get();
3610
3611 switch (inst->opcode) {
3612 case BRW_OPCODE_DO:
3613 case BRW_OPCODE_WHILE:
3614 case BRW_OPCODE_IF:
3615 case BRW_OPCODE_ELSE:
3616 case BRW_OPCODE_ENDIF:
3617 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3618 continue;
3619 default:
3620 break;
3621 }
3622
3623 if (inst->opcode == BRW_OPCODE_MOV &&
3624 inst->dst.file == MRF) {
3625 fs_inst *prev_inst = last_mrf_move[inst->dst.hw_reg];
3626 if (prev_inst && inst->equals(prev_inst)) {
3627 inst->remove();
3628 progress = true;
3629 continue;
3630 }
3631 }
3632
3633 /* Clear out the last-write records for MRFs that were overwritten. */
3634 if (inst->dst.file == MRF) {
3635 last_mrf_move[inst->dst.hw_reg] = NULL;
3636 }
3637
3638 if (inst->mlen > 0) {
3639 /* Found a SEND instruction, which will include two or fewer
3640 * implied MRF writes. We could do better here.
3641 */
3642 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3643 last_mrf_move[inst->base_mrf + i] = NULL;
3644 }
3645 }
3646
3647 /* Clear out any MRF move records whose sources got overwritten. */
3648 if (inst->dst.file == GRF) {
3649 for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
3650 if (last_mrf_move[i] &&
3651 last_mrf_move[i]->src[0].reg == inst->dst.reg) {
3652 last_mrf_move[i] = NULL;
3653 }
3654 }
3655 }
3656
3657 if (inst->opcode == BRW_OPCODE_MOV &&
3658 inst->dst.file == MRF &&
3659 inst->src[0].file == GRF &&
3660 !inst->predicated) {
3661 last_mrf_move[inst->dst.hw_reg] = inst;
3662 }
3663 }
3664
3665 return progress;
3666 }
3667
3668 bool
3669 fs_visitor::virtual_grf_interferes(int a, int b)
3670 {
3671 int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
3672 int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);
3673
3674 /* We can't handle dead register writes here, without iterating
3675 * over the whole instruction stream to find every single dead
3676 * write to that register to compare to the live interval of the
3677 * other register. Just assert that dead_code_eliminate() has been
3678 * called.
3679 */
3680 assert((this->virtual_grf_use[a] != -1 ||
3681 this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
3682 (this->virtual_grf_use[b] != -1 ||
3683 this->virtual_grf_def[b] == MAX_INSTRUCTION));
3684
3685 /* If the register is used to store 16 values of less than float
3686 * size (only the case for pixel_[xy]), then we can't allocate
3687 * another dword-sized thing to that register that would be used in
3688 * the same instruction. This is because when the GPU decodes (for
3689 * example):
3690 *
3691 * (declare (in ) vec4 gl_FragCoord@0x97766a0)
3692 * add(16) g6<1>F g6<8,8,1>UW 0.5F { align1 compr };
3693 *
3694 * it's actually processed as:
3695 * add(8) g6<1>F g6<8,8,1>UW 0.5F { align1 };
3696 * add(8) g7<1>F g6.8<8,8,1>UW 0.5F { align1 sechalf };
3697 *
3698 * so our second half values in g6 got overwritten in the first
3699 * half.
3700 */
3701 if (c->dispatch_width == 16 && (this->pixel_x.reg == a ||
3702 this->pixel_x.reg == b ||
3703 this->pixel_y.reg == a ||
3704 this->pixel_y.reg == b)) {
3705 return start <= end;
3706 }
3707
3708 return start < end;
3709 }
3710
3711 static struct brw_reg brw_reg_from_fs_reg(fs_reg *reg)
3712 {
3713 struct brw_reg brw_reg;
3714
3715 switch (reg->file) {
3716 case GRF:
3717 case ARF:
3718 case MRF:
3719 if (reg->smear == -1) {
3720 brw_reg = brw_vec8_reg(reg->file,
3721 reg->hw_reg, 0);
3722 } else {
3723 brw_reg = brw_vec1_reg(reg->file,
3724 reg->hw_reg, reg->smear);
3725 }
3726 brw_reg = retype(brw_reg, reg->type);
3727 if (reg->sechalf)
3728 brw_reg = sechalf(brw_reg);
3729 break;
3730 case IMM:
3731 switch (reg->type) {
3732 case BRW_REGISTER_TYPE_F:
3733 brw_reg = brw_imm_f(reg->imm.f);
3734 break;
3735 case BRW_REGISTER_TYPE_D:
3736 brw_reg = brw_imm_d(reg->imm.i);
3737 break;
3738 case BRW_REGISTER_TYPE_UD:
3739 brw_reg = brw_imm_ud(reg->imm.u);
3740 break;
3741 default:
3742 assert(!"not reached");
3743 brw_reg = brw_null_reg();
3744 break;
3745 }
3746 break;
3747 case FIXED_HW_REG:
3748 brw_reg = reg->fixed_hw_reg;
3749 break;
3750 case BAD_FILE:
3751 /* Probably unused. */
3752 brw_reg = brw_null_reg();
3753 break;
3754 case UNIFORM:
3755 assert(!"not reached");
3756 brw_reg = brw_null_reg();
3757 break;
3758 default:
3759 assert(!"not reached");
3760 brw_reg = brw_null_reg();
3761 break;
3762 }
3763 if (reg->abs)
3764 brw_reg = brw_abs(brw_reg);
3765 if (reg->negate)
3766 brw_reg = negate(brw_reg);
3767
3768 return brw_reg;
3769 }
3770
3771 void
3772 fs_visitor::generate_code()
3773 {
3774 int last_native_inst = p->nr_insn;
3775 const char *last_annotation_string = NULL;
3776 ir_instruction *last_annotation_ir = NULL;
3777
3778 int loop_stack_array_size = 16;
3779 int loop_stack_depth = 0;
3780 brw_instruction **loop_stack =
3781 rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
3782 int *if_depth_in_loop =
3783 rzalloc_array(this->mem_ctx, int, loop_stack_array_size);
3784
3785
3786 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3787 printf("Native code for fragment shader %d (%d-wide dispatch):\n",
3788 ctx->Shader.CurrentFragmentProgram->Name, c->dispatch_width);
3789 }
3790
3791 foreach_iter(exec_list_iterator, iter, this->instructions) {
3792 fs_inst *inst = (fs_inst *)iter.get();
3793 struct brw_reg src[3], dst;
3794
3795 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
3796 if (last_annotation_ir != inst->ir) {
3797 last_annotation_ir = inst->ir;
3798 if (last_annotation_ir) {
3799 printf(" ");
3800 last_annotation_ir->print();
3801 printf("\n");
3802 }
3803 }
3804 if (last_annotation_string != inst->annotation) {
3805 last_annotation_string = inst->annotation;
3806 if (last_annotation_string)
3807 printf(" %s\n", last_annotation_string);
3808 }
3809 }
3810
3811 for (unsigned int i = 0; i < 3; i++) {
3812 src[i] = brw_reg_from_fs_reg(&inst->src[i]);
3813 }
3814 dst = brw_reg_from_fs_reg(&inst->dst);
3815
3816 brw_set_conditionalmod(p, inst->conditional_mod);
3817 brw_set_predicate_control(p, inst->predicated);
3818 brw_set_predicate_inverse(p, inst->predicate_inverse);
3819 brw_set_saturate(p, inst->saturate);
3820
3821 if (inst->force_uncompressed || c->dispatch_width == 8) {
3822 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
3823 } else if (inst->force_sechalf) {
3824 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
3825 } else {
3826 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
3827 }
3828
3829 switch (inst->opcode) {
3830 case BRW_OPCODE_MOV:
3831 brw_MOV(p, dst, src[0]);
3832 break;
3833 case BRW_OPCODE_ADD:
3834 brw_ADD(p, dst, src[0], src[1]);
3835 break;
3836 case BRW_OPCODE_MUL:
3837 brw_MUL(p, dst, src[0], src[1]);
3838 break;
3839
3840 case BRW_OPCODE_FRC:
3841 brw_FRC(p, dst, src[0]);
3842 break;
3843 case BRW_OPCODE_RNDD:
3844 brw_RNDD(p, dst, src[0]);
3845 break;
3846 case BRW_OPCODE_RNDE:
3847 brw_RNDE(p, dst, src[0]);
3848 break;
3849 case BRW_OPCODE_RNDZ:
3850 brw_RNDZ(p, dst, src[0]);
3851 break;
3852
3853 case BRW_OPCODE_AND:
3854 brw_AND(p, dst, src[0], src[1]);
3855 break;
3856 case BRW_OPCODE_OR:
3857 brw_OR(p, dst, src[0], src[1]);
3858 break;
3859 case BRW_OPCODE_XOR:
3860 brw_XOR(p, dst, src[0], src[1]);
3861 break;
3862 case BRW_OPCODE_NOT:
3863 brw_NOT(p, dst, src[0]);
3864 break;
3865 case BRW_OPCODE_ASR:
3866 brw_ASR(p, dst, src[0], src[1]);
3867 break;
3868 case BRW_OPCODE_SHR:
3869 brw_SHR(p, dst, src[0], src[1]);
3870 break;
3871 case BRW_OPCODE_SHL:
3872 brw_SHL(p, dst, src[0], src[1]);
3873 break;
3874
3875 case BRW_OPCODE_CMP:
3876 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
3877 break;
3878 case BRW_OPCODE_SEL:
3879 brw_SEL(p, dst, src[0], src[1]);
3880 break;
3881
3882 case BRW_OPCODE_IF:
3883 if (inst->src[0].file != BAD_FILE) {
3884 /* The instruction has an embedded compare (only allowed on gen6) */
3885 assert(intel->gen == 6);
3886 gen6_IF(p, inst->conditional_mod, src[0], src[1]);
3887 } else {
3888 brw_IF(p, c->dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
3889 }
3890 if_depth_in_loop[loop_stack_depth]++;
3891 break;
3892
3893 case BRW_OPCODE_ELSE:
3894 brw_ELSE(p);
3895 break;
3896 case BRW_OPCODE_ENDIF:
3897 brw_ENDIF(p);
3898 if_depth_in_loop[loop_stack_depth]--;
3899 break;
3900
3901 case BRW_OPCODE_DO:
3902 loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
3903 if (loop_stack_array_size <= loop_stack_depth) {
3904 loop_stack_array_size *= 2;
3905 loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
3906 loop_stack_array_size);
3907 if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
3908 loop_stack_array_size);
3909 }
3910 if_depth_in_loop[loop_stack_depth] = 0;
3911 break;
3912
3913 case BRW_OPCODE_BREAK:
3914 brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
3915 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3916 break;
3917 case BRW_OPCODE_CONTINUE:
3918 /* FINISHME: We still need to write the loop instruction support. */
3919 if (intel->gen >= 6)
3920 gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
3921 else
3922 brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
3923 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
3924 break;
3925
3926 case BRW_OPCODE_WHILE: {
3927 struct brw_instruction *inst0, *inst1;
3928 GLuint br = 1;
3929
3930 if (intel->gen >= 5)
3931 br = 2;
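
/* br scales the branch offsets: gen5+ counts jump distances in 64-bit
 * (half-instruction) units rather than in whole 128-bit instructions,
 * so each native instruction is two steps.
 */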
3932
3933 assert(loop_stack_depth > 0);
3934 loop_stack_depth--;
3935 inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
3936 if (intel->gen < 6) {
3937 /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
      break;

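      /* All of the math built-ins funnel through one helper, since the
       * encoding differs by generation: a message to the shared math unit
       * before gen6, a native math instruction on gen6.
       */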
      case FS_OPCODE_RCP:
      case FS_OPCODE_RSQ:
      case FS_OPCODE_SQRT:
      case FS_OPCODE_EXP2:
      case FS_OPCODE_LOG2:
      case FS_OPCODE_POW:
      case FS_OPCODE_SIN:
      case FS_OPCODE_COS:
         generate_math(inst, dst, src);
         break;
      case FS_OPCODE_PIXEL_X:
         generate_pixel_xy(dst, true);
         break;
      case FS_OPCODE_PIXEL_Y:
         generate_pixel_xy(dst, false);
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case FS_OPCODE_TXD:
      case FS_OPCODE_TXL:
         generate_tex(inst, dst, src[0]);
         break;
      case FS_OPCODE_DISCARD:
         generate_discard(inst);
         break;
      case FS_OPCODE_DDX:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;

      case FS_OPCODE_SPILL:
         generate_spill(inst, src[0]);
         break;

      case FS_OPCODE_UNSPILL:
         generate_unspill(inst, dst);
         break;

      case FS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(inst, dst);
         break;

      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
      default:
         if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
            _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
                          brw_opcodes[inst->opcode].name);
         } else {
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         fail("unsupported opcode in FS\n");
      }

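      /* With INTEL_DEBUG=wm set, disassemble each native instruction as it's
       * generated for this IR instruction; the dead if (0) branch can be
       * enabled to dump the raw instruction dwords too.
       */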
      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            if (0) {
               printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                      ((uint32_t *)&p->store[i])[3],
                      ((uint32_t *)&p->store[i])[2],
                      ((uint32_t *)&p->store[i])[1],
                      ((uint32_t *)&p->store[i])[0]);
            }
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("\n");
   }

   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);

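   /* All instructions are emitted, so their final offsets are known; this
    * pass walks the program and fills in the UIP/JIP jump targets that gen6
    * flow-control instructions carry.
    */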
   brw_set_uip_jip(p);

   /* While the INTEL_DEBUG=wm dump above is handy for debugging FS codegen
    * issues, it doesn't include the final jump distances, which are often
    * what needs inspecting.  Flip this block on to re-dump the whole
    * program after they've been filled in.
    */
   if (0) {
      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = 0; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }
   }
}

bool
fs_visitor::run()
{
   uint32_t prog_offset_16 = 0;
   uint32_t orig_nr_params = c->prog_data.nr_params;

   brw_wm_payload_setup(brw, c);

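   /* The 16-wide program is emitted into the same instruction store as the
    * 8-wide one, and the hardware's kernel start pointers are expressed in
    * 64-byte units, so its first instruction has to land on a 64-byte
    * boundary; pad with NOPs until it does.
    */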
   if (c->dispatch_width == 16) {
      /* Align to a 64-byte boundary. */
      while ((c->func.nr_insn * sizeof(struct brw_instruction)) % 64) {
         brw_NOP(p);
      }

      /* Save off the start of this 16-wide program in case we succeed. */
      prog_offset_16 = c->func.nr_insn * sizeof(struct brw_instruction);

      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
   }

   if (0) {
      emit_dummy_fs();
   } else {
      calculate_urb_setup();
      if (intel->gen < 6)
         emit_interpolation_setup_gen4();
      else
         emit_interpolation_setup_gen6();

      /* Generate FS IR for main().  (The visitor only descends into
       * functions called "main".)
       */
      foreach_iter(exec_list_iterator, iter, *shader->ir) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         base_ir = ir;
         ir->accept(this);
      }

      emit_fb_writes();

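      /* Break up virtual GRFs that are larger than one hardware register and
       * never used as a unit, so the scalar passes below can track each
       * piece independently.
       */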
      split_virtual_grfs();

      setup_paramvalues_refs();
      setup_pull_constants();

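      /* Run the backend optimization passes to a fixed point: each pass can
       * expose new opportunities for the others, so keep iterating until a
       * full round makes no progress.
       */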
      bool progress;
      do {
         progress = false;

         progress = remove_duplicate_mrf_writes() || progress;

         progress = propagate_constants() || progress;
         progress = register_coalesce() || progress;
         progress = compute_to_mrf() || progress;
         progress = dead_code_eliminate() || progress;
      } while (progress);

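      /* Schedule, then map references to push constants (the CURB) and to
       * the barycentric/attribute setup data (the URB) onto the fixed
       * payload registers they arrive in.
       */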
      schedule_instructions();

      assign_curb_setup();
      assign_urb_setup();

      if (0) {
         /* Debug of register spilling: Go spill everything. */
         int virtual_grf_count = virtual_grf_next;
         for (int i = 1; i < virtual_grf_count; i++) {
            spill_reg(i);
         }
      }

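      /* Keep retrying register allocation; each failed attempt is expected
       * to spill something and try again, until allocation succeeds or the
       * compile is flagged as failed.
       */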
      if (0)
         assign_regs_trivial();
      else {
         while (!assign_regs()) {
            if (failed)
               break;
         }
      }
   }
   assert(force_uncompressed_stack == 0);
   assert(force_sechalf_stack == 0);

   if (failed)
      return false;

   generate_code();

   if (c->dispatch_width == 8) {
      c->prog_data.total_grf = grf_used;
   } else {
      c->prog_data.total_grf_16 = grf_used;
      c->prog_data.prog_offset_16 = prog_offset_16;

      /* Make sure we didn't try to sneak in an extra uniform */
      assert(orig_nr_params == c->prog_data.nr_params);
   }

   return !failed;
}

bool
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;

   if (!prog)
      return false;

   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (!shader)
      return false;

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   c->dispatch_width = 8;

   fs_visitor v(c, shader);
   if (!v.run()) {
      /* FINISHME: Cleanly fail, test at link time, etc. */
      assert(!"not reached");
      return false;
   }

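   /* With the 8-wide program compiled, also try a 16-wide version, reusing
    * the uniform layout the first pass discovered.  It's only attempted on
    * gen5+ and when the program needs no pull constants.
    */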
   if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0) {
      c->dispatch_width = 16;
      fs_visitor v2(c, shader);
      v2.import_uniforms(v.variable_ht);
      v2.run();
   }

   c->prog_data.dispatch_width = 8;

   return true;
}