i965/fs: Do dead code elimination just after copy propagation.
[mesa.git] src/mesa/drivers/dri/i965/brw_fs.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs.cpp
 *
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */

extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "main/fbobject.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_print_visitor.h"

void
fs_inst::init()
{
   memset(this, 0, sizeof(*this));
   this->opcode = BRW_OPCODE_NOP;
   this->conditional_mod = BRW_CONDITIONAL_NONE;

   this->dst = reg_undef;
   this->src[0] = reg_undef;
   this->src[1] = reg_undef;
   this->src[2] = reg_undef;
}

fs_inst::fs_inst()
{
   init();
}

fs_inst::fs_inst(enum opcode opcode)
{
   init();
   this->opcode = opcode;
}

fs_inst::fs_inst(enum opcode opcode, fs_reg dst)
{
   init();
   this->opcode = opcode;
   this->dst = dst;

   if (dst.file == GRF)
      assert(dst.reg_offset >= 0);
}

fs_inst::fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0)
{
   init();
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;

   if (dst.file == GRF)
      assert(dst.reg_offset >= 0);
   if (src[0].file == GRF)
      assert(src[0].reg_offset >= 0);
}

fs_inst::fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   init();
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;

   if (dst.file == GRF)
      assert(dst.reg_offset >= 0);
   if (src[0].file == GRF)
      assert(src[0].reg_offset >= 0);
   if (src[1].file == GRF)
      assert(src[1].reg_offset >= 0);
}

fs_inst::fs_inst(enum opcode opcode, fs_reg dst,
                 fs_reg src0, fs_reg src1, fs_reg src2)
{
   init();
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;
   this->src[2] = src2;

   if (dst.file == GRF)
      assert(dst.reg_offset >= 0);
   if (src[0].file == GRF)
      assert(src[0].reg_offset >= 0);
   if (src[1].file == GRF)
      assert(src[1].reg_offset >= 0);
   if (src[2].file == GRF)
      assert(src[2].reg_offset >= 0);
}

bool
fs_inst::equals(fs_inst *inst)
{
   return (opcode == inst->opcode &&
           dst.equals(inst->dst) &&
           src[0].equals(inst->src[0]) &&
           src[1].equals(inst->src[1]) &&
           src[2].equals(inst->src[2]) &&
           saturate == inst->saturate &&
           predicate == inst->predicate &&
           conditional_mod == inst->conditional_mod &&
           mlen == inst->mlen &&
           base_mrf == inst->base_mrf &&
           sampler == inst->sampler &&
           target == inst->target &&
           eot == inst->eot &&
           header_present == inst->header_present &&
           shadow_compare == inst->shadow_compare &&
           offset == inst->offset);
}

int
fs_inst::regs_written()
{
   if (is_tex())
      return 4;

   /* The SINCOS and INT_DIV_QUOTIENT_AND_REMAINDER math functions return two
    * registers, but we don't currently use them, nor do we have an opcode
    * for them.
    */

   return 1;
}

bool
fs_inst::overwrites_reg(const fs_reg &reg)
{
   return (reg.file == dst.file &&
           reg.reg == dst.reg &&
           reg.reg_offset >= dst.reg_offset &&
           reg.reg_offset < dst.reg_offset + regs_written());
}

bool
fs_inst::is_tex()
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS);
}

bool
fs_inst::is_math()
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

void
fs_reg::init()
{
   memset(this, 0, sizeof(*this));
   this->smear = -1;
}

/** Generic unset register constructor. */
fs_reg::fs_reg()
{
   init();
   this->file = BAD_FILE;
}

/** Immediate value constructor. */
fs_reg::fs_reg(float f)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   this->imm.f = f;
}

/** Immediate value constructor. */
fs_reg::fs_reg(int32_t i)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   this->imm.i = i;
}

/** Immediate value constructor. */
fs_reg::fs_reg(uint32_t u)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   this->imm.u = u;
}

/** Fixed brw_reg Immediate value constructor. */
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
{
   init();
   this->file = FIXED_HW_REG;
   this->fixed_hw_reg = fixed_hw_reg;
   this->type = fixed_hw_reg.type;
}

bool
fs_reg::equals(const fs_reg &r) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset == r.reg_offset &&
           type == r.type &&
           negate == r.negate &&
           abs == r.abs &&
           memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                  sizeof(fixed_hw_reg)) == 0 &&
           smear == r.smear &&
           imm.u == r.imm.u);
}

int
fs_visitor::type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}

void
fs_visitor::fail(const char *format, ...)
{
   va_list va;
   char *msg;

   if (failed)
      return;

   failed = true;

   va_start(va, format);
   msg = ralloc_vasprintf(mem_ctx, format, va);
   va_end(va);
   msg = ralloc_asprintf(mem_ctx, "FS compile failed: %s\n", msg);

   this->fail_msg = msg;

   if (INTEL_DEBUG & DEBUG_WM) {
      fprintf(stderr, "%s", msg);
   }
}

fs_inst *
fs_visitor::emit(enum opcode opcode)
{
   return emit(fs_inst(opcode));
}

fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst)
{
   return emit(fs_inst(opcode, dst));
}

fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0)
{
   return emit(fs_inst(opcode, dst, src0));
}

fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   return emit(fs_inst(opcode, dst, src0, src1));
}

fs_inst *
fs_visitor::emit(enum opcode opcode, fs_reg dst,
                 fs_reg src0, fs_reg src1, fs_reg src2)
{
   return emit(fs_inst(opcode, dst, src0, src1, src2));
}

void
fs_visitor::push_force_uncompressed()
{
   force_uncompressed_stack++;
}

void
fs_visitor::pop_force_uncompressed()
{
   force_uncompressed_stack--;
   assert(force_uncompressed_stack >= 0);
}

void
fs_visitor::push_force_sechalf()
{
   force_sechalf_stack++;
}

void
fs_visitor::pop_force_sechalf()
{
   force_sechalf_stack--;
   assert(force_sechalf_stack >= 0);
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * c->dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      return 2 * c->dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNSPILL:
      return 1;
   case FS_OPCODE_SPILL:
      return 2;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}
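
/* A worked example (illustrative): SHADER_OPCODE_POW takes two operands, so
 * in 16-wide dispatch it implies 2 * 16 / 8 = 4 MRF writes, while the same
 * opcode in 8-wide dispatch implies 2 * 8 / 8 = 2.
 */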

int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_count) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
   }
   virtual_grf_sizes[virtual_grf_count] = size;
   return virtual_grf_count++;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = BRW_REGISTER_TYPE_F;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg, uint32_t type)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = type;
}

/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(v->type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}

fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}

void
import_uniforms_callback(const void *key,
                         void *data,
                         void *closure)
{
   struct hash_table *dst_ht = (struct hash_table *)closure;
   const fs_reg *reg = (const fs_reg *)data;

   if (reg->file != UNIFORM)
      return;

   hash_table_insert(dst_ht, data, key);
}

/* For 16-wide, we need to follow the uniform setup of the 8-wide dispatch.
 * This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   hash_table_call_foreach(v->variable_ht,
                           import_uniforms_callback,
                           variable_ht);
   this->params_remap = v->params_remap;
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         unsigned int param = c->prog_data.nr_params++;

         this->param_index[param] = loc;
         this->param_offset[param] = i;
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}


/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been set up by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         this->param_index[c->prog_data.nr_params] = index;
         this->param_offset[c->prog_data.nr_params] = swiz;
         c->prog_data.nr_params++;
      }
   }
}

fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_x);
   } else {
      emit(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (!flip && ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_y);
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_MOV, wpos,
           fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
   } else {
      emit(FS_OPCODE_LINTERP, wpos,
           this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
           this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
           interp_reg(FRAG_ATTRIB_WPOS, 2));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(BRW_OPCODE_MOV, wpos, this->wpos_w);

   return reg;
}

fs_inst *
fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         glsl_interp_qualifier interpolation_mode,
                         bool is_centroid)
{
   brw_wm_barycentric_interp_mode barycoord_mode;
   if (is_centroid) {
      if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
         barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
      else
         barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
   } else {
      if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
         barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
      else
         barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
   }
   return emit(FS_OPCODE_LINTERP, attr,
               this->delta_x[barycoord_mode],
               this->delta_y[barycoord_mode], interp);
}

fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   reg->type = brw_type_for_base_type(ir->type->get_scalar_type());
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         fail("dereferenced array '%s' has length 0\n", ir->name);
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   glsl_interp_qualifier interpolation_mode =
      ir->determine_interpolation_mode(c->key.flat_shade);

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
               interp.type = reg->type;
               emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr.reg_offset++;
            }
         } else {
            /* Smooth/noperspective interpolation case. */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               /* FINISHME: At some point we probably want to push
                * this farther by giving similar treatment to the
                * other potentially constant components of the
                * attribute, as well as making brw_vs_constval.c
                * handle varyings other than gl_TexCoord.
                */
               if (location >= FRAG_ATTRIB_TEX0 &&
                   location <= FRAG_ATTRIB_TEX7 &&
                   k == 3 && !(c->key.proj_attrib_mask & (1 << location))) {
                  emit(BRW_OPCODE_MOV, attr, fs_reg(1.0f));
               } else {
                  struct brw_reg interp = interp_reg(location, k);
                  emit_linterp(attr, fs_reg(interp), interpolation_mode,
                               ir->centroid);
                  if (brw->needs_unlit_centroid_workaround && ir->centroid) {
                     /* Get the pixel/sample mask into f0 so that we know
                      * which pixels are lit. Then, for each channel that is
                      * unlit, replace the centroid data with non-centroid
                      * data.
                      */
                     emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS, attr);
                     fs_inst *inst = emit_linterp(attr, fs_reg(interp),
                                                  interpolation_mode, false);
                     inst->predicate = BRW_PREDICATE_NORMAL;
                     inst->predicate_inverse = true;
                  }
                  if (intel->gen < 6) {
                     emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
                  }
               }
               attr.reg_offset++;
            }

         }
         location++;
      }
   }

   return reg;
}

fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_ASR, *reg,
           fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
           fs_reg(15));
      emit(BRW_OPCODE_NOT, *reg, *reg);
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front face
       */
      fs_inst *inst = emit(BRW_OPCODE_CMP, *reg,
                           fs_reg(r1_6ud),
                           fs_reg(1u << 31));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u));
   }

   return reg;
}

fs_inst *
fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out. We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    *
    * Gen 6 hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
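   /* For example (a sketch of the transform below): on gen6,
    *
    *    math rcp dst, -u0    (illegal: uniform source with a modifier)
    *
    * becomes
    *
    *    mov tmp, -u0
    *    math rcp dst, tmp
    *
    * where tmp is a fresh float temporary with a normal register region.
    */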
   if (intel->gen == 6 && (src.file == UNIFORM ||
                           src.abs ||
                           src.negate)) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(BRW_OPCODE_MOV, expanded, src);
      src = expanded;
   }

   fs_inst *inst = emit(opcode, dst, src);

   if (intel->gen < 6) {
      inst->base_mrf = 2;
      inst->mlen = c->dispatch_width / 8;
   }

   return inst;
}

fs_inst *
fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   int base_mrf = 2;
   fs_inst *inst;

   switch (opcode) {
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      break;
   default:
      assert(!"not reached: unsupported binary math opcode.");
      return NULL;
   }

   if (intel->gen >= 7) {
      inst = emit(opcode, dst, src0, src1);
   } else if (intel->gen == 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out.
       *
       * The hardware ignores source modifiers (negate and abs) on math
       * instructions, so we also move to a temp to set those up.
       */
      if (src0.file == UNIFORM || src0.abs || src0.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         expanded.type = src0.type;
         emit(BRW_OPCODE_MOV, expanded, src0);
         src0 = expanded;
      }

      if (src1.file == UNIFORM || src1.abs || src1.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         expanded.type = src1.type;
         emit(BRW_OPCODE_MOV, expanded, src1);
         src1 = expanded;
      }

      inst = emit(opcode, dst, src0, src1);
   } else {
      /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
       * "Message Payload":
       *
       * "Operand0[7].  For the INT DIV functions, this operand is the
       *  denominator."
       *  ...
       * "Operand1[7].  For the INT DIV functions, this operand is the
       *  numerator."
       */
      bool is_int_div = opcode != SHADER_OPCODE_POW;
      fs_reg &op0 = is_int_div ? src1 : src0;
      fs_reg &op1 = is_int_div ? src0 : src1;

      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1, op1.type), op1);
      inst = emit(opcode, dst, op0, reg_null_f);

      inst->base_mrf = base_mrf;
      inst->mlen = 2 * c->dispatch_width / 8;
   }
   return inst;
}

/**
 * To be called after the last _mesa_add_state_reference() call, to
 * set up prog_data.param[] for assign_curb_setup() and
 * setup_pull_constants().
 */
void
fs_visitor::setup_paramvalues_refs()
{
   if (c->dispatch_width != 8)
      return;

   /* Set up the pointers to ParamValues now that the array is finalized. */
   for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
      c->prog_data.param[i] =
         (const float *)fp->Base.Parameters->ParameterValues[this->param_index[i]] +
         this->param_offset[i];
   }
}

void
fs_visitor::assign_curb_setup()
{
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
   if (c->dispatch_width == 8) {
      c->prog_data.first_curbe_grf = c->nr_payload_regs;
   } else {
      c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
   }

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
         }
      }
   }
}

void
fs_visitor::calculate_urb_setup()
{
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      urb_setup[i] = -1;
   }

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (intel->gen >= 6) {
      for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
         if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
         }
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VERT_RESULT_PSIZ)
            continue;

         if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
            int fp_index = _mesa_vert_result_to_frag_attrib((gl_vert_result) i);

            /* The back color slot is skipped when the front color is
             * also written to.  In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader.  So the register number must always be
             * incremented, mapped or not.
             */
            if (fp_index >= 0)
               urb_setup[fp_index] = urb_next;
            urb_next++;
         }
      }

      /*
       * It's an FS-only attribute, and we did the interpolation for it in
       * the SF thread.  So count it here, too.
       *
       * See compile_sf_prog() for more info.
       */
      if (fp->Base.InputsRead & BITFIELD64_BIT(FRAG_ATTRIB_PNTC))
         urb_setup[FRAG_ATTRIB_PNTC] = urb_next++;
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   c->prog_data.urb_read_length = urb_next * 2;
}

void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;

   /* Offset all the urb_setup[] indices by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[2].file == FIXED_HW_REG);
         inst->src[2].fixed_hw_reg.nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_HW_REG);
         inst->src[0].fixed_hw_reg.nr += urb_start;
      }
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}

/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This mostly duplicates what brw_fs_vector_splitting does, but that pass is
 * really conservative, because it's afraid of doing splitting that doesn't
 * result in real progress after the rest of the optimization phases, which
 * would cause infinite looping in optimization.  We can do it once here,
 * safely.  This also has the opportunity to split interpolated values, or
 * maybe even uniforms, which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register allocate and
 * spill (due to contiguousness requirements for some instructions), and
 * they're what we naturally generate in the codegen process, but most
 * virtual GRFs don't actually need to be contiguous sets of GRFs.  If we
 * split, we'll end up with reduced live intervals and better dead code
 * elimination and coalescing.
 */
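/* For example (illustrative): a size-4 virtual GRF holding a vec4 becomes
 * one size-1 GRF plus three freshly allocated size-1 GRFs with contiguous
 * numbers, and an instruction that wrote (reg = 5, reg_offset = 2) is
 * rewritten to write (reg = new_virtual_grf[5] + 1, reg_offset = 0).
 */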
void
fs_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_count;
   bool split_grf[num_vars];
   int new_virtual_grf[num_vars];

   /* Try to split anything larger than one register. */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] != 1)
         split_grf[i] = true;
      else
         split_grf[i] = false;
   }

   if (brw->has_pln &&
       this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].file == GRF) {
      /* PLN opcodes rely on the delta_xy being contiguous.  We only have to
       * check this for BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC, because prior to
       * Gen6, that was the only supported interpolation mode, and since Gen6,
       * delta_x and delta_y are in fixed hardware registers.
       */
      split_grf[this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg] =
         false;
   }

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      /* If there's a SEND message that requires contiguous destination
       * registers, no splitting is allowed.
       */
      if (inst->regs_written() > 1) {
         split_grf[inst->dst.reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (split_grf[i]) {
         new_virtual_grf[i] = virtual_grf_alloc(1);
         for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
            int reg = virtual_grf_alloc(1);
            assert(reg == new_virtual_grf[i] + j - 1);
            (void) reg;
         }
         this->virtual_grf_sizes[i] = 1;
      }
   }

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->dst.file == GRF &&
          split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   this->live_intervals_valid = false;
}

/**
 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
 *
 * During code generation, we create tons of temporary variables, many of
 * which get immediately killed and are never used again.  Yet, in later
 * optimization and analysis passes, such as compute_live_intervals, we need
 * to loop over all the virtual GRFs.  Compacting them can save a lot of
 * overhead.
 */
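/* For example (illustrative): if only virtual GRFs 0, 3, and 7 are
 * referenced by any instruction, remap_table ends up {0, -1, -1, 1, -1,
 * -1, -1, 2, ...} and virtual_grf_count drops to 3.
 */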
void
fs_visitor::compact_virtual_grfs()
{
   /* Mark which virtual GRFs are used, and count how many. */
   int remap_table[this->virtual_grf_count];
   memset(remap_table, -1, sizeof(remap_table));

   foreach_list(node, &this->instructions) {
      const fs_inst *inst = (const fs_inst *) node;

      if (inst->dst.file == GRF)
         remap_table[inst->dst.reg] = 0;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            remap_table[inst->src[i].reg] = 0;
      }
   }

   /* Compact the GRF arrays. */
   int new_index = 0;
   for (int i = 0; i < this->virtual_grf_count; i++) {
      if (remap_table[i] != -1) {
         remap_table[i] = new_index;
         virtual_grf_sizes[new_index] = virtual_grf_sizes[i];
         if (live_intervals_valid) {
            virtual_grf_use[new_index] = virtual_grf_use[i];
            virtual_grf_def[new_index] = virtual_grf_def[i];
         }
         ++new_index;
      }
   }

   this->virtual_grf_count = new_index;

   /* Patch all the instructions to use the newly renumbered registers */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *) node;

      if (inst->dst.file == GRF)
         inst->dst.reg = remap_table[inst->dst.reg];

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            inst->src[i].reg = remap_table[inst->src[i].reg];
      }
   }
}

bool
fs_visitor::remove_dead_constants()
{
   if (c->dispatch_width == 8) {
      this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);

      for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
         this->params_remap[i] = -1;

      /* Find which params are still in use. */
      foreach_list(node, &this->instructions) {
         fs_inst *inst = (fs_inst *)node;

         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file != UNIFORM)
               continue;

            int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
            assert(constant_nr < (int)c->prog_data.nr_params);

            /* For now, set this to non-negative.  We'll give it the
             * actual new number in a moment, in order to keep the
             * register numbers nicely ordered.
             */
            this->params_remap[constant_nr] = 0;
         }
      }

      /* Figure out what the new numbers for the params will be.  At some
       * point when we're doing uniform array access, we're going to want
       * to keep the distinction between .reg and .reg_offset, but for
       * now we don't care.
       */
      unsigned int new_nr_params = 0;
      for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
         if (this->params_remap[i] != -1) {
            this->params_remap[i] = new_nr_params++;
         }
      }

      /* Update the list of params to be uploaded to match our new numbering. */
      for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
         int remapped = this->params_remap[i];

         if (remapped == -1)
            continue;

         /* We've already done setup_paramvalues_refs() so no need to worry
          * about param_index and param_offset.
          */
         c->prog_data.param[remapped] = c->prog_data.param[i];
      }

      c->prog_data.nr_params = new_nr_params;
   } else {
      /* This should have been generated in the 8-wide pass already. */
      assert(this->params_remap);
   }

   /* Now do the renumbering of the shader to remove unused params. */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
         assert(this->params_remap[constant_nr] != -1);
         inst->src[i].reg = this->params_remap[constant_nr];
         inst->src[i].reg_offset = 0;
      }
   }

   return true;
}

/**
 * Choose accesses from the UNIFORM file to demote to using the pull
 * constant buffer.
 *
 * We allow a fragment shader to have more than the GL-specified minimum
 * maximum number of fragment shader uniform components (64).  If there are
 * too many of these, they'd fill up all of the register space.  So, this
 * pushes some of them out to the pull constant buffer and updates the
 * program to load them.
 */
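/* For example (illustrative): with 160 float params and a 128-component
 * push limit, params 128..159 are demoted.  A read of uniform 130 turns
 * into an FS_OPCODE_PULL_CONSTANT_LOAD from offset
 * ((130 - 128) * 4) & ~15 = 0, with smear = (130 - 128) & 3 = 2 picking
 * the right component out of the loaded register.
 */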
void
fs_visitor::setup_pull_constants()
{
   /* Only allow 16 registers (128 uniform components) as push constants. */
   unsigned int max_uniform_components = 16 * 8;
   if (c->prog_data.nr_params <= max_uniform_components)
      return;

   if (c->dispatch_width == 16) {
      fail("Pull constants not supported in 16-wide\n");
      return;
   }

   /* Just demote the end of the list.  We could probably do better
    * here, demoting things that are rarely used in the program first.
    */
   int pull_uniform_base = max_uniform_components;
   int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
         if (uniform_nr < pull_uniform_base)
            continue;

         fs_reg dst = fs_reg(this, glsl_type::float_type);
         fs_reg index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
         fs_reg offset = fs_reg((unsigned)(((uniform_nr -
                                             pull_uniform_base) * 4) & ~15));
         fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
                                              dst, index, offset);
         pull->ir = inst->ir;
         pull->annotation = inst->annotation;
         pull->base_mrf = 14;
         pull->mlen = 1;

         inst->insert_before(pull);

         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;
         inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
      }
   }

   for (int i = 0; i < pull_uniform_count; i++) {
      c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
   }
   c->prog_data.nr_params -= pull_uniform_count;
   c->prog_data.nr_pull_params = pull_uniform_count;
}

bool
fs_visitor::opt_algebraic()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_MUL:
         if (inst->src[1].file != IMM)
            continue;

         /* a * 1.0 = a */
         if (inst->src[1].type == BRW_REGISTER_TYPE_F &&
             inst->src[1].imm.f == 1.0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         /* a * 0.0 = 0.0 */
         if (inst->src[1].type == BRW_REGISTER_TYPE_F &&
             inst->src[1].imm.f == 0.0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = fs_reg(0.0f);
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         break;
      case BRW_OPCODE_ADD:
         if (inst->src[1].file != IMM)
            continue;

         /* a + 0.0 = a */
         if (inst->src[1].type == BRW_REGISTER_TYPE_F &&
             inst->src[1].imm.f == 0.0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }
         break;
      default:
         break;
      }
   }

   return progress;
}

/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something def'd but not used won't be considered to
 * interfere with other regs.
 */
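/* For example (illustrative): if nothing after this point reads vgrf4, the
 * write
 *
 *    mov vgrf4, vgrf2
 *
 * has its last recorded use (virtual_grf_use[4]) at or before this
 * instruction's position, so the MOV is removed.
 */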
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

/**
 * Implements a second type of register coalescing: This one checks whether
 * the two regs involved in a raw move don't interfere, in which case they
 * can both be stored in the same place and the MOV removed.
 */
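/* For example (illustrative): given
 *
 *    mov vgrf6, vgrf3
 *
 * where vgrf3 is size 1, carries no modifiers, and doesn't interfere with
 * vgrf6, every def and use of vgrf3 is rewritten to vgrf6 and the MOV is
 * removed.
 */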
bool
fs_visitor::register_coalesce_2()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicate ||
          inst->saturate ||
          inst->src[0].file != GRF ||
          inst->src[0].negate ||
          inst->src[0].abs ||
          inst->src[0].smear != -1 ||
          inst->dst.file != GRF ||
          inst->dst.type != inst->src[0].type ||
          virtual_grf_sizes[inst->src[0].reg] != 1 ||
          virtual_grf_interferes(inst->dst.reg, inst->src[0].reg)) {
         continue;
      }

      int reg_from = inst->src[0].reg;
      assert(inst->src[0].reg_offset == 0);
      int reg_to = inst->dst.reg;
      int reg_to_offset = inst->dst.reg_offset;

      foreach_list_safe(node, &this->instructions) {
         fs_inst *scan_inst = (fs_inst *)node;

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == reg_from) {
            scan_inst->dst.reg = reg_to;
            scan_inst->dst.reg_offset = reg_to_offset;
         }
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == reg_from) {
               scan_inst->src[i].reg = reg_to;
               scan_inst->src[i].reg_offset = reg_to_offset;
            }
         }
      }

      inst->remove();
      live_intervals_valid = false;
      progress = true;
      continue;
   }

   return progress;
}

bool
fs_visitor::register_coalesce()
{
   bool progress = false;
   int if_depth = 0;
   int loop_depth = 0;

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      /* Make sure that we dominate the instructions we're going to
       * scan for interfering with our coalescing, or we won't have
       * scanned enough to see if anything interferes with our
       * coalescing.  We don't dominate the following instructions if
       * we're in a loop or an if block.
       */
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      case BRW_OPCODE_IF:
         if_depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if_depth--;
         break;
      default:
         break;
      }
      if (loop_depth || if_depth)
         continue;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicate ||
          inst->saturate ||
          inst->dst.file != GRF || (inst->src[0].file != GRF &&
                                    inst->src[0].file != UNIFORM) ||
          inst->dst.type != inst->src[0].type)
         continue;

      bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;

      /* Found a move of a GRF to a GRF.  Let's see if we can coalesce
       * them: check for no writes to either one until the exit of the
       * program.
       */
      bool interfered = false;

      for (fs_inst *scan_inst = (fs_inst *)inst->next;
           !scan_inst->is_tail_sentinel();
           scan_inst = (fs_inst *)scan_inst->next) {
         if (scan_inst->dst.file == GRF) {
            if (scan_inst->overwrites_reg(inst->dst) ||
                scan_inst->overwrites_reg(inst->src[0])) {
               interfered = true;
               break;
            }
         }

         /* The gen6 MATH instruction can't handle source modifiers or
          * unusual register regions, so avoid coalescing those for
          * now.  We should do something more specific.
          */
         if (intel->gen >= 6 &&
             scan_inst->is_math() &&
             (has_source_modifiers || inst->src[0].file == UNIFORM)) {
            interfered = true;
            break;
         }

         /* The accumulator result appears to get used for the
          * conditional modifier generation.  When negating a UD
          * value, there is a 33rd bit generated for the sign in the
          * accumulator value, so now you can't check, for example,
          * equality with a 32-bit value.  See piglit fs-op-neg-uint.
          */
         if (scan_inst->conditional_mod &&
             inst->src[0].negate &&
             inst->src[0].type == BRW_REGISTER_TYPE_UD) {
            interfered = true;
            break;
         }
      }
      if (interfered) {
         continue;
      }

      /* Rewrite the later usage to point at the source of the move to
       * be removed.
       */
      for (fs_inst *scan_inst = inst;
           !scan_inst->is_tail_sentinel();
           scan_inst = (fs_inst *)scan_inst->next) {
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->dst.reg &&
                scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
               fs_reg new_src = inst->src[0];
               if (scan_inst->src[i].abs) {
                  new_src.negate = 0;
                  new_src.abs = 1;
               }
               new_src.negate ^= scan_inst->src[i].negate;
               scan_inst->src[i] = new_src;
            }
         }
      }

      inst->remove();
      progress = true;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicate ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
         continue;

      /* Work out which hardware MRF registers are written by this
       * instruction.
       */
      int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
      int mrf_high;
      if (inst->dst.reg & BRW_MRF_COMPR4) {
         mrf_high = mrf_low + 4;
      } else if (c->dispatch_width == 16 &&
                 (!inst->force_uncompressed && !inst->force_sechalf)) {
         mrf_high = mrf_low + 1;
      } else {
         mrf_high = mrf_low;
      }

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      fs_inst *scan_inst;
      for (scan_inst = (fs_inst *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (fs_inst *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            /* SENDs can only write to GRFs, so no compute-to-MRF. */
            if (scan_inst->mlen) {
               break;
            }

            /* If it's predicated, it (probably) didn't populate all
             * the channels.  We might be able to rewrite everything
             * that writes that reg, but it would require smarter
             * tracking to delay the rewriting until complete success.
             */
            if (scan_inst->predicate)
               break;

            /* If it's doing half-of-register setup and it isn't the same
             * half as the MOV we're trying to remove, bail for now.
             */
            if (scan_inst->force_uncompressed != inst->force_uncompressed ||
                scan_inst->force_sechalf != inst->force_sechalf) {
               break;
            }

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value. */
               scan_inst->dst.file = MRF;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove();
               progress = true;
            }
            break;
         }

         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF) {
            /* If somebody else writes our MRF here, we can't
             * compute-to-MRF before that.
             */
            int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
            int scan_mrf_high;

            if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
               scan_mrf_high = scan_mrf_low + 4;
            } else if (c->dispatch_width == 16 &&
                       (!scan_inst->force_uncompressed &&
                        !scan_inst->force_sechalf)) {
               scan_mrf_high = scan_mrf_low + 1;
            } else {
               scan_mrf_high = scan_mrf_low;
            }

            if (mrf_low == scan_mrf_low ||
                mrf_low == scan_mrf_high ||
                mrf_high == scan_mrf_low ||
                mrf_high == scan_mrf_high) {
               break;
            }
         }

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (mrf_low >= scan_inst->base_mrf &&
                mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
            if (mrf_high >= scan_inst->base_mrf &&
                mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

/**
 * Walks through basic blocks, looking for repeated MRF writes and
 * removing the later ones.
 */
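/* For example (illustrative): within one basic block,
 *
 *    mov m3, vgrf10
 *    ...              (nothing rewrites m3 or vgrf10, no control flow)
 *    mov m3, vgrf10
 *
 * the second MOV is redundant and gets removed.
 */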
bool
fs_visitor::remove_duplicate_mrf_writes()
{
   fs_inst *last_mrf_move[16];
   bool progress = false;

   /* Need to update the MRF tracking for compressed instructions. */
   if (c->dispatch_width == 16)
      return false;

   memset(last_mrf_move, 0, sizeof(last_mrf_move));

   foreach_list_safe(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
      case BRW_OPCODE_WHILE:
      case BRW_OPCODE_IF:
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_ENDIF:
         memset(last_mrf_move, 0, sizeof(last_mrf_move));
         continue;
      default:
         break;
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF) {
         fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
         if (prev_inst && inst->equals(prev_inst)) {
            inst->remove();
            progress = true;
            continue;
         }
      }

      /* Clear out the last-write records for MRFs that were overwritten. */
      if (inst->dst.file == MRF) {
         last_mrf_move[inst->dst.reg] = NULL;
      }

      if (inst->mlen > 0) {
         /* Found a SEND instruction, which will include two or fewer
          * implied MRF writes.  We could do better here.
          */
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            last_mrf_move[inst->base_mrf + i] = NULL;
         }
      }

      /* Clear out any MRF move records whose sources got overwritten. */
      if (inst->dst.file == GRF) {
         for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
            if (last_mrf_move[i] &&
                last_mrf_move[i]->src[0].reg == inst->dst.reg) {
               last_mrf_move[i] = NULL;
            }
         }
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF &&
          inst->src[0].file == GRF &&
          !inst->predicate) {
         last_mrf_move[inst->dst.reg] = inst;
      }
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}

/**
 * Possibly returns an instruction that set up @param reg.
 *
 * Sometimes we want to take the result of some expression/variable
 * dereference tree and rewrite the instruction generating the result
 * of the tree.  When processing the tree, we know that the
 * instructions generated are all writing temporaries that are dead
 * outside of this tree.  So, if we have some instructions that write
 * a temporary, we're free to point that temp write somewhere else.
 *
 * Note that this doesn't guarantee that the returned instruction wrote only
 * reg -- it might be the size-4 destination of a texture instruction.
 */
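/* Usage sketch (hypothetical caller): after emitting the tree for an
 * expression, a visitor can try to fold a modifier into the producing
 * instruction instead of emitting an extra MOV:
 *
 *    fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst,
 *                                                     this->result);
 *    if (modify)
 *       modify->saturate = true;
 *    else
 *       ... fall back to emitting a saturating MOV ...
 */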
fs_inst *
fs_visitor::get_instruction_generating_reg(fs_inst *start,
                                           fs_inst *end,
                                           fs_reg reg)
{
   if (end == start ||
       end->predicate ||
       end->force_uncompressed ||
       end->force_sechalf ||
       !reg.equals(end->dst)) {
      return NULL;
   } else {
      return end;
   }
}

bool
fs_visitor::run()
{
   uint32_t prog_offset_16 = 0;
   uint32_t orig_nr_params = c->prog_data.nr_params;

   brw_wm_payload_setup(brw, c);

   if (c->dispatch_width == 16) {
      /* We have to do a compaction pass now, or the one at the end of
       * execution will squash down where our prog_offset start needs
       * to be.
       */
      brw_compact_instructions(p);

      /* align to 64 byte boundary. */
      while ((c->func.nr_insn * sizeof(struct brw_instruction)) % 64) {
         brw_NOP(p);
      }

      /* Save off the start of this 16-wide program in case we succeed. */
      prog_offset_16 = c->func.nr_insn * sizeof(struct brw_instruction);

      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
   }

   if (0) {
      emit_dummy_fs();
   } else {
      calculate_urb_setup();
      if (intel->gen < 6)
         emit_interpolation_setup_gen4();
      else
         emit_interpolation_setup_gen6();

      /* Generate FS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      if (shader) {
         foreach_list(node, &*shader->ir) {
            ir_instruction *ir = (ir_instruction *)node;
            base_ir = ir;
            this->result = reg_undef;
            ir->accept(this);
         }
      } else {
         emit_fragment_program_code();
      }
      if (failed)
         return false;

      emit_fb_writes();

      split_virtual_grfs();

      setup_paramvalues_refs();
      setup_pull_constants();

      bool progress;
      do {
         progress = false;

         compact_virtual_grfs();

         progress = remove_duplicate_mrf_writes() || progress;

         progress = opt_algebraic() || progress;
         progress = opt_cse() || progress;
         progress = opt_copy_propagate() || progress;
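         /* Do dead code elimination right after copy propagation: the
          * propagated copies typically leave their original MOVs dead, and
          * clearing them out immediately gives the coalescing and
          * compute-to-MRF passes below less to scan.
          */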
         progress = dead_code_eliminate() || progress;
         progress = register_coalesce() || progress;
         progress = register_coalesce_2() || progress;
         progress = compute_to_mrf() || progress;
      } while (progress);

      remove_dead_constants();

      schedule_instructions();

      assign_curb_setup();
      assign_urb_setup();

      if (0) {
         /* Debug of register spilling: Go spill everything. */
         for (int i = 0; i < virtual_grf_count; i++) {
            spill_reg(i);
         }
      }

      if (0)
         assign_regs_trivial();
      else {
         while (!assign_regs()) {
            if (failed)
               break;
         }
      }
   }
   assert(force_uncompressed_stack == 0);
   assert(force_sechalf_stack == 0);

   if (failed)
      return false;

   generate_code();

   if (c->dispatch_width == 8) {
      c->prog_data.reg_blocks = brw_register_blocks(grf_used);
   } else {
      c->prog_data.reg_blocks_16 = brw_register_blocks(grf_used);
      c->prog_data.prog_offset_16 = prog_offset_16;

      /* Make sure we didn't try to sneak in an extra uniform */
      assert(orig_nr_params == c->prog_data.nr_params);
      (void) orig_nr_params;
   }

   return !failed;
}

bool
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
               struct gl_shader_program *prog)
{
   struct intel_context *intel = &brw->intel;
   bool start_busy = false;
   float start_time = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      start_busy = (intel->batch.last_bo &&
                    drm_intel_bo_busy(intel->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      if (shader) {
         printf("GLSL IR for native fragment shader %d:\n", prog->Name);
         _mesa_print_ir(shader->ir, NULL);
         printf("\n\n");
      } else {
         printf("ARB_fragment_program %d ir for native fragment shader\n",
                c->fp->program.Base.Id);
         _mesa_print_program(&c->fp->program.Base);
      }
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   c->dispatch_width = 8;

   fs_visitor v(c, prog, shader);
   if (!v.run()) {
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n",
                    v.fail_msg);

      return false;
   }

   if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0) {
      c->dispatch_width = 16;
      fs_visitor v2(c, prog, shader);
      v2.import_uniforms(&v);
      if (!v2.run()) {
         perf_debug("16-wide shader failed to compile, falling back to "
                    "8-wide at a 10-20%% performance cost: %s", v2.fail_msg);
      }
   }

   c->prog_data.dispatch_width = 8;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF) && shader) {
      if (shader->compiled_once)
         brw_wm_debug_recompile(brw, prog, &c->key);
      shader->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(intel->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   return true;
}

bool
brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   struct brw_wm_prog_key key;

   if (!prog->_LinkedShaders[MESA_SHADER_FRAGMENT])
      return true;

   struct gl_fragment_program *fp = (struct gl_fragment_program *)
      prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program;
   struct brw_fragment_program *bfp = brw_fragment_program(fp);
   bool program_uses_dfdy = fp->UsesDFdy;

   memset(&key, 0, sizeof(key));

   if (intel->gen < 6) {
      if (fp->UsesKill)
         key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
      key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   if (prog->Name != 0)
      key.proj_attrib_mask = 0xffffffff;

   if (intel->gen < 6)
      key.vp_outputs_written |= BITFIELD64_BIT(FRAG_ATTRIB_WPOS);

   for (int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
         continue;

      if (prog->Name == 0)
         key.proj_attrib_mask |= 1 << i;

      if (intel->gen < 6) {
         int vp_index = _mesa_vert_result_to_frag_attrib((gl_vert_result) i);

         if (vp_index >= 0)
            key.vp_outputs_written |= BITFIELD64_BIT(vp_index);
      }
   }

   key.clamp_fragment_color = true;

   for (int i = 0; i < MAX_SAMPLERS; i++) {
      if (fp->Base.ShadowSamplers & (1 << i)) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         key.tex.swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         key.tex.swizzles[i] = SWIZZLE_XYZW;
      }
   }

   if (fp->Base.InputsRead & FRAG_BIT_WPOS) {
      key.drawable_height = ctx->DrawBuffer->Height;
   }

   if ((fp->Base.InputsRead & FRAG_BIT_WPOS) || program_uses_dfdy) {
      key.render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   key.nr_color_regions = 1;

   key.program_string_id = bfp->id;

   uint32_t old_prog_offset = brw->wm.prog_offset;
   struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;

   bool success = do_wm_prog(brw, prog, bfp, &key);

   brw->wm.prog_offset = old_prog_offset;
   brw->wm.prog_data = old_prog_data;

   return success;
}
2134 }