i965: add support for ARB_shader_subroutine
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs.cpp
25 *
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
28 * from the LIR.
29 */
30
31 #include <sys/types.h>
32
33 #include "util/hash_table.h"
34 #include "main/macros.h"
35 #include "main/shaderobj.h"
36 #include "main/fbobject.h"
37 #include "program/prog_parameter.h"
38 #include "program/prog_print.h"
39 #include "util/register_allocate.h"
40 #include "program/hash_table.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_wm.h"
44 #include "brw_fs.h"
45 #include "brw_cfg.h"
46 #include "brw_dead_control_flow.h"
47 #include "main/uniforms.h"
48 #include "brw_fs_live_variables.h"
49 #include "glsl/glsl_types.h"
50 #include "program/sampler.h"
51
52 using namespace brw;
53
/* Shared constructor body: zero-initializes the instruction, copies the
 * source registers, and derives regs_written from the destination.
 */
void
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
              const fs_reg *src, unsigned sources)
{
   /* The memset() must come first: it wipes every member (including the
    * src pointer) before the allocation below.
    */
   memset(this, 0, sizeof(*this));

   /* Always allocate at least 3 slots so code such as equals() can
    * compare src[0..2] unconditionally, regardless of `sources`.
    */
   this->src = new fs_reg[MAX2(sources, 3)];
   for (unsigned i = 0; i < sources; i++)
      this->src[i] = src[i];

   this->opcode = opcode;
   this->dst = dst;
   this->sources = sources;
   this->exec_size = exec_size;

   assert(dst.file != IMM && dst.file != UNIFORM);

   assert(this->exec_size != 0);

   this->conditional_mod = BRW_CONDITIONAL_NONE;

   /* This will be the case for almost all instructions. */
   switch (dst.file) {
   case GRF:
   case HW_REG:
   case MRF:
   case ATTR:
      /* Registers written = destination footprint rounded up to whole
       * hardware registers.
       */
      this->regs_written = DIV_ROUND_UP(dst.component_size(exec_size),
                                        REG_SIZE);
      break;
   case BAD_FILE:
      this->regs_written = 0;
      break;
   case IMM:
   case UNIFORM:
      unreachable("Invalid destination register file");
   default:
      unreachable("Invalid register file");
   }

   this->writes_accumulator = false;
}
96
/* Default constructor: an 8-wide NOP with no sources.
 *
 * NOTE(review): `dst` is read before being initialized here; init()
 * memsets the whole object first, so this amounts to assigning a zeroed
 * register to itself — confirm this is the intended idiom.
 */
fs_inst::fs_inst()
{
   init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
}
101
/* Constructor for an instruction with no destination and no sources. */
fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
{
   init(opcode, exec_size, reg_undef, NULL, 0);
}
106
/* Constructor for an instruction with a destination but no sources. */
fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
{
   init(opcode, exec_size, dst, NULL, 0);
}
111
112 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
113 const fs_reg &src0)
114 {
115 const fs_reg src[1] = { src0 };
116 init(opcode, exec_size, dst, src, 1);
117 }
118
119 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
120 const fs_reg &src0, const fs_reg &src1)
121 {
122 const fs_reg src[2] = { src0, src1 };
123 init(opcode, exec_size, dst, src, 2);
124 }
125
126 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
127 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
128 {
129 const fs_reg src[3] = { src0, src1, src2 };
130 init(opcode, exec_size, dst, src, 3);
131 }
132
/* Constructor taking an arbitrary-length array of sources. */
fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
                 const fs_reg src[], unsigned sources)
{
   init(opcode, exec_width, dst, src, sources);
}
138
/* Copy constructor: bitwise-copy the scalar members, then deep-copy the
 * source array so the two instructions never share (and double-delete)
 * the same allocation.
 */
fs_inst::fs_inst(const fs_inst &that)
{
   memcpy(this, &that, sizeof(that));

   /* Keep the minimum allocation of 3 slots (see fs_inst::init). */
   this->src = new fs_reg[MAX2(that.sources, 3)];

   for (unsigned i = 0; i < that.sources; i++)
      this->src[i] = that.src[i];
}
148
/* Destructor: releases the per-instruction source array. */
fs_inst::~fs_inst()
{
   delete[] this->src;
}
153
154 void
155 fs_inst::resize_sources(uint8_t num_sources)
156 {
157 if (this->sources != num_sources) {
158 fs_reg *src = new fs_reg[MAX2(num_sources, 3)];
159
160 for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
161 src[i] = this->src[i];
162
163 delete[] this->src;
164 this->src = src;
165 this->sources = num_sources;
166 }
167 }
168
/* Emit a pull-constant load whose offset is computed at run time
 * (e.g. a uniform array indexed by a non-constant expression).
 */
void
fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
                                       const fs_reg &dst,
                                       const fs_reg &surf_index,
                                       const fs_reg &varying_offset,
                                       uint32_t const_offset)
{
   /* We have our constant surface use a pitch of 4 bytes, so our index can
    * be any component of a vector, and then we load 4 contiguous
    * components starting from that.
    *
    * We break down the const_offset to a portion added to the variable
    * offset and a portion done using reg_offset, which means that if you
    * have GLSL using something like "uniform vec4 a[20]; gl_FragColor =
    * a[i]", we'll temporarily generate 4 vec4 loads from offset i * 4, and
    * CSE can later notice that those loads are all the same and eliminate
    * the redundant ones.
    */
   fs_reg vec4_offset = vgrf(glsl_type::int_type);
   /* Low two bits of const_offset are handled via the MOV at the end. */
   bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));

   int scale = 1;
   if (devinfo->gen == 4 && bld.dispatch_width() == 8) {
      /* Pre-gen5, we can either use a SIMD8 message that requires (header,
       * u, v, r) as parameters, or we can just use the SIMD16 message
       * consisting of (header, u). We choose the second, at the cost of a
       * longer return length.
       */
      scale = 2;
   }

   enum opcode op;
   if (devinfo->gen >= 7)
      op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
   else
      op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;

   /* 4 components per channel-group, doubled when the SIMD16 trick above
    * is in use.
    */
   int regs_written = 4 * (bld.dispatch_width() / 8) * scale;
   fs_reg vec4_result = fs_reg(GRF, alloc.allocate(regs_written), dst.type);
   fs_inst *inst = bld.emit(op, vec4_result, surf_index, vec4_offset);
   inst->regs_written = regs_written;

   if (devinfo->gen < 7) {
      /* Pre-gen7 the message payload goes through MRFs. */
      inst->base_mrf = 13;
      inst->header_size = 1;
      if (devinfo->gen == 4)
         inst->mlen = 3;
      else
         inst->mlen = 1 + bld.dispatch_width() / 8;
   }

   /* Pick the requested component out of the 4 that were loaded. */
   bld.MOV(dst, offset(vec4_result, bld, (const_offset & 3) * scale));
}
222
223 /**
224 * A helper for MOV generation for fixing up broken hardware SEND dependency
225 * handling.
226 */
void
fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
{
   /* The caller always wants uncompressed to emit the minimal extra
    * dependencies, and to avoid having to deal with aligning its regs to 2.
    */
   const fs_builder ubld = bld.annotate("send dependency resolve")
                              .half(0);

   /* A MOV to the null register still creates the read dependency. */
   ubld.MOV(ubld.null_reg_f(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
}
238
239 bool
240 fs_inst::equals(fs_inst *inst) const
241 {
242 return (opcode == inst->opcode &&
243 dst.equals(inst->dst) &&
244 src[0].equals(inst->src[0]) &&
245 src[1].equals(inst->src[1]) &&
246 src[2].equals(inst->src[2]) &&
247 saturate == inst->saturate &&
248 predicate == inst->predicate &&
249 conditional_mod == inst->conditional_mod &&
250 mlen == inst->mlen &&
251 base_mrf == inst->base_mrf &&
252 target == inst->target &&
253 eot == inst->eot &&
254 header_size == inst->header_size &&
255 shadow_compare == inst->shadow_compare &&
256 exec_size == inst->exec_size &&
257 offset == inst->offset);
258 }
259
/* Returns whether \p reg falls inside this instruction's destination
 * footprint (regs_written registers starting at dst).
 */
bool
fs_inst::overwrites_reg(const fs_reg &reg) const
{
   return reg.in_range(dst, regs_written);
}
265
/* Returns whether this instruction is a SEND whose message payload is
 * sourced from the GRF rather than from MRFs.
 */
bool
fs_inst::is_send_from_grf() const
{
   switch (opcode) {
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return true;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      /* GRF-sourced only when the payload operand lives in src[1]. */
      return src[1].file == GRF;
   case FS_OPCODE_FB_WRITE:
      return src[0].file == GRF;
   default:
      /* Texturing messages send from the GRF when src[0] is one. */
      if (is_tex())
         return src[0].file == GRF;

      return false;
   }
}
295
/* Returns whether this LOAD_PAYLOAD just copies one whole, contiguous
 * register range verbatim (in which case it could be elided).
 */
bool
fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
{
   if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   /* The candidate range must start at the beginning of a GRF with a
    * non-zero stride.
    */
   fs_reg reg = this->src[0];
   if (reg.file != GRF || reg.reg_offset != 0 || reg.stride == 0)
      return false;

   /* The source VGRF must be exactly as large as what we write. */
   if (grf_alloc.sizes[reg.reg] != this->regs_written)
      return false;

   /* Walk the sources checking that each is the next consecutive chunk of
    * the same VGRF: header sources advance one register at a time, the
    * rest advance by the per-channel-group footprint.
    */
   for (int i = 0; i < this->sources; i++) {
      reg.type = this->src[i].type;
      if (!this->src[i].equals(reg))
         return false;

      if (i < this->header_size) {
         reg.reg_offset += 1;
      } else {
         reg.reg_offset += this->exec_size / 8;
      }
   }

   return true;
}
323
324 bool
325 fs_inst::can_do_source_mods(const struct brw_device_info *devinfo)
326 {
327 if (devinfo->gen == 6 && is_math())
328 return false;
329
330 if (is_send_from_grf())
331 return false;
332
333 if (!backend_instruction::can_do_source_mods())
334 return false;
335
336 return true;
337 }
338
339 bool
340 fs_inst::has_side_effects() const
341 {
342 return this->eot || backend_instruction::has_side_effects();
343 }
344
/* Shared constructor body: zero the register and default to the packed
 * (stride 1) layout.
 */
void
fs_reg::init()
{
   memset(this, 0, sizeof(*this));
   stride = 1;
}
351
/** Generic unset register constructor. */
fs_reg::fs_reg()
{
   init();
   this->file = BAD_FILE;
}
358
/** Immediate value constructor (32-bit float). */
fs_reg::fs_reg(float f)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_F;
   /* Immediates are scalar; every channel reads the same value. */
   this->stride = 0;
   this->fixed_hw_reg.dw1.f = f;
}
368
/** Immediate value constructor (signed 32-bit integer). */
fs_reg::fs_reg(int32_t i)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_D;
   /* Immediates are scalar; every channel reads the same value. */
   this->stride = 0;
   this->fixed_hw_reg.dw1.d = i;
}
378
/** Immediate value constructor (unsigned 32-bit integer). */
fs_reg::fs_reg(uint32_t u)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_UD;
   /* Immediates are scalar; every channel reads the same value. */
   this->stride = 0;
   this->fixed_hw_reg.dw1.ud = u;
}
388
/** Vector float immediate value constructor (packed array form). */
fs_reg::fs_reg(uint8_t vf[4])
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_VF;
   /* The four 8-bit VF components are packed into one dword. */
   memcpy(&this->fixed_hw_reg.dw1.ud, vf, sizeof(unsigned));
}
397
/** Vector float immediate value constructor (individual components). */
fs_reg::fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
{
   init();
   this->file = IMM;
   this->type = BRW_REGISTER_TYPE_VF;
   /* Pack the four 8-bit VF components little-endian into one dword. */
   this->fixed_hw_reg.dw1.ud = (vf0 <<  0) |
                               (vf1 <<  8) |
                               (vf2 << 16) |
                               (vf3 << 24);
}
409
/** Fixed brw_reg: wraps a specific hardware register. */
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
{
   init();
   this->file = HW_REG;
   this->fixed_hw_reg = fixed_hw_reg;
   this->type = fixed_hw_reg.type;
}
418
419 bool
420 fs_reg::equals(const fs_reg &r) const
421 {
422 return (file == r.file &&
423 reg == r.reg &&
424 reg_offset == r.reg_offset &&
425 subreg_offset == r.subreg_offset &&
426 type == r.type &&
427 negate == r.negate &&
428 abs == r.abs &&
429 !reladdr && !r.reladdr &&
430 memcmp(&fixed_hw_reg, &r.fixed_hw_reg, sizeof(fixed_hw_reg)) == 0 &&
431 stride == r.stride);
432 }
433
/* Make every channel read the single component \p subreg of this register
 * (stride 0 replicates it).  Returns *this for chaining.
 */
fs_reg &
fs_reg::set_smear(unsigned subreg)
{
   assert(file != HW_REG && file != IMM);
   subreg_offset = subreg * type_sz(type);
   stride = 0;
   return *this;
}
442
/* Returns whether the components are laid out with no gaps (stride 1). */
bool
fs_reg::is_contiguous() const
{
   return stride == 1;
}
448
/* Returns the size in bytes this register occupies for \p width channels,
 * taking the stride into account.  HW_REGs encode their stride as a
 * logarithmic hstride field, which is decoded here.
 */
unsigned
fs_reg::component_size(unsigned width) const
{
   const unsigned stride = (file != HW_REG ? this->stride :
                            fixed_hw_reg.hstride == 0 ? 0 :
                            1 << (fixed_hw_reg.hstride - 1));
   /* Even a stride-0 (replicated) register occupies one component. */
   return MAX2(width * stride, 1) * type_sz(type);
}
457
/* Returns the number of scalar components a GLSL type occupies in the
 * FS backend's virtual register file.
 */
int
fs_visitor::type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      /* Arrays are the element size times the element count. */
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      /* Structs are the sum of their members, in declaration order. */
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_SUBROUTINE:
      /* A subroutine uniform is a single index into the routine table. */
      return 1;
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return 0;
}
496
/**
 * Create a MOV to read the timestamp register.
 *
 * The caller is responsible for emitting the MOV.  The return value is
 * the destination of the MOV, with extra parameters set.
 */
fs_reg
fs_visitor::get_timestamp(const fs_builder &bld)
{
   assert(devinfo->gen >= 7);

   /* ARF TIMESTAMP, read as unsigned dwords. */
   fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                          BRW_ARF_TIMESTAMP,
                                          0),
                             BRW_REGISTER_TYPE_UD));

   fs_reg dst = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* We want to read the 3 fields we care about even if it's not enabled in
    * the dispatch.
    */
   bld.group(4, 0).exec_all().MOV(dst, ts);

   /* The caller wants the low 32 bits of the timestamp.  Since it's running
    * at the GPU clock rate of ~1.2ghz, it will roll over every ~3 seconds,
    * which is plenty of time for our purposes.  It is identical across the
    * EUs, but since it's tracking GPU core speed it will increment at a
    * varying rate as render P-states change.
    *
    * The caller could also check if render P-states have changed (or anything
    * else that might disrupt timing) by setting smear to 2 and checking if
    * that field is != 0.
    */
   dst.set_smear(0);

   return dst;
}
534
/* Capture the start-of-shader timestamp for INTEL_DEBUG=shader_time. */
void
fs_visitor::emit_shader_time_begin()
{
   shader_start_time = get_timestamp(bld.annotate("shader time start"));
}
540
541 void
542 fs_visitor::emit_shader_time_end()
543 {
544 /* Insert our code just before the final SEND with EOT. */
545 exec_node *end = this->instructions.get_tail();
546 assert(end && ((fs_inst *) end)->eot);
547 const fs_builder ibld = bld.annotate("shader time end")
548 .exec_all().at(NULL, end);
549
550 fs_reg shader_end_time = get_timestamp(ibld);
551
552 /* Check that there weren't any timestamp reset events (assuming these
553 * were the only two timestamp reads that happened).
554 */
555 fs_reg reset = shader_end_time;
556 reset.set_smear(2);
557 set_condmod(BRW_CONDITIONAL_Z,
558 ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
559 ibld.IF(BRW_PREDICATE_NORMAL);
560
561 fs_reg start = shader_start_time;
562 start.negate = true;
563 fs_reg diff = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
564 diff.set_smear(0);
565
566 const fs_builder cbld = ibld.group(1, 0);
567 cbld.group(1, 0).ADD(diff, start, shader_end_time);
568
569 /* If there were no instructions between the two timestamp gets, the diff
570 * is 2 cycles. Remove that overhead, so I can forget about that when
571 * trying to determine the time taken for single instructions.
572 */
573 cbld.ADD(diff, diff, fs_reg(-2u));
574 SHADER_TIME_ADD(cbld, 0, diff);
575 SHADER_TIME_ADD(cbld, 1, fs_reg(1u));
576 ibld.emit(BRW_OPCODE_ELSE);
577 SHADER_TIME_ADD(cbld, 2, fs_reg(1u));
578 ibld.emit(BRW_OPCODE_ENDIF);
579 }
580
/* Emit one atomic add of \p value into the shader-time buffer at slot
 * (shader_time_index, shader_time_subindex).
 */
void
fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
                            int shader_time_subindex,
                            fs_reg value)
{
   /* Each shader-time record is 3 slots wide. */
   int index = shader_time_index * 3 + shader_time_subindex;
   fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);

   /* Payload size depends on dispatch width. */
   fs_reg payload;
   if (dispatch_width == 8)
      payload = vgrf(glsl_type::uvec2_type);
   else
      payload = vgrf(glsl_type::uint_type);

   bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
}
597
/* Record a compile failure (first failure wins) and remember the message;
 * also print it when debugging is enabled.
 */
void
fs_visitor::vfail(const char *format, va_list va)
{
   char *msg;

   /* Only the first failure is reported. */
   if (failed)
      return;

   failed = true;

   msg = ralloc_vasprintf(mem_ctx, format, va);
   msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);

   this->fail_msg = msg;

   if (debug_enabled) {
      fprintf(stderr, "%s", msg);
   }
}
617
/* Varargs front-end for vfail(). */
void
fs_visitor::fail(const char *format, ...)
{
   va_list va;

   va_start(va, format);
   vfail(format, va);
   va_end(va);
}
627
628 /**
629 * Mark this program as impossible to compile in SIMD16 mode.
630 *
631 * During the SIMD8 compile (which happens first), we can detect and flag
632 * things that are unsupported in SIMD16 mode, so the compiler can skip
633 * the SIMD16 compile altogether.
634 *
635 * During a SIMD16 compile (if one happens anyway), this just calls fail().
636 */
void
fs_visitor::no16(const char *msg)
{
   if (dispatch_width == 16) {
      /* Already compiling SIMD16: this is a hard failure. */
      fail("%s", msg);
   } else {
      /* SIMD8 compile: just flag SIMD16 as unsupported and log why. */
      simd16_unsupported = true;

      compiler->shader_perf_log(log_data,
                                "SIMD16 shader failed to compile: %s", msg);
   }
}
649
650 /**
651 * Returns true if the instruction has a flag that means it won't
652 * update an entire destination register.
653 *
654 * For example, dead code elimination and live variable analysis want to know
655 * when a write to a variable screens off any preceding values that were in
656 * it.
657 */
bool
fs_inst::is_partial_write() const
{
   /* Predicated non-SEL writes, writes narrower than a full register
    * (32 bytes), and strided destinations all leave part of the previous
    * destination value intact.
    */
   return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
           (this->exec_size * type_sz(this->dst.type)) < 32 ||
           !this->dst.is_contiguous());
}
665
/* Returns how many hardware registers source \p arg of this instruction
 * reads, accounting for message payloads and multi-component sources.
 */
int
fs_inst::regs_read(int arg) const
{
   unsigned components = 1;
   switch (opcode) {
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      /* src[0] is the whole message payload; mlen registers long. */
      if (arg == 0)
         return mlen;
      break;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      /* The payload is actually stored in src1 */
      if (arg == 1)
         return mlen;
      break;

   case FS_OPCODE_LINTERP:
      /* src[0] holds the barycentric deltas; one register per 4 channels. */
      if (arg == 0)
         return exec_size / 4;
      else
         return 1;

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:
      /* src[0] spans two components. */
      if (arg == 0)
         components = 2;
      break;

   case SHADER_OPCODE_LOAD_PAYLOAD:
      /* Header sources are always exactly one register each. */
      if (arg < this->header_size)
         return 1;
      break;

   case CS_OPCODE_CS_TERMINATE:
      return 1;

   default:
      /* Texture messages with a GRF payload read mlen registers. */
      if (is_tex() && arg == 0 && src[0].file == GRF)
         return mlen;
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
   case UNIFORM:
   case IMM:
      return 1;
   case GRF:
   case HW_REG:
      /* Footprint of the source rounded up to whole registers. */
      return DIV_ROUND_UP(components * src[arg].component_size(exec_size),
                          REG_SIZE);
   case MRF:
      unreachable("MRF registers are not allowed as sources");
   default:
      unreachable("Invalid register file");
   }
}
731
/* Returns whether this instruction reads the flag register (i.e. is
 * predicated).
 */
bool
fs_inst::reads_flag() const
{
   return predicate;
}
737
/* Returns whether this instruction writes the flag register.  SEL/IF/WHILE
 * use conditional_mod to select rather than to update the flags.
 */
bool
fs_inst::writes_flag() const
{
   return (conditional_mod && (opcode != BRW_OPCODE_SEL &&
                               opcode != BRW_OPCODE_IF &&
                               opcode != BRW_OPCODE_WHILE)) ||
          opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
}
746
747 /**
748 * Returns how many MRFs an FS opcode will write over.
749 *
750 * Note that this is not the 0 or 1 implied writes in an actual gen
751 * instruction -- the FS opcodes often generate MOVs in addition.
752 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   /* No message payload means no MRF writes. */
   if (inst->mlen == 0)
      return 0;

   /* base_mrf == -1 means the payload is sourced from the GRF. */
   if (inst->base_mrf == -1)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* One-operand math: one MRF per 8 channels. */
      return 1 * dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      /* Two-operand math: two MRFs per 8 channels. */
      return 2 * dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 1;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return inst->mlen;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->mlen;
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      /* These send from the GRF, so they never write MRFs. */
      return 0;
   default:
      unreachable("not reached");
   }
}
812
/* Allocate a fresh virtual GRF large enough to hold one value of the given
 * GLSL type at the current dispatch width.
 */
fs_reg
fs_visitor::vgrf(const glsl_type *const type)
{
   int reg_width = dispatch_width / 8;
   return fs_reg(GRF, alloc.allocate(type_size(type) * reg_width),
                 brw_type_for_base_type(type));
}
820
/** Register constructor for a given file/number (defaults to float type).
 *
 * NOTE(review): the old "Fixed HW reg constructor" comment was misleading;
 * the HW_REG case has its own brw_reg constructor above.
 */
fs_reg::fs_reg(enum register_file file, int reg)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = BRW_REGISTER_TYPE_F;
   /* Uniforms are scalar; every channel reads the same value. */
   this->stride = (file == UNIFORM ? 0 : 1);
}
830
/** Register constructor for a given file/number with an explicit type.
 *
 * NOTE(review): the old "Fixed HW reg constructor" comment was misleading;
 * the HW_REG case has its own brw_reg constructor above.
 */
fs_reg::fs_reg(enum register_file file, int reg, enum brw_reg_type type)
{
   init();
   this->file = file;
   this->reg = reg;
   this->type = type;
   /* Uniforms are scalar; every channel reads the same value. */
   this->stride = (file == UNIFORM ? 0 : 1);
}
840
/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
 * This brings in those uniform definitions
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   this->push_constant_loc = v->push_constant_loc;
   this->pull_constant_loc = v->pull_constant_loc;
   this->uniforms = v->uniforms;
   this->param_size = v->param_size;
}
852
/* Build the vec4 value of gl_FragCoord, honoring the layout qualifiers
 * pixel_center_integer / origin_upper_left and the render-to-FBO flip.
 */
fs_reg *
fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
                                         bool origin_upper_left)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec4_type));
   fs_reg wpos = *reg;
   /* Y must be flipped when the shader's origin convention disagrees with
    * the target surface's orientation.
    */
   bool flip = !origin_upper_left ^ key->render_to_fbo;

   /* gl_FragCoord.x */
   if (pixel_center_integer) {
      bld.MOV(wpos, this->pixel_x);
   } else {
      /* Default convention: pixel centers are at half-integer positions. */
      bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.y */
   if (!flip && pixel_center_integer) {
      bld.MOV(wpos, this->pixel_y);
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         /* y' = (height - 1) - y, folded into a negate plus offset. */
         pixel_y.negate = true;
         offset += key->drawable_height - 1.0;
      }

      bld.ADD(wpos, pixel_y, fs_reg(offset));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.z */
   if (devinfo->gen >= 6) {
      /* Gen6+ delivers interpolated depth in the thread payload. */
      bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
   } else {
      /* Older parts must interpolate Z from the position setup data. */
      bld.emit(FS_OPCODE_LINTERP, wpos,
               this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
               interp_reg(VARYING_SLOT_POS, 2));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   bld.MOV(wpos, this->wpos_w);

   return reg;
}
902
/* Emit a LINTERP of \p interp into \p attr using the barycentric
 * coordinate set selected by the interpolation qualifier and the
 * centroid/sample modifiers.
 */
fs_inst *
fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         glsl_interp_qualifier interpolation_mode,
                         bool is_centroid, bool is_sample)
{
   brw_wm_barycentric_interp_mode barycoord_mode;
   if (devinfo->gen >= 6) {
      /* Pick perspective vs. noperspective crossed with
       * pixel/centroid/sample.
       */
      if (is_centroid) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
      } else if (is_sample) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
      } else {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
      }
   } else {
      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
   }
   return bld.emit(FS_OPCODE_LINTERP, attr,
                   this->delta_xy[barycoord_mode], interp);
}
936
/* Emit interpolation (or flat-shaded moves) for every component of a
 * varying of arbitrary array/matrix/vector type, starting at urb slot
 * \p location and writing into \p attr.
 */
void
fs_visitor::emit_general_interpolation(fs_reg attr, const char *name,
                                       const glsl_type *type,
                                       glsl_interp_qualifier interpolation_mode,
                                       int location, bool mod_centroid,
                                       bool mod_sample)
{
   attr.type = brw_type_for_base_type(type->get_scalar_type());

   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   unsigned int array_elements;

   /* Flatten an array type into (element count, element type). */
   if (type->is_array()) {
      array_elements = type->length;
      if (array_elements == 0) {
         fail("dereferenced array '%s' has length 0\n", name);
      }
      type = type->fields.array;
   } else {
      array_elements = 1;
   }

   if (interpolation_mode == INTERP_QUALIFIER_NONE) {
      /* No explicit qualifier: gl_Color/gl_SecondaryColor follow the
       * flat-shade state; everything else interpolates smoothly.
       */
      bool is_gl_Color =
         location == VARYING_SLOT_COL0 || location == VARYING_SLOT_COL1;
      if (key->flat_shade && is_gl_Color) {
         interpolation_mode = INTERP_QUALIFIER_FLAT;
      } else {
         interpolation_mode = INTERP_QUALIFIER_SMOOTH;
      }
   }

   /* One urb slot per array element per matrix column. */
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (prog_data->urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr = offset(attr, bld, type->vector_elements);
            location++;
            continue;
         }

         if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
               interp.type = attr.type;
               bld.emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr = offset(attr, bld, 1);
            }
         } else {
            /* Smooth/noperspective interpolation case. */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               if (devinfo->needs_unlit_centroid_workaround && mod_centroid) {
                  /* Get the pixel/sample mask into f0 so that we know
                   * which pixels are lit. Then, for each channel that is
                   * unlit, replace the centroid data with non-centroid
                   * data.
                   */
                  bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

                  /* Inverted predicate: unlit channels get pixel data. */
                  fs_inst *inst;
                  inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
                                      false, false);
                  inst->predicate = BRW_PREDICATE_NORMAL;
                  inst->predicate_inverse = true;
                  if (devinfo->has_pln)
                     inst->no_dd_clear = true;

                  /* Lit channels get the real centroid/sample data. */
                  inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
                                      mod_centroid && !key->persample_shading,
                                      mod_sample || key->persample_shading);
                  inst->predicate = BRW_PREDICATE_NORMAL;
                  inst->predicate_inverse = false;
                  if (devinfo->has_pln)
                     inst->no_dd_check = true;

               } else {
                  emit_linterp(attr, fs_reg(interp), interpolation_mode,
                               mod_centroid && !key->persample_shading,
                               mod_sample || key->persample_shading);
               }
               if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
                  /* Pre-gen6: undo the perspective division by hand. */
                  bld.MUL(attr, attr, this->pixel_w);
               }
               attr = offset(attr, bld, 1);
            }

         }
         location++;
      }
   }
}
1039
/* Compute the boolean value of gl_FrontFacing from the payload bit that
 * marks back-facing polygons.
 */
fs_reg *
fs_visitor::emit_frontfacing_interpolation()
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (~0/true or 0/false).
       *
       * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
       * this task in only one instruction:
       *    - a negation source modifier will flip the bit; and
       *    - a W -> D type conversion will sign extend the bit into the high
       *      word of the destination.
       *
       * An ASR 15 fills the low word of the destination.
       */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
      g0.negate = true;

      bld.ASR(*reg, g0, fs_reg(15));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (1/true or 0/false).
       *
       * Like in the above case, since the bit is the MSB of g1.6:UD we can use
       * the negation source modifier to flip it. Unfortunately the SHR
       * instruction only operates on UD (or D with an abs source modifier)
       * sources without negation.
       *
       * Instead, use ASR (which will give ~0/true or 0/false).
       */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
      g1_6.negate = true;

      bld.ASR(*reg, g1_6, fs_reg(31));
   }

   return reg;
}
1080
/* Convert an integer sample position (in 1/16ths of a pixel) into the
 * float gl_SamplePosition value, or the fixed (0.5, 0.5) when per-sample
 * position offsets are not being computed.
 */
void
fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(dst.type == BRW_REGISTER_TYPE_F);

   if (key->compute_pos_offset) {
      /* Convert int_sample_pos to floating point */
      bld.MOV(dst, int_sample_pos);
      /* Scale to the range [0, 1] */
      bld.MUL(dst, dst, fs_reg(1 / 16.0f));
   }
   else {
      /* From ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SamplePosition will always be
       *  (0.5, 0.5)."
       */
      bld.MOV(dst, fs_reg(0.5f));
   }
}
1103
/* Read the per-sample X/Y position bytes out of the thread payload and
 * convert them into the vec2 gl_SamplePosition value.
 */
fs_reg *
fs_visitor::emit_samplepos_setup()
{
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample position");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
   fs_reg pos = *reg;
   fs_reg int_sample_x = vgrf(glsl_type::int_type);
   fs_reg int_sample_y = vgrf(glsl_type::int_type);

   /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
    * mode will be enabled.
    *
    * From the Ivy Bridge PRM, volume 2 part 1, page 344:
    * R31.1:0         Position Offset X/Y for Slot[3:0]
    * R31.3:2         Position Offset X/Y for Slot[7:4]
    * .....
    *
    * The X, Y sample positions come in as bytes in  thread payload. So, read
    * the positions using vstride=16, width=8, hstride=2.
    */
   struct brw_reg sample_pos_reg =
      stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
                    BRW_REGISTER_TYPE_B), 16, 8, 2);

   if (dispatch_width == 8) {
      abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
   } else {
      /* SIMD16: read the two 8-channel halves separately. */
      abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
      abld.half(1).MOV(half(int_sample_x, 1),
                       fs_reg(suboffset(sample_pos_reg, 16)));
   }
   /* Compute gl_SamplePosition.x */
   compute_sample_position(pos, int_sample_x);
   pos = offset(pos, abld, 1);
   if (dispatch_width == 8) {
      /* Y bytes are interleaved one byte after the X bytes. */
      abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
   } else {
      abld.half(0).MOV(half(int_sample_y, 0),
                       fs_reg(suboffset(sample_pos_reg, 1)));
      abld.half(1).MOV(half(int_sample_y, 1),
                       fs_reg(suboffset(sample_pos_reg, 17)));
   }
   /* Compute gl_SamplePosition.y */
   compute_sample_position(pos, int_sample_y);
   return reg;
}
1152
/**
 * Allocate an int VGRF holding gl_SampleID and emit the instructions that
 * compute it from the "Starting Sample Pair Index" bits in R0.0, or emit a
 * constant zero when per-sample dispatch isn't in effect.
 *
 * Returns a ralloc'ed fs_reg pointing at the result.
 */
fs_reg *
fs_visitor::emit_sampleid_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample id");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   if (key->compute_sample_id) {
      fs_reg t1 = vgrf(glsl_type::int_type);
      fs_reg t2 = vgrf(glsl_type::int_type);
      /* UW so that brw_imm_v's packed 4-bit vector immediate below reads as
       * the per-channel sequence we want.
       */
      t2.type = BRW_REGISTER_TYPE_UW;

      /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
       * 8x multisampling, subspan 0 will represent sample N (where N
       * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
       * 7. We can find the value of N by looking at R0.0 bits 7:6
       * ("Starting Sample Pair Index (SSPI)") and multiplying by two
       * (since samples are always delivered in pairs). That is, we
       * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
       * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
       * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
       * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
       * populating a temporary variable with the sequence (0, 1, 2, 3),
       * and then reading from it using vstride=1, width=4, hstride=0.
       * These computations hold good for 4x multisampling as well.
       *
       * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
       * the first four slots are sample 0 of subspan 0; the next four
       * are sample 1 of subspan 0; the third group is sample 0 of
       * subspan 1, and finally sample 1 of subspan 1.
       */
      abld.exec_all()
          .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
               fs_reg(0xc0));
      abld.exec_all().SHR(t1, t1, fs_reg(5));

      /* This works for both SIMD8 and SIMD16 */
      abld.exec_all()
          .MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210));

      /* This special instruction takes care of setting vstride=1,
       * width=4, hstride=0 of t2 during an ADD instruction.
       */
      abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
   } else {
      /* As per GL_ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SampleID will always be zero."
       */
      abld.MOV(*reg, fs_reg(0));
   }

   return reg;
}
1210
1211 void
1212 fs_visitor::resolve_source_modifiers(fs_reg *src)
1213 {
1214 if (!src->abs && !src->negate)
1215 return;
1216
1217 fs_reg temp = bld.vgrf(src->type);
1218 bld.MOV(temp, *src);
1219 *src = temp;
1220 }
1221
1222 void
1223 fs_visitor::emit_discard_jump()
1224 {
1225 assert(((brw_wm_prog_data*) this->prog_data)->uses_kill);
1226
1227 /* For performance, after a discard, jump to the end of the
1228 * shader if all relevant channels have been discarded.
1229 */
1230 fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
1231 discard_jump->flag_subreg = 1;
1232
1233 discard_jump->predicate = (dispatch_width == 8)
1234 ? BRW_PREDICATE_ALIGN1_ANY8H
1235 : BRW_PREDICATE_ALIGN1_ANY16H;
1236 discard_jump->predicate_inverse = true;
1237 }
1238
/**
 * Record where the push-constant (CURBE) registers start for this dispatch
 * width, then rewrite every UNIFORM-file source to the fixed hardware GRF
 * its push constant was assigned to.
 */
void
fs_visitor::assign_curb_setup()
{
   if (dispatch_width == 8) {
      prog_data->dispatch_grf_start_reg = payload.num_regs;
   } else {
      /* The SIMD16 start reg lives in a stage-specific prog_data field, so
       * pick the right derived type by stage.
       */
      if (stage == MESA_SHADER_FRAGMENT) {
         brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
         prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
      } else if (stage == MESA_SHADER_COMPUTE) {
         brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
         prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
      } else {
         unreachable("Unsupported shader type!");
      }
   }

   /* 8 push-constant components fit in a register; round up. */
   prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == UNIFORM) {
            int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
            int constant_nr;
            if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
               constant_nr = push_constant_loc[uniform_nr];
            } else {
               /* Section 5.11 of the OpenGL 4.1 spec says:
                * "Out-of-bounds reads return undefined values, which include
                * values from other variables of the active program or zero."
                * Just return the first push constant.
                */
               constant_nr = 0;
            }

            struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            /* Uniform sources are scalars; they must not stride. */
            assert(inst->src[i].stride == 0);
            inst->src[i].file = HW_REG;
            inst->src[i].fixed_hw_reg = byte_offset(
               retype(brw_reg, inst->src[i].type),
               inst->src[i].subreg_offset);
         }
      }
   }
}
1288
/**
 * Decide which URB slot each fragment-shader input varying is read from,
 * filling prog_data->urb_setup[] (-1 for unused slots) and
 * prog_data->num_varying_inputs.
 */
void
fs_visitor::calculate_urb_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   /* Start with every slot marked unused (-1). */
   memset(prog_data->urb_setup, -1,
          sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (devinfo->gen >= 6) {
      if (_mesa_bitcount_64(prog->InputsRead &
                            BRW_FS_VARYING_INPUT_MASK) <= 16) {
         /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
          * first 16 varying inputs, so we can put them wherever we want.
          * Just put them in order.
          *
          * This is useful because it means that (a) inputs not used by the
          * fragment shader won't take up valuable register space, and (b) we
          * won't have to recompile the fragment shader if it gets paired with
          * a different vertex (or geometry) shader.
          */
         for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
            if (prog->InputsRead & BRW_FS_VARYING_INPUT_MASK &
                BITFIELD64_BIT(i)) {
               prog_data->urb_setup[i] = urb_next++;
            }
         }
      } else {
         /* We have enough input varyings that the SF/SBE pipeline stage can't
          * arbitrarily rearrange them to suit our whim; we have to put them
          * in an order that matches the output of the previous pipeline stage
          * (geometry or vertex shader).
          */
         struct brw_vue_map prev_stage_vue_map;
         brw_compute_vue_map(devinfo, &prev_stage_vue_map,
                             key->input_slots_valid);
         int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
         assert(prev_stage_vue_map.num_slots <= first_slot + 32);
         for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
              slot++) {
            int varying = prev_stage_vue_map.slot_to_varying[slot];
            /* Note that varying == BRW_VARYING_SLOT_COUNT when a slot is
             * unused.
             */
            if (varying != BRW_VARYING_SLOT_COUNT &&
                (prog->InputsRead & BRW_FS_VARYING_INPUT_MASK &
                 BITFIELD64_BIT(varying))) {
               prog_data->urb_setup[varying] = slot - first_slot;
            }
         }
         urb_next = prev_stage_vue_map.num_slots - first_slot;
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VARYING_SLOT_PSIZ)
            continue;

         if (key->input_slots_valid & BITFIELD64_BIT(i)) {
            /* The back color slot is skipped when the front color is
             * also written to. In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader. So the register number must always be
             * incremented, mapped or not.
             */
            if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
               prog_data->urb_setup[i] = urb_next;
            urb_next++;
         }
      }

      /*
       * It's a FS only attribute, and we did interpolation for this attribute
       * in SF thread. So, count it here, too.
       *
       * See compile_sf_prog() for more info.
       */
      if (prog->InputsRead & BITFIELD64_BIT(VARYING_SLOT_PNTC))
         prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
   }

   prog_data->num_varying_inputs = urb_next;
}
1376
1377 void
1378 fs_visitor::assign_urb_setup()
1379 {
1380 assert(stage == MESA_SHADER_FRAGMENT);
1381 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1382
1383 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1384
1385 /* Offset all the urb_setup[] index by the actual position of the
1386 * setup regs, now that the location of the constants has been chosen.
1387 */
1388 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1389 if (inst->opcode == FS_OPCODE_LINTERP) {
1390 assert(inst->src[1].file == HW_REG);
1391 inst->src[1].fixed_hw_reg.nr += urb_start;
1392 }
1393
1394 if (inst->opcode == FS_OPCODE_CINTERP) {
1395 assert(inst->src[0].file == HW_REG);
1396 inst->src[0].fixed_hw_reg.nr += urb_start;
1397 }
1398 }
1399
1400 /* Each attribute is 4 setup channels, each of which is half a reg. */
1401 this->first_non_payload_grf =
1402 urb_start + prog_data->num_varying_inputs * 2;
1403 }
1404
/**
 * Compute the VS URB entry layout (entry size and read length) and rewrite
 * every ATTR-file source to the fixed hardware GRF its vertex attribute
 * lands in.
 */
void
fs_visitor::assign_vs_urb_setup()
{
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;
   int grf, count, slot, channel, attr;

   assert(stage == MESA_SHADER_VERTEX);
   count = _mesa_bitcount_64(vs_prog_data->inputs_read);
   /* gl_VertexID/gl_InstanceID consume one extra attribute slot. */
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid)
      count++;

   /* Each attribute is 4 regs. */
   this->first_non_payload_grf =
      payload.num_regs + prog_data->curb_read_length + count * 4;

   unsigned vue_entries =
      MAX2(count, vs_prog_data->base.vue_map.num_slots);

   /* URB entry size is in units of 4 slots; read length in pairs of regs. */
   vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4;
   vs_prog_data->base.urb_read_length = (count + 1) / 2;

   assert(vs_prog_data->base.urb_read_length <= 15);

   /* Rewrite all ATTR file references to the hw grf that they land in. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == ATTR) {

            /* VERT_ATTRIB_MAX is used for the extra vertex/instance-id slot,
             * which always lives in the last attribute position.
             */
            if (inst->src[i].reg == VERT_ATTRIB_MAX) {
               slot = count - 1;
            } else {
               /* Attributes come in in a contiguous block, ordered by their
                * gl_vert_attrib value. That means we can compute the slot
                * number for an attribute by masking out the enabled
                * attributes before it and counting the bits.
                */
               attr = inst->src[i].reg + inst->src[i].reg_offset / 4;
               slot = _mesa_bitcount_64(vs_prog_data->inputs_read &
                                        BITFIELD64_MASK(attr));
            }

            /* Each slot is 4 regs; reg_offset's low bits select the channel. */
            channel = inst->src[i].reg_offset & 3;

            grf = payload.num_regs +
               prog_data->curb_read_length +
               slot * 4 + channel;

            inst->src[i].file = HW_REG;
            inst->src[i].fixed_hw_reg =
               retype(brw_vec8_grf(grf, 0), inst->src[i].type);
         }
      }
   }
}
1459
1460 /**
1461 * Split large virtual GRFs into separate components if we can.
1462 *
1463 * This is mostly duplicated with what brw_fs_vector_splitting does,
1464 * but that's really conservative because it's afraid of doing
1465 * splitting that doesn't result in real progress after the rest of
1466 * the optimization phases, which would cause infinite looping in
1467 * optimization. We can do it once here, safely. This also has the
1468 * opportunity to split interpolated values, or maybe even uniforms,
1469 * which we don't have at the IR level.
1470 *
1471 * We want to split, because virtual GRFs are what we register
1472 * allocate and spill (due to contiguousness requirements for some
1473 * instructions), and they're what we naturally generate in the
1474 * codegen process, but most virtual GRFs don't actually need to be
1475 * contiguous sets of GRFs. If we split, we'll end up with reduced
1476 * live intervals and better dead code elimination and coalescing.
1477 */
void
fs_visitor::split_virtual_grfs()
{
   /* Snapshot the VGRF count; alloc.count grows as we allocate splits. */
   int num_vars = this->alloc.count;

   /* Count the total number of registers, building a map from VGRF index
    * to its first flat register-slot index.
    */
   int reg_count = 0;
   int vgrf_to_reg[num_vars];
   for (int i = 0; i < num_vars; i++) {
      vgrf_to_reg[i] = reg_count;
      reg_count += alloc.sizes[i];
   }

   /* An array of "split points". For each register slot, this indicates
    * if this slot can be separated from the previous slot. Every time an
    * instruction uses multiple elements of a register (as a source or
    * destination), we mark the used slots as inseparable. Then we go
    * through and split the registers into the smallest pieces we can.
    */
   bool split_points[reg_count];
   memset(split_points, 0, sizeof(split_points));

   /* Mark all used registers as fully splittable */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         int reg = vgrf_to_reg[inst->dst.reg];
         for (unsigned j = 1; j < this->alloc.sizes[inst->dst.reg]; j++)
            split_points[reg + j] = true;
      }

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            int reg = vgrf_to_reg[inst->src[i].reg];
            for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].reg]; j++)
               split_points[reg + j] = true;
         }
      }
   }

   /* Second pass: clear the split points inside any multi-register access,
    * since those slots must stay contiguous.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         int reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
         for (int j = 1; j < inst->regs_written; j++)
            split_points[reg + j] = false;
      }
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            int reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
            for (int j = 1; j < inst->regs_read(i); j++)
               split_points[reg + j] = false;
         }
      }
   }

   /* For each flat register slot, the VGRF and offset it maps to after
    * splitting.
    */
   int new_virtual_grf[reg_count];
   int new_reg_offset[reg_count];

   int reg = 0;
   for (int i = 0; i < num_vars; i++) {
      /* The first one should always be 0 as a quick sanity check. */
      assert(split_points[reg] == false);

      /* j = 0 case */
      new_reg_offset[reg] = 0;
      reg++;
      int offset = 1;

      /* j > 0 case */
      for (unsigned j = 1; j < alloc.sizes[i]; j++) {
         /* If this is a split point, reset the offset to 0 and allocate a
          * new virtual GRF for the previous offset many registers
          */
         if (split_points[reg]) {
            assert(offset <= MAX_VGRF_SIZE);
            int grf = alloc.allocate(offset);
            for (int k = reg - offset; k < reg; k++)
               new_virtual_grf[k] = grf;
            offset = 0;
         }
         new_reg_offset[reg] = offset;
         offset++;
         reg++;
      }

      /* The last one gets the original register number */
      assert(offset <= MAX_VGRF_SIZE);
      alloc.sizes[i] = offset;
      for (int k = reg - offset; k < reg; k++)
         new_virtual_grf[k] = i;
   }
   assert(reg == reg_count);

   /* Rewrite every GRF reference to its post-split VGRF and offset. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
         inst->dst.reg = new_virtual_grf[reg];
         inst->dst.reg_offset = new_reg_offset[reg];
         assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
      }
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
            inst->src[i].reg = new_virtual_grf[reg];
            inst->src[i].reg_offset = new_reg_offset[reg];
            assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
         }
      }
   }
   invalidate_live_intervals();
}
1588
1589 /**
1590 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1591 *
1592 * During code generation, we create tons of temporary variables, many of
1593 * which get immediately killed and are never used again. Yet, in later
1594 * optimization and analysis passes, such as compute_live_intervals, we need
1595 * to loop over all the virtual GRFs. Compacting them can save a lot of
1596 * overhead.
1597 */
1598 bool
1599 fs_visitor::compact_virtual_grfs()
1600 {
1601 bool progress = false;
1602 int remap_table[this->alloc.count];
1603 memset(remap_table, -1, sizeof(remap_table));
1604
1605 /* Mark which virtual GRFs are used. */
1606 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1607 if (inst->dst.file == GRF)
1608 remap_table[inst->dst.reg] = 0;
1609
1610 for (int i = 0; i < inst->sources; i++) {
1611 if (inst->src[i].file == GRF)
1612 remap_table[inst->src[i].reg] = 0;
1613 }
1614 }
1615
1616 /* Compact the GRF arrays. */
1617 int new_index = 0;
1618 for (unsigned i = 0; i < this->alloc.count; i++) {
1619 if (remap_table[i] == -1) {
1620 /* We just found an unused register. This means that we are
1621 * actually going to compact something.
1622 */
1623 progress = true;
1624 } else {
1625 remap_table[i] = new_index;
1626 alloc.sizes[new_index] = alloc.sizes[i];
1627 invalidate_live_intervals();
1628 ++new_index;
1629 }
1630 }
1631
1632 this->alloc.count = new_index;
1633
1634 /* Patch all the instructions to use the newly renumbered registers */
1635 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1636 if (inst->dst.file == GRF)
1637 inst->dst.reg = remap_table[inst->dst.reg];
1638
1639 for (int i = 0; i < inst->sources; i++) {
1640 if (inst->src[i].file == GRF)
1641 inst->src[i].reg = remap_table[inst->src[i].reg];
1642 }
1643 }
1644
1645 /* Patch all the references to delta_xy, since they're used in register
1646 * allocation. If they're unused, switch them to BAD_FILE so we don't
1647 * think some random VGRF is delta_xy.
1648 */
1649 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
1650 if (delta_xy[i].file == GRF) {
1651 if (remap_table[delta_xy[i].reg] != -1) {
1652 delta_xy[i].reg = remap_table[delta_xy[i].reg];
1653 } else {
1654 delta_xy[i].file = BAD_FILE;
1655 }
1656 }
1657 }
1658
1659 return progress;
1660 }
1661
1662 /*
1663 * Implements array access of uniforms by inserting a
1664 * PULL_CONSTANT_LOAD instruction.
1665 *
1666 * Unlike temporary GRF array access (where we don't support it due to
1667 * the difficulty of doing relative addressing on instruction
1668 * destinations), we could potentially do array access of uniforms
1669 * that were loaded in GRF space as push constants. In real-world
1670 * usage we've seen, though, the arrays being used are always larger
1671 * than we could load as push constants, so just always move all
1672 * uniform array access out to a pull constant buffer.
1673 */
void
fs_visitor::move_uniform_array_access_to_pull_constants()
{
   /* Only the SIMD8 compile decides constant locations (see
    * assign_constant_locations); later compiles reuse its choices.
    */
   if (dispatch_width != 8)
      return;

   /* pull_constant_loc[u] == -1 means uniform u hasn't been demoted yet. */
   pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
   memset(pull_constant_loc, -1, sizeof(pull_constant_loc[0]) * uniforms);

   /* Walk through and find array access of uniforms. Put a copy of that
    * uniform in the pull constant buffer.
    *
    * Note that we don't move constant-indexed accesses to arrays. No
    * testing has been done of the performance impact of this choice.
    */
   foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
      for (int i = 0 ; i < inst->sources; i++) {
         /* reladdr marks an indirectly-addressed (array) uniform access. */
         if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
            continue;

         int uniform = inst->src[i].reg;

         /* If this array isn't already present in the pull constant buffer,
          * add it.
          */
         if (pull_constant_loc[uniform] == -1) {
            const gl_constant_value **values = &stage_prog_data->param[uniform];

            assert(param_size[uniform]);

            /* Copy the whole array into the pull buffer so any index works. */
            for (int j = 0; j < param_size[uniform]; j++) {
               pull_constant_loc[uniform + j] = stage_prog_data->nr_pull_params;

               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }
}
1714
1715 /**
1716 * Assign UNIFORM file registers to either push constants or pull constants.
1717 *
1718 * We allow a fragment shader to have more than the specified minimum
1719 * maximum number of fragment shader uniform components (64). If
1720 * there are too many of these, they'd fill up all of register space.
1721 * So, this will push some of them out to the pull constant buffer and
1722 * update the program to load them.
1723 */
void
fs_visitor::assign_constant_locations()
{
   /* Only the first compile (SIMD8 mode) gets to decide on locations. */
   if (dispatch_width != 8)
      return;

   /* Find which UNIFORM registers are still in use. */
   bool is_live[uniforms];
   for (unsigned int i = 0; i < uniforms; i++) {
      is_live[i] = false;
   }

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
         /* Out-of-range references are tolerated here; assign_curb_setup
          * remaps them to constant 0.
          */
         if (constant_nr >= 0 && constant_nr < (int) uniforms)
            is_live[constant_nr] = true;
      }
   }

   /* Only allow 16 registers (128 uniform components) as push constants.
    *
    * Just demote the end of the list. We could probably do better
    * here, demoting things that are rarely used in the program first.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   unsigned int max_push_components = 16 * 8;
   unsigned int num_push_constants = 0;

   push_constant_loc = ralloc_array(mem_ctx, int, uniforms);

   for (unsigned int i = 0; i < uniforms; i++) {
      if (!is_live[i] || pull_constant_loc[i] != -1) {
         /* This UNIFORM register is either dead, or has already been demoted
          * to a pull const. Mark it as no longer living in the param[] array.
          */
         push_constant_loc[i] = -1;
         continue;
      }

      if (num_push_constants < max_push_components) {
         /* Retain as a push constant. Record the location in the params[]
          * array.
          */
         push_constant_loc[i] = num_push_constants++;
      } else {
         /* Demote to a pull constant. */
         push_constant_loc[i] = -1;

         int pull_index = stage_prog_data->nr_pull_params++;
         stage_prog_data->pull_param[pull_index] = stage_prog_data->param[i];
         pull_constant_loc[i] = pull_index;
      }
   }

   stage_prog_data->nr_params = num_push_constants;

   /* Up until now, the param[] array has been indexed by reg + reg_offset
    * of UNIFORM registers. Condense it to only contain the uniforms we
    * chose to upload as push constants.
    */
   for (unsigned int i = 0; i < uniforms; i++) {
      int remapped = push_constant_loc[i];

      if (remapped == -1)
         continue;

      /* Remapping only ever moves entries toward the front, so this
       * in-place compaction never overwrites an entry before reading it.
       */
      assert(remapped <= (int)i);
      stage_prog_data->param[remapped] = stage_prog_data->param[i];
   }
}
1801
1802 /**
1803 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
1804 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
1805 */
void
fs_visitor::demote_pull_constants()
{
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int pull_index;
         unsigned location = inst->src[i].reg + inst->src[i].reg_offset;
         if (location >= uniforms) /* Out of bounds access */
            pull_index = -1;
         else
            pull_index = pull_constant_loc[location];

         /* -1 means this uniform stayed a push constant; leave it alone. */
         if (pull_index == -1)
            continue;

         /* Set up the annotation tracking for new generated instructions. */
         const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
                                    .at(block, inst);
         fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
         fs_reg dst = vgrf(glsl_type::float_type);

         assert(inst->src[i].stride == 0);

         /* Generate a pull load into dst. */
         if (inst->src[i].reladdr) {
            /* Indirectly-addressed: emit a varying-offset load using the
             * reladdr register, which is consumed here.
             */
            VARYING_PULL_CONSTANT_LOAD(ibld, dst,
                                       surf_index,
                                       *inst->src[i].reladdr,
                                       pull_index);
            inst->src[i].reladdr = NULL;
            inst->src[i].stride = 1;
         } else {
            /* Constant offset: load the aligned 16-byte block containing the
             * value and smear the wanted dword across the source.
             */
            fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
            ibld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
                      dst, surf_index, offset);
            inst->src[i].set_smear(pull_index & 3);
         }

         /* Rewrite the instruction to use the temporary VGRF. */
         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;
      }
   }
   invalidate_live_intervals();
}
1855
/**
 * Algebraic peephole optimization: simplify instructions with identity,
 * zero, or foldable immediate operands (a*1 -> a, a+0 -> a, MAD with a
 * redundant source -> MUL/ADD, saturating SEL against a clamping immediate
 * -> MOV, RCP(SQRT(x)) -> RSQ(x), and so on).
 *
 * Returns true if any instruction was rewritten.
 */
bool
fs_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            if (inst->dst.type != inst->src[0].type)
               assert(!"unimplemented: saturate mixed types");

            /* If the immediate can be pre-clamped, drop the saturate. */
            if (brw_saturate_immediate(inst->dst.type,
                                       &inst->src[0].fixed_hw_reg)) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].file != IMM)
            continue;

         /* a * 1.0 = a */
         if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         /* a * -1.0 = -a */
         if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         /* a * 0.0 = 0.0 */
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = inst->src[1];
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         /* imm * imm: constant-fold the product into src0. */
         if (inst->src[0].file == IMM) {
            assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].fixed_hw_reg.dw1.f *= inst->src[1].fixed_hw_reg.dw1.f;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }
         break;
      case BRW_OPCODE_ADD:
         if (inst->src[1].file != IMM)
            continue;

         /* a + 0.0 = a */
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }

         /* imm + imm: constant-fold the sum into src0. */
         if (inst->src[0].file == IMM) {
            assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].fixed_hw_reg.dw1.f += inst->src[1].fixed_hw_reg.dw1.f;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }
         break;
      case BRW_OPCODE_OR:
         /* a | a = a */
         if (inst->src[0].equals(inst->src[1])) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            progress = true;
            break;
         }
         break;
      case BRW_OPCODE_LRP:
         /* lrp(x, a, a) = a regardless of the interpolation factor. */
         if (inst->src[1].equals(inst->src[2])) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = inst->src[1];
            inst->src[1] = reg_undef;
            inst->src[2] = reg_undef;
            progress = true;
            break;
         }
         break;
      case BRW_OPCODE_CMP:
         /* -|a| >= 0 is only true when a == 0, so drop the modifiers and
          * compare for equality with zero instead.
          */
         if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
             inst->src[0].abs &&
             inst->src[0].negate &&
             inst->src[1].is_zero()) {
            inst->src[0].abs = false;
            inst->src[0].negate = false;
            inst->conditional_mod = BRW_CONDITIONAL_Z;
            progress = true;
            break;
         }
         break;
      case BRW_OPCODE_SEL:
         /* sel(a, a) = a, and the predicate no longer matters. */
         if (inst->src[0].equals(inst->src[1])) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            inst->predicate = BRW_PREDICATE_NONE;
            inst->predicate_inverse = false;
            progress = true;
         } else if (inst->saturate && inst->src[1].file == IMM) {
            /* A saturating SEL against an immediate that the saturate would
             * clamp anyway (min against >= 1.0, max against <= 0.0) reduces
             * to a saturated MOV of src0.
             */
            switch (inst->conditional_mod) {
            case BRW_CONDITIONAL_LE:
            case BRW_CONDITIONAL_L:
               switch (inst->src[1].type) {
               case BRW_REGISTER_TYPE_F:
                  if (inst->src[1].fixed_hw_reg.dw1.f >= 1.0f) {
                     inst->opcode = BRW_OPCODE_MOV;
                     inst->src[1] = reg_undef;
                     inst->conditional_mod = BRW_CONDITIONAL_NONE;
                     progress = true;
                  }
                  break;
               default:
                  break;
               }
               break;
            case BRW_CONDITIONAL_GE:
            case BRW_CONDITIONAL_G:
               switch (inst->src[1].type) {
               case BRW_REGISTER_TYPE_F:
                  if (inst->src[1].fixed_hw_reg.dw1.f <= 0.0f) {
                     inst->opcode = BRW_OPCODE_MOV;
                     inst->src[1] = reg_undef;
                     inst->conditional_mod = BRW_CONDITIONAL_NONE;
                     progress = true;
                  }
                  break;
               default:
                  break;
               }
            default:
               break;
            }
         }
         break;
      case BRW_OPCODE_MAD:
         if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
            /* a + 0*c or a + b*0 = a */
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = reg_undef;
            inst->src[2] = reg_undef;
            progress = true;
         } else if (inst->src[0].is_zero()) {
            /* 0 + b*c = b*c */
            inst->opcode = BRW_OPCODE_MUL;
            inst->src[0] = inst->src[2];
            inst->src[2] = reg_undef;
            progress = true;
         } else if (inst->src[1].is_one()) {
            /* a + 1*c = a + c */
            inst->opcode = BRW_OPCODE_ADD;
            inst->src[1] = inst->src[2];
            inst->src[2] = reg_undef;
            progress = true;
         } else if (inst->src[2].is_one()) {
            /* a + b*1 = a + b */
            inst->opcode = BRW_OPCODE_ADD;
            inst->src[2] = reg_undef;
            progress = true;
         } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
            /* Fold the immediate product so MAD becomes ADD. */
            inst->opcode = BRW_OPCODE_ADD;
            inst->src[1].fixed_hw_reg.dw1.f *= inst->src[2].fixed_hw_reg.dw1.f;
            inst->src[2] = reg_undef;
            progress = true;
         }
         break;
      case SHADER_OPCODE_RCP: {
         /* rcp(sqrt(x)) = rsq(x) when the sqrt immediately precedes us and
          * feeds this instruction directly.
          */
         fs_inst *prev = (fs_inst *)inst->prev;
         if (prev->opcode == SHADER_OPCODE_SQRT) {
            if (inst->src[0].equals(prev->dst)) {
               inst->opcode = SHADER_OPCODE_RSQ;
               inst->src[0] = prev->src[0];
               progress = true;
            }
         }
         break;
      }
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0])) {
            /* Broadcasting an already-uniform value is just a MOV. */
            inst->opcode = BRW_OPCODE_MOV;
            inst->sources = 1;
            inst->force_writemask_all = true;
            progress = true;
         } else if (inst->src[1].file == IMM) {
            /* A constant channel index lets us select the component
             * statically instead of at runtime.
             */
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = component(inst->src[0],
                                     inst->src[1].fixed_hw_reg.dw1.ud);
            inst->sources = 1;
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }

      /* Swap if src[0] is immediate. */
      if (progress && inst->is_commutative()) {
         if (inst->src[0].file == IMM) {
            fs_reg tmp = inst->src[1];
            inst->src[1] = inst->src[0];
            inst->src[0] = tmp;
         }
      }
   }
   return progress;
}
2081
2082 /**
2083 * Optimize sample messages that have constant zero values for the trailing
2084 * texture coordinates. We can just reduce the message length for these
2085 * instructions instead of reserving a register for it. Trailing parameters
2086 * that aren't sent default to zero anyway. This will cause the dead code
2087 * eliminator to remove the MOV instruction that would otherwise be emitted to
2088 * set up the zero value.
2089 */
2090 bool
2091 fs_visitor::opt_zero_samples()
2092 {
2093 /* Gen4 infers the texturing opcode based on the message length so we can't
2094 * change it.
2095 */
2096 if (devinfo->gen < 5)
2097 return false;
2098
2099 bool progress = false;
2100
2101 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2102 if (!inst->is_tex())
2103 continue;
2104
2105 fs_inst *load_payload = (fs_inst *) inst->prev;
2106
2107 if (load_payload->is_head_sentinel() ||
2108 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2109 continue;
2110
2111 /* We don't want to remove the message header or the first parameter.
2112 * Removing the first parameter is not allowed, see the Haswell PRM
2113 * volume 7, page 149:
2114 *
2115 * "Parameter 0 is required except for the sampleinfo message, which
2116 * has no parameter 0"
2117 */
2118 while (inst->mlen > inst->header_size + dispatch_width / 8 &&
2119 load_payload->src[(inst->mlen - inst->header_size) /
2120 (dispatch_width / 8) +
2121 inst->header_size - 1].is_zero()) {
2122 inst->mlen -= dispatch_width / 8;
2123 progress = true;
2124 }
2125 }
2126
2127 if (progress)
2128 invalidate_live_intervals();
2129
2130 return progress;
2131 }
2132
/**
 * Optimize sample messages which are followed by the final RT write.
 *
 * CHV, and GEN9+ can mark a texturing SEND instruction with EOT to have its
 * results sent directly to the framebuffer, bypassing the EU.  Recognize the
 * final texturing results copied to the framebuffer write payload and modify
 * them to write to the framebuffer directly.
 */
bool
fs_visitor::opt_sampler_eot()
{
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   /* Only fragment shaders end in an FB write. */
   if (stage != MESA_SHADER_FRAGMENT)
      return false;

   /* Hardware capability: CHV and Gen9+ only (see function comment). */
   if (devinfo->gen < 9 && !devinfo->is_cherryview)
      return false;

   /* FINISHME: It should be possible to implement this optimization when there
    * are multiple drawbuffers.
    */
   if (key->nr_color_regions != 1)
      return false;

   /* Look for a texturing instruction immediately before the final FB_WRITE. */
   fs_inst *fb_write = (fs_inst *) cfg->blocks[cfg->num_blocks - 1]->end();
   assert(fb_write->eot);
   assert(fb_write->opcode == FS_OPCODE_FB_WRITE);

   fs_inst *tex_inst = (fs_inst *) fb_write->prev;

   /* There wasn't one; nothing to do. */
   if (unlikely(tex_inst->is_head_sentinel()) || !tex_inst->is_tex())
      return false;

   /* This optimisation doesn't seem to work for textureGather for some
    * reason. I can't find any documentation or known workarounds to indicate
    * that this is expected, but considering that it is probably pretty
    * unlikely that a shader would directly write out the results from
    * textureGather we might as well just disable it.
    */
   if (tex_inst->opcode == SHADER_OPCODE_TG4 ||
       tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
      return false;

   /* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
    * It's very likely to be the previous instruction.
    */
   fs_inst *load_payload = (fs_inst *) tex_inst->prev;
   if (load_payload->is_head_sentinel() ||
       load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   assert(!tex_inst->eot); /* We can't get here twice */
   assert((tex_inst->offset & (0xff << 24)) == 0);

   /* Encode the render target index in the high byte of the message
    * descriptor offset, mark the sampler message as EOT and discard its
    * destination (the result now goes to the framebuffer, not the EU).
    */
   tex_inst->offset |= fb_write->target << 24;
   tex_inst->eot = true;
   tex_inst->dst = bld.null_reg_ud();
   fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);

   /* If a header is present, marking the eot is sufficient. Otherwise, we need
    * to create a new LOAD_PAYLOAD command with the same sources and a space
    * saved for the header. Using a new destination register not only makes sure
    * we have enough space, but it will make sure the dead code eliminator kills
    * the instruction that this will replace.
    */
   if (tex_inst->header_size != 0)
      return true;

   fs_reg send_header = bld.vgrf(BRW_REGISTER_TYPE_F,
                                 load_payload->sources + 1);
   fs_reg *new_sources =
      ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);

   /* Source 0 becomes the (uninitialized) header slot; the original payload
    * sources shift up by one.
    */
   new_sources[0] = fs_reg();
   for (int i = 0; i < load_payload->sources; i++)
      new_sources[i+1] = load_payload->src[i];

   /* The LOAD_PAYLOAD helper seems like the obvious choice here. However, it
    * requires a lot of information about the sources to appropriately figure
    * out the number of registers needed to be used. Given this stage in our
    * optimization, we may not have the appropriate GRFs required by
    * LOAD_PAYLOAD at this point (copy propagation). Therefore, we need to
    * manually emit the instruction.
    */
   fs_inst *new_load_payload = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD,
                                                    load_payload->exec_size,
                                                    send_header,
                                                    new_sources,
                                                    load_payload->sources + 1);

   /* One extra register for the header, both in the payload and message. */
   new_load_payload->regs_written = load_payload->regs_written + 1;
   new_load_payload->header_size = 1;
   tex_inst->mlen++;
   tex_inst->header_size = 1;
   tex_inst->insert_before(cfg->blocks[cfg->num_blocks - 1], new_load_payload);
   tex_inst->src[0] = send_header;

   return true;
}
2235
/**
 * Rename virtual GRFs that are completely overwritten outside of control
 * flow, giving each such full redefinition a fresh register number and
 * patching up subsequent reads.  This splits re-used temporaries into
 * independent live ranges, which gives later passes (e.g. the register
 * allocator) more freedom.
 *
 * Returns true if any register was renamed.
 */
bool
fs_visitor::opt_register_renaming()
{
   bool progress = false;
   /* Control-flow nesting depth; renaming is only safe at depth 0, since a
    * write inside a conditional doesn't necessarily reach all later reads.
    */
   int depth = 0;

   /* remap[r] == -1: register r not seen yet;
    * remap[r] == r:  seen, currently mapped to itself;
    * otherwise:      reads of r must be rewritten to remap[r].
    */
   int remap[alloc.count];
   memset(remap, -1, sizeof(int) * alloc.count);

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
         depth++;
      } else if (inst->opcode == BRW_OPCODE_ENDIF ||
                 inst->opcode == BRW_OPCODE_WHILE) {
         depth--;
      }

      /* Rewrite instruction sources. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF &&
             remap[inst->src[i].reg] != -1 &&
             remap[inst->src[i].reg] != inst->src[i].reg) {
            inst->src[i].reg = remap[inst->src[i].reg];
            progress = true;
         }
      }

      const int dst = inst->dst.reg;

      if (depth == 0 &&
          inst->dst.file == GRF &&
          alloc.sizes[inst->dst.reg] == inst->exec_size / 8 &&
          !inst->is_partial_write()) {
         /* Full redefinition outside control flow: first time keep the
          * original number, every later one gets a fresh register.
          */
         if (remap[dst] == -1) {
            remap[dst] = dst;
         } else {
            remap[dst] = alloc.allocate(inst->exec_size / 8);
            inst->dst.reg = remap[dst];
            progress = true;
         }
      } else if (inst->dst.file == GRF &&
                 remap[dst] != -1 &&
                 remap[dst] != dst) {
         /* Partial or in-control-flow write to a renamed register: follow
          * the current mapping rather than starting a new range.
          */
         inst->dst.reg = remap[dst];
         progress = true;
      }
   }

   if (progress) {
      invalidate_live_intervals();

      /* Keep the cached barycentric/delta registers in sync with any
       * renaming that affected them.
       */
      for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
         if (delta_xy[i].file == GRF && remap[delta_xy[i].reg] != -1) {
            delta_xy[i].reg = remap[delta_xy[i].reg];
         }
      }
   }

   return progress;
}
2296
2297 /**
2298 * Remove redundant or useless discard jumps.
2299 *
2300 * For example, we can eliminate jumps in the following sequence:
2301 *
2302 * discard-jump (redundant with the next jump)
2303 * discard-jump (useless; jumps to the next instruction)
2304 * placeholder-halt
2305 */
2306 bool
2307 fs_visitor::opt_redundant_discard_jumps()
2308 {
2309 bool progress = false;
2310
2311 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2312
2313 fs_inst *placeholder_halt = NULL;
2314 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2315 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2316 placeholder_halt = inst;
2317 break;
2318 }
2319 }
2320
2321 if (!placeholder_halt)
2322 return false;
2323
2324 /* Delete any HALTs immediately before the placeholder halt. */
2325 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2326 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2327 prev = (fs_inst *) placeholder_halt->prev) {
2328 prev->remove(last_bblock);
2329 progress = true;
2330 }
2331
2332 if (progress)
2333 invalidate_live_intervals();
2334
2335 return progress;
2336 }
2337
/**
 * Try to eliminate MOVs from GRFs into MRFs by rewriting the instruction
 * that computed the GRF value to write directly into the MRF instead.
 * Only meaningful on Gen < 7, where message payloads live in MRFs.
 *
 * Returns true if any MOV was eliminated.
 */
bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   /* No MRFs on Gen >= 7. */
   if (devinfo->gen >= 7)
      return false;

   calculate_live_intervals();

   foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      /* Only a plain, full-width, unmodified GRF -> MRF copy is a
       * candidate; any source modifier, type conversion, or partial write
       * couldn't be folded into the producing instruction.
       */
      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->is_partial_write() ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate ||
          !inst->src[0].is_contiguous() ||
          inst->src[0].subreg_offset)
         continue;

      /* Work out which hardware MRF registers are written by this
       * instruction.
       */
      int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
      int mrf_high;
      if (inst->dst.reg & BRW_MRF_COMPR4) {
         mrf_high = mrf_low + 4;
      } else if (inst->exec_size == 16) {
         mrf_high = mrf_low + 1;
      } else {
         mrf_high = mrf_low;
      }

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_end[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            /* If this one instruction didn't populate all the
             * channels, bail.  We might be able to rewrite everything
             * that writes that reg, but it would require smarter
             * tracking to delay the rewriting until complete success.
             */
            if (scan_inst->is_partial_write())
               break;

            /* Things returning more than one register would need us to
             * understand coalescing out more than one MOV at a time.
             */
            if (scan_inst->regs_written > scan_inst->exec_size / 8)
               break;

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (devinfo->gen == 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value.  Retarget
                * it at the MRF and drop the now-redundant MOV, folding
                * the MOV's saturate (if any) into the producer.
                */
               scan_inst->dst.file = MRF;
               scan_inst->dst.reg = inst->dst.reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove(block);
               progress = true;
            }
            break;
         }

         /* We don't handle control flow here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (block->start() == scan_inst)
            break;

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < scan_inst->sources; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF) {
            /* If somebody else writes our MRF here, we can't
             * compute-to-MRF before that.
             */
            int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
            int scan_mrf_high;

            if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
               scan_mrf_high = scan_mrf_low + 4;
            } else if (scan_inst->exec_size == 16) {
               scan_mrf_high = scan_mrf_low + 1;
            } else {
               scan_mrf_high = scan_mrf_low;
            }

            if (mrf_low == scan_mrf_low ||
                mrf_low == scan_mrf_high ||
                mrf_high == scan_mrf_low ||
                mrf_high == scan_mrf_high) {
               break;
            }
         }

         if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (mrf_low >= scan_inst->base_mrf &&
                mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
            if (mrf_high >= scan_inst->base_mrf &&
                mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
2497
2498 /**
2499 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2500 * flow. We could probably do better here with some form of divergence
2501 * analysis.
2502 */
2503 bool
2504 fs_visitor::eliminate_find_live_channel()
2505 {
2506 bool progress = false;
2507 unsigned depth = 0;
2508
2509 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2510 switch (inst->opcode) {
2511 case BRW_OPCODE_IF:
2512 case BRW_OPCODE_DO:
2513 depth++;
2514 break;
2515
2516 case BRW_OPCODE_ENDIF:
2517 case BRW_OPCODE_WHILE:
2518 depth--;
2519 break;
2520
2521 case FS_OPCODE_DISCARD_JUMP:
2522 /* This can potentially make control flow non-uniform until the end
2523 * of the program.
2524 */
2525 return progress;
2526
2527 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2528 if (depth == 0) {
2529 inst->opcode = BRW_OPCODE_MOV;
2530 inst->src[0] = fs_reg(0);
2531 inst->sources = 1;
2532 inst->force_writemask_all = true;
2533 progress = true;
2534 }
2535 break;
2536
2537 default:
2538 break;
2539 }
2540 }
2541
2542 return progress;
2543 }
2544
/**
 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
 * instructions to FS_OPCODE_REP_FB_WRITE.
 */
void
fs_visitor::emit_repclear_shader()
{
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   int base_mrf = 1;
   int color_mrf = base_mrf + 2;

   /* Copy the clear color (uniform 0) into the color MRF for the write. */
   fs_inst *mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
                                     fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));

   fs_inst *write;
   if (key->nr_color_regions == 1) {
      /* Single render target: headerless replicated-data write. */
      write = bld.emit(FS_OPCODE_REP_FB_WRITE);
      write->saturate = key->clamp_fragment_color;
      write->base_mrf = color_mrf;
      write->target = 0;
      write->header_size = 0;
      write->mlen = 1;
   } else {
      /* Multiple render targets: one write per target, each carrying a
       * 2-register header (hence the larger message length).
       */
      assume(key->nr_color_regions > 0);
      for (int i = 0; i < key->nr_color_regions; ++i) {
         write = bld.emit(FS_OPCODE_REP_FB_WRITE);
         write->saturate = key->clamp_fragment_color;
         write->base_mrf = base_mrf;
         write->target = i;
         write->header_size = 2;
         write->mlen = 3;
      }
   }
   /* Only the last write ends the thread. */
   write->eot = true;

   calculate_cfg();

   assign_constant_locations();
   assign_curb_setup();

   /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
   assert(mov->src[0].file == HW_REG);
   mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
}
2589
/**
 * Walks through basic blocks, looking for repeated MRF writes and
 * removing the later ones.
 */
bool
fs_visitor::remove_duplicate_mrf_writes()
{
   /* Most recent full MOV into each MRF, or NULL if unknown/overwritten. */
   fs_inst *last_mrf_move[16];
   bool progress = false;

   /* We'd need to update the MRF tracking to cover the register pairs
    * written by compressed instructions; just bail in SIMD16 instead.
    */
   if (dispatch_width == 16)
      return false;

   memset(last_mrf_move, 0, sizeof(last_mrf_move));

   foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
      /* Tracking is only valid within straight-line code. */
      if (inst->is_control_flow()) {
         memset(last_mrf_move, 0, sizeof(last_mrf_move));
      }

      /* If this MOV is identical to the last recorded write of the same
       * MRF, it is redundant and can be dropped.
       */
      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF) {
         fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
         if (prev_inst && inst->equals(prev_inst)) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      /* Clear out the last-write records for MRFs that were overwritten. */
      if (inst->dst.file == MRF) {
         last_mrf_move[inst->dst.reg] = NULL;
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         /* Found a SEND instruction, which will include two or fewer
          * implied MRF writes.  We could do better here.
          */
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            last_mrf_move[inst->base_mrf + i] = NULL;
         }
      }

      /* Clear out any MRF move records whose sources got overwritten. */
      if (inst->dst.file == GRF) {
         for (unsigned int i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
            if (last_mrf_move[i] &&
                last_mrf_move[i]->src[0].reg == inst->dst.reg) {
               last_mrf_move[i] = NULL;
            }
         }
      }

      /* Record full GRF -> MRF copies as candidates for later matching. */
      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF &&
          inst->src[0].file == GRF &&
          !inst->is_partial_write()) {
         last_mrf_move[inst->dst.reg] = inst;
      }
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
2658
2659 static void
2660 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
2661 {
2662 /* Clear the flag for registers that actually got read (as expected). */
2663 for (int i = 0; i < inst->sources; i++) {
2664 int grf;
2665 if (inst->src[i].file == GRF) {
2666 grf = inst->src[i].reg;
2667 } else if (inst->src[i].file == HW_REG &&
2668 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
2669 grf = inst->src[i].fixed_hw_reg.nr;
2670 } else {
2671 continue;
2672 }
2673
2674 if (grf >= first_grf &&
2675 grf < first_grf + grf_len) {
2676 deps[grf - first_grf] = false;
2677 if (inst->exec_size == 16)
2678 deps[grf - first_grf + 1] = false;
2679 }
2680 }
2681 }
2682
/**
 * Implements this workaround for the original 965:
 *
 *     "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
 *      check for post destination dependencies on this instruction, software
 *      must ensure that there is no destination hazard for the case of ‘write
 *      followed by a posted write’ shown in the following example.
 *
 *      1. mov r3 0
 *      2. send r3.xy <rest of send instruction>
 *      3. mov r2 r3
 *
 *      Due to no post-destination dependency check on the ‘send’, the above
 *      code sequence could have two instructions (1 and 2) in flight at the
 *      same time that both consider ‘r3’ as the target of their final writes."
 *
 * \param block the basic block containing \p inst
 * \param inst  the SEND instruction whose destination needs protecting
 */
void
fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
                                                        fs_inst *inst)
{
   int write_len = inst->regs_written;
   int first_write_grf = inst->dst.reg;
   /* One flag per destination GRF that may still carry an outstanding
    * (unsynchronized) prior write.
    */
   bool needs_dep[BRW_MAX_MRF];
   assert(write_len < (int)sizeof(needs_dep) - 1);

   memset(needs_dep, false, sizeof(needs_dep));
   memset(needs_dep, true, write_len);

   clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);

   /* Walk backwards looking for writes to registers we're writing which
    * aren't read since being written.  If we hit the start of the program,
    * we assume that there are no outstanding dependencies on entry to the
    * program.
    */
   foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
      /* If we hit control flow, assume that there *are* outstanding
       * dependencies, and force their cleanup before our instruction.
       */
      if (block->start() == scan_inst) {
         for (int i = 0; i < write_len; i++) {
            if (needs_dep[i])
               DEP_RESOLVE_MOV(bld.at(block, inst), first_write_grf + i);
         }
         return;
      }

      /* We insert our reads as late as possible on the assumption that any
       * instruction but a MOV that might have left us an outstanding
       * dependency has more latency than a MOV.
       */
      if (scan_inst->dst.file == GRF) {
         for (int i = 0; i < scan_inst->regs_written; i++) {
            int reg = scan_inst->dst.reg + i;

            if (reg >= first_write_grf &&
                reg < first_write_grf + write_len &&
                needs_dep[reg - first_write_grf]) {
               DEP_RESOLVE_MOV(bld.at(block, inst), reg);
               needs_dep[reg - first_write_grf] = false;
               /* A SIMD16 write covers a register pair. */
               if (scan_inst->exec_size == 16)
                  needs_dep[reg - first_write_grf + 1] = false;
            }
         }
      }

      /* Clear the flag for registers that actually got read (as expected). */
      clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);

      /* Continue the loop only if we haven't resolved all the dependencies */
      int i;
      for (i = 0; i < write_len; i++) {
         if (needs_dep[i])
            break;
      }
      if (i == write_len)
         return;
   }
}
2762
/**
 * Implements this workaround for the original 965:
 *
 *     "[DevBW, DevCL] Errata: A destination register from a send can not be
 *      used as a destination register until after it has been sourced by an
 *      instruction with a different destination register."
 *
 * \param block the basic block containing \p inst
 * \param inst  the SEND instruction whose destination needs protecting
 */
void
fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
{
   int write_len = inst->regs_written;
   int first_write_grf = inst->dst.reg;
   /* One flag per SEND destination GRF still awaiting a read before it may
    * be overwritten.
    */
   bool needs_dep[BRW_MAX_MRF];
   assert(write_len < (int)sizeof(needs_dep) - 1);

   memset(needs_dep, false, sizeof(needs_dep));
   memset(needs_dep, true, write_len);
   /* Walk forwards looking for writes to registers we're writing which aren't
    * read before being written.
    */
   foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst, block) {
      /* If we hit control flow, force resolve all remaining dependencies. */
      if (block->end() == scan_inst) {
         for (int i = 0; i < write_len; i++) {
            if (needs_dep[i])
               DEP_RESOLVE_MOV(bld.at(block, scan_inst), first_write_grf + i);
         }
         return;
      }

      /* Clear the flag for registers that actually got read (as expected). */
      clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);

      /* We insert our reads as late as possible since they're reading the
       * result of a SEND, which has massive latency.
       */
      if (scan_inst->dst.file == GRF &&
          scan_inst->dst.reg >= first_write_grf &&
          scan_inst->dst.reg < first_write_grf + write_len &&
          needs_dep[scan_inst->dst.reg - first_write_grf]) {
         DEP_RESOLVE_MOV(bld.at(block, scan_inst), scan_inst->dst.reg);
         needs_dep[scan_inst->dst.reg - first_write_grf] = false;
      }

      /* Continue the loop only if we haven't resolved all the dependencies */
      int i;
      for (i = 0; i < write_len; i++) {
         if (needs_dep[i])
            break;
      }
      if (i == write_len)
         return;
   }
}
2817
2818 void
2819 fs_visitor::insert_gen4_send_dependency_workarounds()
2820 {
2821 if (devinfo->gen != 4 || devinfo->is_g4x)
2822 return;
2823
2824 bool progress = false;
2825
2826 /* Note that we're done with register allocation, so GRF fs_regs always
2827 * have a .reg_offset of 0.
2828 */
2829
2830 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2831 if (inst->mlen != 0 && inst->dst.file == GRF) {
2832 insert_gen4_pre_send_dependency_workarounds(block, inst);
2833 insert_gen4_post_send_dependency_workarounds(block, inst);
2834 progress = true;
2835 }
2836 }
2837
2838 if (progress)
2839 invalidate_live_intervals();
2840 }
2841
/**
 * Turns the generic expression-style uniform pull constant load instruction
 * into a hardware-specific series of instructions for loading a pull
 * constant.
 *
 * The expression style allows the CSE pass before this to optimize out
 * repeated loads from the same offset, and gives the pre-register-allocation
 * scheduling full flexibility, while the conversion to native instructions
 * allows the post-register-allocation scheduler the best information
 * possible.
 *
 * Note that execution masking for setting up pull constant loads is special:
 * the channels that need to be written are unrelated to the current execution
 * mask, since a later instruction will use one of the result channels as a
 * source operand for all 8 or 16 of its channels.
 */
void
fs_visitor::lower_uniform_pull_constant_loads()
{
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
         continue;

      if (devinfo->gen >= 7) {
         /* The offset arg before was a vec4-aligned byte offset.  We need to
          * turn it into a dword offset.
          */
         fs_reg const_offset_reg = inst->src[1];
         assert(const_offset_reg.file == IMM &&
                const_offset_reg.type == BRW_REGISTER_TYPE_UD);
         const_offset_reg.fixed_hw_reg.dw1.ud /= 4;

         fs_reg payload, offset;
         if (devinfo->gen >= 9) {
            /* We have to use a message header on Skylake to get SIMD4x2
             * mode.  Reserve space for the register.
             */
            offset = payload = fs_reg(GRF, alloc.allocate(2));
            offset.reg_offset++;
            inst->mlen = 2;
         } else {
            offset = payload = fs_reg(GRF, alloc.allocate(1));
            inst->mlen = 1;
         }

         /* This is actually going to be a MOV, but since only the first dword
          * is accessed, we have a special opcode to do just that one.  Note
          * that this needs to be an operation that will be considered a def
          * by live variable analysis, or register allocation will explode.
          */
         fs_inst *setup = new(mem_ctx) fs_inst(FS_OPCODE_SET_SIMD4X2_OFFSET,
                                               8, offset, const_offset_reg);
         setup->force_writemask_all = true;

         /* Preserve the debug annotation of the instruction being lowered. */
         setup->ir = inst->ir;
         setup->annotation = inst->annotation;
         inst->insert_before(block, setup);

         /* Similarly, this will only populate the first 4 channels of the
          * result register (since we only use smear values from 0-3), but we
          * don't tell the optimizer.
          */
         inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
         inst->src[1] = payload;
         inst->base_mrf = -1;

         invalidate_live_intervals();
      } else {
         /* Before register allocation, we didn't tell the scheduler about the
          * MRF we use.  We know it's safe to use this MRF because nothing
          * else does except for register spill/unspill, which generates and
          * uses its MRF within a single IR instruction.
          */
         inst->base_mrf = 14;
         inst->mlen = 1;
      }
   }
}
2920
/**
 * Expand SHADER_OPCODE_LOAD_PAYLOAD virtual instructions into the series of
 * MOVs that actually assemble the message payload.  Header sources are
 * copied SIMD8 with all channels enabled; on gen <= 5, FB-write color
 * payloads written with COMPR4 get special interleaved handling.
 *
 * Returns true if any LOAD_PAYLOAD was lowered.
 */
bool
fs_visitor::lower_load_payload()
{
   bool progress = false;

   foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
      if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
         continue;

      assert(inst->dst.file == MRF || inst->dst.file == GRF);
      assert(inst->saturate == false);
      fs_reg dst = inst->dst;

      /* Get rid of COMPR4.  We'll add it back in if we need it */
      if (dst.file == MRF)
         dst.reg = dst.reg & ~BRW_MRF_COMPR4;

      /* Header copies are always SIMD8 with all channels enabled. */
      const fs_builder hbld = bld.exec_all().group(8, 0).at(block, inst);

      for (uint8_t i = 0; i < inst->header_size; i++) {
         if (inst->src[i].file != BAD_FILE) {
            fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
            fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
            hbld.MOV(mov_dst, mov_src);
         }
         dst = offset(dst, hbld, 1);
      }

      /* Non-header copies inherit the original instruction's execution
       * controls.
       */
      const fs_builder ibld = bld.exec_all(inst->force_writemask_all)
                                 .group(inst->exec_size, inst->force_sechalf)
                                 .at(block, inst);

      if (inst->dst.file == MRF && (inst->dst.reg & BRW_MRF_COMPR4) &&
          inst->exec_size > 8) {
         /* In this case, the payload portion of the LOAD_PAYLOAD isn't
          * a straightforward copy.  Instead, the result of the
          * LOAD_PAYLOAD is treated as interleaved and the first four
          * non-header sources are unpacked as:
          *
          * m + 0: r0
          * m + 1: g0
          * m + 2: b0
          * m + 3: a0
          * m + 4: r1
          * m + 5: g1
          * m + 6: b1
          * m + 7: a1
          *
          * This is used for gen <= 5 fb writes.
          */
         assert(inst->exec_size == 16);
         assert(inst->header_size + 4 <= inst->sources);
         for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
            if (inst->src[i].file != BAD_FILE) {
               if (devinfo->has_compr4) {
                  fs_reg compr4_dst = retype(dst, inst->src[i].type);
                  compr4_dst.reg |= BRW_MRF_COMPR4;
                  ibld.MOV(compr4_dst, inst->src[i]);
               } else {
                  /* Platform doesn't have COMPR4.  We have to fake it */
                  fs_reg mov_dst = retype(dst, inst->src[i].type);
                  ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
                  mov_dst.reg += 4;
                  ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
               }
            }

            dst.reg++;
         }

         /* The loop above only ever incremented us through the first set
          * of 4 registers.  However, thanks to the magic of COMPR4, we
          * actually wrote to the first 8 registers, so we need to take
          * that into account now.
          */
         dst.reg += 4;

         /* The COMPR4 code took care of the first 4 sources.  We'll let
          * the regular path handle any remaining sources.  Yes, we are
          * modifying the instruction but we're about to delete it so
          * this really doesn't hurt anything.
          */
         inst->header_size += 4;
      }

      /* Straightforward copies for all remaining sources. */
      for (uint8_t i = inst->header_size; i < inst->sources; i++) {
         if (inst->src[i].file != BAD_FILE)
            ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
         dst = offset(dst, ibld, 1);
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_live_intervals();

   return progress;
}
3021
3022 bool
3023 fs_visitor::lower_integer_multiplication()
3024 {
3025 bool progress = false;
3026
3027 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit operation
3028 * directly, but Cherryview cannot.
3029 */
3030 if (devinfo->gen >= 8 && !devinfo->is_cherryview)
3031 return false;
3032
3033 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3034 if (inst->opcode != BRW_OPCODE_MUL ||
3035 inst->dst.is_accumulator() ||
3036 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3037 inst->dst.type != BRW_REGISTER_TYPE_UD))
3038 continue;
3039
3040 const fs_builder ibld = bld.at(block, inst);
3041
3042 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3043 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3044 * src1 are used.
3045 *
3046 * If multiplying by an immediate value that fits in 16-bits, do a
3047 * single MUL instruction with that value in the proper location.
3048 */
3049 if (inst->src[1].file == IMM &&
3050 inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
3051 if (devinfo->gen < 7) {
3052 fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
3053 inst->dst.type);
3054 ibld.MOV(imm, inst->src[1]);
3055 ibld.MUL(inst->dst, imm, inst->src[0]);
3056 } else {
3057 ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
3058 }
3059 } else {
3060 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3061 * do 32-bit integer multiplication in one instruction, but instead
3062 * must do a sequence (which actually calculates a 64-bit result):
3063 *
3064 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3065 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3066 * mov(8) g2<1>D acc0<8,8,1>D
3067 *
3068 * But on Gen > 6, the ability to use second accumulator register
3069 * (acc1) for non-float data types was removed, preventing a simple
3070 * implementation in SIMD16. A 16-channel result can be calculated by
3071 * executing the three instructions twice in SIMD8, once with quarter
3072 * control of 1Q for the first eight channels and again with 2Q for
3073 * the second eight channels.
3074 *
3075 * Which accumulator register is implicitly accessed (by AccWrEnable
3076 * for instance) is determined by the quarter control. Unfortunately
3077 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3078 * implicit accumulator access by an instruction with 2Q will access
3079 * acc1 regardless of whether the data type is usable in acc1.
3080 *
3081 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3082 * integer data types.
3083 *
3084 * Since we only want the low 32-bits of the result, we can do two
3085 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3086 * adjust the high result and add them (like the mach is doing):
3087 *
3088 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3089 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3090 * shl(8) g9<1>D g8<8,8,1>D 16D
3091 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3092 *
3093 * We avoid the shl instruction by realizing that we only want to add
3094 * the low 16-bits of the "high" result to the high 16-bits of the
3095 * "low" result and using proper regioning on the add:
3096 *
3097 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3098 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3099 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3100 *
3101 * Since it does not use the (single) accumulator register, we can
3102 * schedule multi-component multiplications much better.
3103 */
3104
3105 if (inst->conditional_mod && inst->dst.is_null()) {
3106 inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
3107 inst->dst.type);
3108 }
3109 fs_reg low = inst->dst;
3110 fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
3111 inst->dst.type);
3112
3113 if (devinfo->gen >= 7) {
3114 fs_reg src1_0_w = inst->src[1];
3115 fs_reg src1_1_w = inst->src[1];
3116
3117 if (inst->src[1].file == IMM) {
3118 src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
3119 src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
3120 } else {
3121 src1_0_w.type = BRW_REGISTER_TYPE_UW;
3122 if (src1_0_w.stride != 0) {
3123 assert(src1_0_w.stride == 1);
3124 src1_0_w.stride = 2;
3125 }
3126
3127 src1_1_w.type = BRW_REGISTER_TYPE_UW;
3128 if (src1_1_w.stride != 0) {
3129 assert(src1_1_w.stride == 1);
3130 src1_1_w.stride = 2;
3131 }
3132 src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3133 }
3134 ibld.MUL(low, inst->src[0], src1_0_w);
3135 ibld.MUL(high, inst->src[0], src1_1_w);
3136 } else {
3137 fs_reg src0_0_w = inst->src[0];
3138 fs_reg src0_1_w = inst->src[0];
3139
3140 src0_0_w.type = BRW_REGISTER_TYPE_UW;
3141 if (src0_0_w.stride != 0) {
3142 assert(src0_0_w.stride == 1);
3143 src0_0_w.stride = 2;
3144 }
3145
3146 src0_1_w.type = BRW_REGISTER_TYPE_UW;
3147 if (src0_1_w.stride != 0) {
3148 assert(src0_1_w.stride == 1);
3149 src0_1_w.stride = 2;
3150 }
3151 src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3152
3153 ibld.MUL(low, src0_0_w, inst->src[1]);
3154 ibld.MUL(high, src0_1_w, inst->src[1]);
3155 }
3156
3157 fs_reg dst = inst->dst;
3158 dst.type = BRW_REGISTER_TYPE_UW;
3159 dst.subreg_offset = 2;
3160 dst.stride = 2;
3161
3162 high.type = BRW_REGISTER_TYPE_UW;
3163 high.stride = 2;
3164
3165 low.type = BRW_REGISTER_TYPE_UW;
3166 low.subreg_offset = 2;
3167 low.stride = 2;
3168
3169 ibld.ADD(dst, low, high);
3170
3171 if (inst->conditional_mod) {
3172 fs_reg null(retype(ibld.null_reg_f(), inst->dst.type));
3173 set_condmod(inst->conditional_mod,
3174 ibld.MOV(null, inst->dst));
3175 }
3176 }
3177
3178 inst->remove(block);
3179 progress = true;
3180 }
3181
3182 if (progress)
3183 invalidate_live_intervals();
3184
3185 return progress;
3186 }
3187
/** Convenience overload: dump the instruction list to stderr. */
void
fs_visitor::dump_instructions()
{
   dump_instructions(NULL);
}
3193
3194 void
3195 fs_visitor::dump_instructions(const char *name)
3196 {
3197 FILE *file = stderr;
3198 if (name && geteuid() != 0) {
3199 file = fopen(name, "w");
3200 if (!file)
3201 file = stderr;
3202 }
3203
3204 if (cfg) {
3205 calculate_register_pressure();
3206 int ip = 0, max_pressure = 0;
3207 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
3208 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
3209 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
3210 dump_instruction(inst, file);
3211 ip++;
3212 }
3213 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
3214 } else {
3215 int ip = 0;
3216 foreach_in_list(backend_instruction, inst, &instructions) {
3217 fprintf(file, "%4d: ", ip++);
3218 dump_instruction(inst, file);
3219 }
3220 }
3221
3222 if (file != stderr) {
3223 fclose(file);
3224 }
3225 }
3226
/** Convenience overload: dump a single instruction to stderr. */
void
fs_visitor::dump_instruction(backend_instruction *be_inst)
{
   dump_instruction(be_inst, stderr);
}
3232
/**
 * Pretty-print one fs_inst to \p file in roughly assembly-like form:
 * predicate, opcode (with .sat/.cmod decorations), exec size, message
 * length, then the destination and each source operand with its type.
 */
void
fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
{
   fs_inst *inst = (fs_inst *)be_inst;

   /* Predication prefix, e.g. "(+f0.1) " or "(-f0.0) ". */
   if (inst->predicate) {
      fprintf(file, "(%cf0.%d) ",
             inst->predicate_inverse ? '-' : '+',
             inst->flag_subreg);
   }

   fprintf(file, "%s", brw_instruction_name(inst->opcode));
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      /* Print the flag register unless it's implied: on Gen5+, SEL/IF/WHILE
       * with a conditional mod don't write the flag.
       */
      if (!inst->predicate &&
          (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                              inst->opcode != BRW_OPCODE_IF &&
                              inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f0.%d", inst->flag_subreg);
      }
   }
   fprintf(file, "(%d) ", inst->exec_size);

   if (inst->mlen) {
      fprintf(file, "(mlen: %d) ", inst->mlen);
   }

   /* Destination operand.  UNIFORM/ATTR destinations are invalid, hence
    * the ***...*** markers to make them stand out.
    */
   switch (inst->dst.file) {
   case GRF:
      fprintf(file, "vgrf%d", inst->dst.reg);
      if (alloc.sizes[inst->dst.reg] != inst->regs_written ||
          inst->dst.subreg_offset)
         fprintf(file, "+%d.%d",
                 inst->dst.reg_offset, inst->dst.subreg_offset);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.reg);
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case UNIFORM:
      fprintf(file, "***u%d***", inst->dst.reg + inst->dst.reg_offset);
      break;
   case ATTR:
      fprintf(file, "***attr%d***", inst->dst.reg + inst->dst.reg_offset);
      break;
   case HW_REG:
      /* Architecture registers get their conventional short names. */
      if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         switch (inst->dst.fixed_hw_reg.nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                            inst->dst.fixed_hw_reg.subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
                               inst->dst.fixed_hw_reg.subnr);
            break;
         }
      } else {
         fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
      }
      if (inst->dst.fixed_hw_reg.subnr)
         fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
      break;
   default:
      fprintf(file, "???");
      break;
   }
   fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));

   /* Source operands.  Negate/abs modifiers wrap the operand ("-|x|"). */
   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case GRF:
         fprintf(file, "vgrf%d", inst->src[i].reg);
         if (alloc.sizes[inst->src[i].reg] != (unsigned)inst->regs_read(i) ||
             inst->src[i].subreg_offset)
            fprintf(file, "+%d.%d", inst->src[i].reg_offset,
                    inst->src[i].subreg_offset);
         break;
      case MRF:
         /* MRF sources are invalid; flag them loudly. */
         fprintf(file, "***m%d***", inst->src[i].reg);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].reg + inst->src[i].reg_offset);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].reg + inst->src[i].reg_offset);
         if (inst->src[i].reladdr) {
            fprintf(file, "+reladdr");
         } else if (inst->src[i].subreg_offset) {
            fprintf(file, "+%d.%d", inst->src[i].reg_offset,
                    inst->src[i].subreg_offset);
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case IMM:
         /* Immediates are printed with a type suffix (f/d/u) or, for VF,
          * expanded into their four restricted-float components.
          */
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%ff", inst->src[i].fixed_hw_reg.dw1.f);
            break;
         case BRW_REGISTER_TYPE_W:
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dd", inst->src[i].fixed_hw_reg.dw1.d);
            break;
         case BRW_REGISTER_TYPE_UW:
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uu", inst->src[i].fixed_hw_reg.dw1.ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 0) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 8) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case HW_REG:
         if (inst->src[i].fixed_hw_reg.negate)
            fprintf(file, "-");
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            switch (inst->src[i].fixed_hw_reg.nr) {
            case BRW_ARF_NULL:
               fprintf(file, "null");
               break;
            case BRW_ARF_ADDRESS:
               fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_ACCUMULATOR:
               fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
               break;
            case BRW_ARF_FLAG:
               fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                               inst->src[i].fixed_hw_reg.subnr);
               break;
            default:
               fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
                                  inst->src[i].fixed_hw_reg.subnr);
               break;
            }
         } else {
            fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
         }
         if (inst->src[i].fixed_hw_reg.subnr)
            fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
         if (inst->src[i].fixed_hw_reg.abs)
            fprintf(file, "|");
         break;
      default:
         fprintf(file, "???");
         break;
      }
      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
      }

      if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   fprintf(file, " ");

   /* In SIMD16 dispatch, SIMD8 instructions execute on one half of the
    * channels; say which half.
    */
   if (dispatch_width == 16 && inst->exec_size == 8) {
      if (inst->force_sechalf)
         fprintf(file, "2ndhalf ");
      else
         fprintf(file, "1sthalf ");
   }

   fprintf(file, "\n");
}
3430
3431 /**
3432 * Possibly returns an instruction that set up @param reg.
3433 *
3434 * Sometimes we want to take the result of some expression/variable
3435 * dereference tree and rewrite the instruction generating the result
3436 * of the tree. When processing the tree, we know that the
3437 * instructions generated are all writing temporaries that are dead
3438 * outside of this tree. So, if we have some instructions that write
3439 * a temporary, we're free to point that temp write somewhere else.
3440 *
3441 * Note that this doesn't guarantee that the instruction generated
3442 * only reg -- it might be the size=4 destination of a texture instruction.
3443 */
3444 fs_inst *
3445 fs_visitor::get_instruction_generating_reg(fs_inst *start,
3446 fs_inst *end,
3447 const fs_reg &reg)
3448 {
3449 if (end == start ||
3450 end->is_partial_write() ||
3451 reg.reladdr ||
3452 !reg.equals(end->dst)) {
3453 return NULL;
3454 } else {
3455 return end;
3456 }
3457 }
3458
/**
 * Lay out the Gen6+ fragment shader thread payload: walk the fixed payload
 * register order defined by the hardware and record, in payload.*, where
 * each enabled input lives, accumulating payload.num_regs as we go.
 */
void
fs_visitor::setup_payload_gen6()
{
   bool uses_depth =
      (prog->InputsRead & (1 << VARYING_SLOT_POS)) != 0;
   unsigned barycentric_interp_modes =
      (stage == MESA_SHADER_FRAGMENT) ?
      ((brw_wm_prog_data*) this->prog_data)->barycentric_interp_modes : 0;

   assert(devinfo->gen >= 6);

   /* R0-1: masks, pixel X/Y coordinates. */
   payload.num_regs = 2;
   /* R2: only for 32-pixel dispatch.*/

   /* R3-26: barycentric interpolation coordinates.  These appear in the
    * same order that they appear in the brw_wm_barycentric_interp_mode
    * enum.  Each set of coordinates occupies 2 registers if dispatch width
    * == 8 and 4 registers if dispatch width == 16.  Coordinates only
    * appear if they were enabled using the "Barycentric Interpolation
    * Mode" bits in WM_STATE.
    */
   for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
      if (barycentric_interp_modes & (1 << i)) {
         payload.barycentric_coord_reg[i] = payload.num_regs;
         payload.num_regs += 2;
         if (dispatch_width == 16) {
            payload.num_regs += 2;
         }
      }
   }

   /* R27: interpolated depth if uses source depth */
   if (uses_depth) {
      payload.source_depth_reg = payload.num_regs;
      payload.num_regs++;
      if (dispatch_width == 16) {
         /* R28: interpolated depth if not SIMD8. */
         payload.num_regs++;
      }
   }
   /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
    * NOTE(review): gated on the same VARYING_SLOT_POS bit as source depth
    * above; presumably the state setup enables W whenever it enables
    * depth -- confirm against the WM state upload code.
    */
   if (uses_depth) {
      payload.source_w_reg = payload.num_regs;
      payload.num_regs++;
      if (dispatch_width == 16) {
         /* R30: interpolated W if not SIMD8. */
         payload.num_regs++;
      }
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
      brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
      prog_data->uses_pos_offset = key->compute_pos_offset;
      /* R31: MSAA position offsets. */
      if (prog_data->uses_pos_offset) {
         payload.sample_pos_reg = payload.num_regs;
         payload.num_regs++;
      }
   }

   /* R32: MSAA input coverage mask */
   if (prog->SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN) {
      assert(devinfo->gen >= 7);
      payload.sample_mask_in_reg = payload.num_regs;
      payload.num_regs++;
      if (dispatch_width == 16) {
         /* R33: input coverage mask if not SIMD8. */
         payload.num_regs++;
      }
   }

   /* R34-: bary for 32-pixel. */
   /* R58-59: interp W for 32-pixel. */

   if (prog->OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
      source_depth_to_render_target = true;
   }
}
3539
/** Lay out the vertex shader thread payload. */
void
fs_visitor::setup_vs_payload()
{
   /* R0: thread header, R1: urb handles */
   payload.num_regs = 2;
}
3546
/** Lay out the compute shader thread payload (R0: thread header only). */
void
fs_visitor::setup_cs_payload()
{
   assert(devinfo->gen >= 7);

   payload.num_regs = 1;
}
3554
3555 void
3556 fs_visitor::assign_binding_table_offsets()
3557 {
3558 assert(stage == MESA_SHADER_FRAGMENT);
3559 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
3560 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3561 uint32_t next_binding_table_offset = 0;
3562
3563 /* If there are no color regions, we still perform an FB write to a null
3564 * renderbuffer, which we place at surface index 0.
3565 */
3566 prog_data->binding_table.render_target_start = next_binding_table_offset;
3567 next_binding_table_offset += MAX2(key->nr_color_regions, 1);
3568
3569 assign_common_binding_table_offsets(next_binding_table_offset);
3570 }
3571
3572 void
3573 fs_visitor::calculate_register_pressure()
3574 {
3575 invalidate_live_intervals();
3576 calculate_live_intervals();
3577
3578 unsigned num_instructions = 0;
3579 foreach_block(block, cfg)
3580 num_instructions += block->instructions.length();
3581
3582 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
3583
3584 for (unsigned reg = 0; reg < alloc.count; reg++) {
3585 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
3586 regs_live_at_ip[ip] += alloc.sizes[reg];
3587 }
3588 }
3589
/**
 * Run the optimization and lowering pipeline over the IR: iterate the core
 * cleanup passes to a fixed point, then apply the one-shot lowering passes.
 */
void
fs_visitor::optimize()
{
   /* bld is the common builder object pointing at the end of the program we
    * used to translate it into i965 IR.  For the optimization and lowering
    * passes coming next, any code added after the end of the program without
    * having explicitly called fs_builder::at() clearly points at a mistake.
    * Ideally optimization passes wouldn't be part of the visitor so they
    * wouldn't have access to bld at all, but they do, so just in case some
    * pass forgets to ask for a location explicitly set it to NULL here to
    * make it trip.
    */
   bld = bld.at(NULL, NULL);

   split_virtual_grfs();

   move_uniform_array_access_to_pull_constants();
   assign_constant_locations();
   demote_pull_constants();

   /* OPT() runs one pass, ORs its result into `progress`, and -- under
    * INTEL_DEBUG=optimizer -- dumps the IR to a per-pass file whenever the
    * pass made progress.  The statement expression evaluates to the pass'
    * own progress flag.
    */
#define OPT(pass, args...) ({                                           \
      pass_num++;                                                       \
      bool this_progress = pass(args);                                  \
                                                                        \
      if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {   \
         char filename[64];                                             \
         snprintf(filename, 64, "%s%d-%04d-%02d-%02d-" #pass,           \
                  stage_abbrev, dispatch_width, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
                                                                        \
         backend_shader::dump_instructions(filename);                   \
      }                                                                 \
                                                                        \
      progress = progress || this_progress;                             \
      this_progress;                                                    \
   })

   /* Dump the starting IR as iteration 0 for comparison. */
   if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s%d-%04d-00-start",
               stage_abbrev, dispatch_width,
               shader_prog ? shader_prog->Name : 0);

      backend_shader::dump_instructions(filename);
   }

   /* Iterate the cleanup passes until none of them makes progress. */
   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(remove_duplicate_mrf_writes);

      OPT(opt_algebraic);
      OPT(opt_cse);
      OPT(opt_copy_propagate);
      OPT(opt_peephole_predicated_break);
      OPT(opt_cmod_propagation);
      OPT(dead_code_eliminate);
      OPT(opt_peephole_sel);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_register_renaming);
      OPT(opt_redundant_discard_jumps);
      OPT(opt_saturate_propagation);
      OPT(opt_zero_samples);
      OPT(register_coalesce);
      OPT(compute_to_mrf);
      OPT(eliminate_find_live_channel);

      OPT(compact_virtual_grfs);
   } while (progress);

   /* One-shot lowering passes follow; these are not part of the loop. */
   pass_num = 0;

   OPT(opt_sampler_eot);

   /* Lowering LOAD_PAYLOAD exposes new opportunities for the passes above,
    * so re-run a cleanup subset when it made progress.
    */
   if (OPT(lower_load_payload)) {
      split_virtual_grfs();
      OPT(register_coalesce);
      OPT(compute_to_mrf);
      OPT(dead_code_eliminate);
   }

   OPT(opt_combine_constants);
   OPT(lower_integer_multiplication);

   lower_uniform_pull_constant_loads();
}
3680
3681 /**
3682 * Three source instruction must have a GRF/MRF destination register.
3683 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
3684 */
3685 void
3686 fs_visitor::fixup_3src_null_dest()
3687 {
3688 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3689 if (inst->is_3src() && inst->dst.is_null()) {
3690 inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
3691 inst->dst.type);
3692 }
3693 }
3694 }
3695
/**
 * Schedule and register-allocate the program.  Tries progressively more
 * allocation-friendly (but slower) pre-RA scheduling heuristics; if all of
 * them spill, falls back to spilling registers until allocation succeeds.
 */
void
fs_visitor::allocate_registers()
{
   bool allocated_without_spills;

   static const enum instruction_scheduler_mode pre_modes[] = {
      SCHEDULE_PRE,
      SCHEDULE_PRE_NON_LIFO,
      SCHEDULE_PRE_LIFO,
   };

   /* Try each scheduling heuristic to see if it can successfully register
    * allocate without spilling.  They should be ordered by decreasing
    * performance but increasing likelihood of allocating.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
      schedule_instructions(pre_modes[i]);

      /* The "if (0)" arm keeps the trivial allocator reachable for
       * debugging; normal builds always take the real allocator path.
       */
      if (0) {
         assign_regs_trivial();
         allocated_without_spills = true;
      } else {
         allocated_without_spills = assign_regs(false);
      }
      if (allocated_without_spills)
         break;
   }

   if (!allocated_without_spills) {
      /* We assume that any spilling is worse than just dropping back to
       * SIMD8.  There's probably actually some intermediate point where
       * SIMD16 with a couple of spills is still better.
       */
      if (dispatch_width == 16) {
         fail("Failure to register allocate.  Reduce number of "
              "live scalar values to avoid this.");
      } else {
         compiler->shader_perf_log(log_data,
                                   "%s shader triggered register spilling.  "
                                   "Try reducing the number of live scalar "
                                   "values to improve performance.\n",
                                   stage_name);
      }

      /* Since we're out of heuristics, just go spill registers until we
       * get an allocation.
       */
      while (!assign_regs(true)) {
         if (failed)
            break;
      }
   }

   /* This must come after all optimization and register allocation, since
    * it inserts dead code that happens to have side effects, and it does
    * so based on the actual physical registers in use.
    */
   insert_gen4_send_dependency_workarounds();

   if (failed)
      return;

   /* Spills introduced new instructions; re-schedule around them. */
   if (!allocated_without_spills)
      schedule_instructions(SCHEDULE_POST);

   if (last_scratch > 0)
      prog_data->total_scratch = brw_get_scratch_size(last_scratch);
}
3764
/**
 * Compile a vertex shader: emit IR from NIR, add clip-distance and URB
 * writes, then optimize and register-allocate.
 *
 * \return false on compilation failure (see fail_msg).
 */
bool
fs_visitor::run_vs(gl_clip_plane *clip_planes)
{
   assert(stage == MESA_SHADER_VERTEX);

   assign_common_binding_table_offsets(0);
   setup_vs_payload();

   if (shader_time_index >= 0)
      emit_shader_time_begin();

   emit_nir_code();

   if (failed)
      return false;

   compute_clip_distance(clip_planes);

   emit_urb_writes();

   if (shader_time_index >= 0)
      emit_shader_time_end();

   calculate_cfg();

   optimize();

   assign_curb_setup();
   assign_vs_urb_setup();

   fixup_3src_null_dest();
   allocate_registers();

   return !failed;
}
3800
/**
 * Compile a fragment shader at this visitor's dispatch width.
 *
 * \param do_rep_send emit a replicated-data clear shader (SIMD16 only)
 *                    instead of compiling the application's program.
 * \return false on compilation failure (see fail_msg).
 */
bool
fs_visitor::run_fs(bool do_rep_send)
{
   brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
   brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;

   assert(stage == MESA_SHADER_FRAGMENT);

   /* Recorded to verify below that compiling didn't append state
    * parameters (which would have invalidated uniform storage pointers).
    */
   sanity_param_count = prog->Parameters->NumParameters;

   assign_binding_table_offsets();

   if (devinfo->gen >= 6)
      setup_payload_gen6();
   else
      setup_payload_gen4();

   /* The "if (0)" arm keeps the dummy shader path reachable for debugging. */
   if (0) {
      emit_dummy_fs();
   } else if (do_rep_send) {
      assert(dispatch_width == 16);
      emit_repclear_shader();
   } else {
      if (shader_time_index >= 0)
         emit_shader_time_begin();

      calculate_urb_setup();
      if (prog->InputsRead > 0) {
         if (devinfo->gen < 6)
            emit_interpolation_setup_gen4();
         else
            emit_interpolation_setup_gen6();
      }

      /* We handle discards by keeping track of the still-live pixels in f0.1.
       * Initialize it with the dispatched pixels.
       */
      if (wm_prog_data->uses_kill) {
         fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
         discard_init->flag_subreg = 1;
      }

      /* Generate FS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      emit_nir_code();

      if (failed)
         return false;

      if (wm_prog_data->uses_kill)
         bld.emit(FS_OPCODE_PLACEHOLDER_HALT);

      if (wm_key->alpha_test_func)
         emit_alpha_test();

      emit_fb_writes();

      if (shader_time_index >= 0)
         emit_shader_time_end();

      calculate_cfg();

      optimize();

      assign_curb_setup();
      assign_urb_setup();

      fixup_3src_null_dest();
      allocate_registers();

      if (failed)
         return false;
   }

   /* Record GRF usage in the per-width slot of the program data. */
   if (dispatch_width == 8)
      wm_prog_data->reg_blocks = brw_register_blocks(grf_used);
   else
      wm_prog_data->reg_blocks_16 = brw_register_blocks(grf_used);

   /* If any state parameters were appended, then ParameterValues could have
    * been realloced, in which case the driver uniform storage set up by
    * _mesa_associate_uniform_storage() would point to freed memory.  Make
    * sure that didn't happen.
    */
   assert(sanity_param_count == prog->Parameters->NumParameters);

   return !failed;
}
3890
/**
 * Compile a compute shader: emit IR from NIR, terminate the thread, then
 * optimize and register-allocate.
 *
 * \return false on compilation failure (see fail_msg).
 */
bool
fs_visitor::run_cs()
{
   assert(stage == MESA_SHADER_COMPUTE);
   assert(shader);

   /* Recorded to verify below that compiling didn't append state
    * parameters (which would have invalidated uniform storage pointers).
    */
   sanity_param_count = prog->Parameters->NumParameters;

   assign_common_binding_table_offsets(0);

   setup_cs_payload();

   if (shader_time_index >= 0)
      emit_shader_time_begin();

   emit_nir_code();

   if (failed)
      return false;

   emit_cs_terminate();

   if (shader_time_index >= 0)
      emit_shader_time_end();

   calculate_cfg();

   optimize();

   assign_curb_setup();

   fixup_3src_null_dest();
   allocate_registers();

   if (failed)
      return false;

   /* If any state parameters were appended, then ParameterValues could have
    * been realloced, in which case the driver uniform storage set up by
    * _mesa_associate_uniform_storage() would point to freed memory.  Make
    * sure that didn't happen.
    */
   assert(sanity_param_count == prog->Parameters->NumParameters);

   return !failed;
}
3937
/**
 * Driver entry point for fragment shader code generation: compile SIMD8
 * and (when possible) SIMD16 variants, then generate native code for the
 * variant(s) that succeeded.
 *
 * \return the assembled native code, or NULL on compile failure.
 */
const unsigned *
brw_wm_fs_emit(struct brw_context *brw,
               void *mem_ctx,
               const struct brw_wm_prog_key *key,
               struct brw_wm_prog_data *prog_data,
               struct gl_fragment_program *fp,
               struct gl_shader_program *prog,
               unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;

   /* Snapshot GPU-busy state and time so we can report compile stalls. */
   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   /* prog is NULL for ARB fragment programs (no GLSL shader object). */
   struct brw_shader *shader = NULL;
   if (prog)
      shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   /* NOTE(review): when prog is NULL, shader stays NULL and &shader->base
    * below computes an offset from a null pointer -- presumably harmless
    * because base is the first member and brw_dump_ir tolerates it, but
    * confirm.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      brw_dump_ir("fragment", prog, &shader->base, &fp->Base);

   int st_index8 = -1, st_index16 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS8);
      st_index16 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS16);
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   fs_visitor v(brw->intelScreen->compiler, brw,
                mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
                prog, &fp->Base, 8, st_index8);
   if (!v.run_fs(false /* do_rep_send */)) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, v.fail_msg);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n",
                    v.fail_msg);

      return NULL;
   }

   /* Optionally compile a SIMD16 variant; failure here is non-fatal. */
   cfg_t *simd16_cfg = NULL;
   fs_visitor v2(brw->intelScreen->compiler, brw,
                 mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
                 prog, &fp->Base, 16, st_index16);
   if (likely(!(INTEL_DEBUG & DEBUG_NO16) || brw->use_rep_send)) {
      if (!v.simd16_unsupported) {
         /* Try a SIMD16 compile */
         v2.import_uniforms(&v);
         if (!v2.run_fs(brw->use_rep_send)) {
            perf_debug("SIMD16 shader failed to compile: %s", v2.fail_msg);
         } else {
            simd16_cfg = v2.cfg;
         }
      }
   }

   /* Decide whether to ship the SIMD8 variant at all. */
   cfg_t *simd8_cfg;
   int no_simd8 = (INTEL_DEBUG & DEBUG_NO8) || brw->no_simd8;
   if ((no_simd8 || brw->gen < 5) && simd16_cfg) {
      simd8_cfg = NULL;
      prog_data->no_8 = true;
   } else {
      simd8_cfg = v.cfg;
      prog_data->no_8 = false;
   }

   fs_generator g(brw->intelScreen->compiler, brw,
                  mem_ctx, (void *) key, &prog_data->base,
                  &fp->Base, v.promoted_constants, v.runtime_check_aads_emit, "FS");

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      char *name;
      if (prog)
         name = ralloc_asprintf(mem_ctx, "%s fragment shader %d",
                                prog->Label ? prog->Label : "unnamed",
                                prog->Name);
      else
         name = ralloc_asprintf(mem_ctx, "fragment program %d", fp->Base.Id);

      g.enable_debug(name);
   }

   /* Generate native code; SIMD16 is appended after SIMD8 and its offset
    * recorded for the state-upload code.
    */
   if (simd8_cfg)
      g.generate_code(simd8_cfg, 8);
   if (simd16_cfg)
      prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once)
         brw_wm_debug_recompile(brw, prog, key);
      shader->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   return g.get_assembly(final_assembly_size);
}
4046
/**
 * Precompile a fragment program at link time with a guessed program key,
 * so the common draw-time key hits the program cache.
 *
 * \return true if the precompile succeeded.
 */
extern "C" bool
brw_fs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_wm_prog_key key;

   struct gl_fragment_program *fp = (struct gl_fragment_program *) prog;
   struct brw_fragment_program *bfp = brw_fragment_program(fp);
   bool program_uses_dfdy = fp->UsesDFdy;

   memset(&key, 0, sizeof(key));

   /* Pre-Gen6: guess the interpolation-zone lookup bits. */
   if (brw->gen < 6) {
      if (fp->UsesKill)
         key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
      key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   /* Gen6+ only needs explicit slot assignment when more than 16 varyings
    * are read; older gens always use it.
    */
   if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.InputsRead &
                                         BRW_FS_VARYING_INPUT_MASK) > 16)
      key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;

   brw_setup_tex_for_precompile(brw, &key.tex, &fp->Base);

   /* gl_FragCoord depends on the drawable's height for Y flipping. */
   if (fp->Base.InputsRead & VARYING_BIT_POS) {
      key.drawable_height = ctx->DrawBuffer->Height;
   }

   key.nr_color_regions = _mesa_bitcount_64(fp->Base.OutputsWritten &
         ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
         BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));

   if ((fp->Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
      key.render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer) ||
                          key.nr_color_regions > 1;
   }

   key.program_string_id = bfp->id;

   /* brw_codegen_wm_prog updates the WM program state; save and restore it
    * so precompiling has no observable side effects on bound state.
    */
   uint32_t old_prog_offset = brw->wm.base.prog_offset;
   struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;

   bool success = brw_codegen_wm_prog(brw, shader_prog, bfp, &key);

   brw->wm.base.prog_offset = old_prog_offset;
   brw->wm.prog_data = old_prog_data;

   return success;
}
4104
4105 void
4106 brw_setup_tex_for_precompile(struct brw_context *brw,
4107 struct brw_sampler_prog_key_data *tex,
4108 struct gl_program *prog)
4109 {
4110 const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8;
4111 unsigned sampler_count = _mesa_fls(prog->SamplersUsed);
4112 for (unsigned i = 0; i < sampler_count; i++) {
4113 if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
4114 /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
4115 tex->swizzles[i] =
4116 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
4117 } else {
4118 /* Color sampler: assume no swizzling. */
4119 tex->swizzles[i] = SWIZZLE_XYZW;
4120 }
4121 }
4122 }