i965: Move brw_get_shader_time_index() call out of emit functions
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs.cpp
25 *
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
28 * from the LIR.
29 */
30
31 #include <sys/types.h>
32
33 #include "util/hash_table.h"
34 #include "main/macros.h"
35 #include "main/shaderobj.h"
36 #include "main/fbobject.h"
37 #include "program/prog_parameter.h"
38 #include "program/prog_print.h"
39 #include "util/register_allocate.h"
40 #include "program/hash_table.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_wm.h"
44 #include "brw_fs.h"
45 #include "brw_cs.h"
46 #include "brw_cfg.h"
47 #include "brw_dead_control_flow.h"
48 #include "main/uniforms.h"
49 #include "brw_fs_live_variables.h"
50 #include "glsl/glsl_types.h"
51 #include "program/sampler.h"
52
53 using namespace brw;
54
55 void
56 fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
57 const fs_reg *src, unsigned sources)
58 {
59 memset(this, 0, sizeof(*this));
60
61 this->src = new fs_reg[MAX2(sources, 3)];
62 for (unsigned i = 0; i < sources; i++)
63 this->src[i] = src[i];
64
65 this->opcode = opcode;
66 this->dst = dst;
67 this->sources = sources;
68 this->exec_size = exec_size;
69
70 assert(dst.file != IMM && dst.file != UNIFORM);
71
72 assert(this->exec_size != 0);
73
74 this->conditional_mod = BRW_CONDITIONAL_NONE;
75
76 /* This will be the case for almost all instructions. */
77 switch (dst.file) {
78 case GRF:
79 case HW_REG:
80 case MRF:
81 case ATTR:
82 this->regs_written = DIV_ROUND_UP(dst.component_size(exec_size),
83 REG_SIZE);
84 break;
85 case BAD_FILE:
86 this->regs_written = 0;
87 break;
88 case IMM:
89 case UNIFORM:
90 unreachable("Invalid destination register file");
91 default:
92 unreachable("Invalid register file");
93 }
94
95 this->writes_accumulator = false;
96 }
97
98 fs_inst::fs_inst()
99 {
100 init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
101 }
102
103 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
104 {
105 init(opcode, exec_size, reg_undef, NULL, 0);
106 }
107
108 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
109 {
110 init(opcode, exec_size, dst, NULL, 0);
111 }
112
113 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
114 const fs_reg &src0)
115 {
116 const fs_reg src[1] = { src0 };
117 init(opcode, exec_size, dst, src, 1);
118 }
119
120 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
121 const fs_reg &src0, const fs_reg &src1)
122 {
123 const fs_reg src[2] = { src0, src1 };
124 init(opcode, exec_size, dst, src, 2);
125 }
126
127 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
128 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
129 {
130 const fs_reg src[3] = { src0, src1, src2 };
131 init(opcode, exec_size, dst, src, 3);
132 }
133
134 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
135 const fs_reg src[], unsigned sources)
136 {
137 init(opcode, exec_width, dst, src, sources);
138 }
139
140 fs_inst::fs_inst(const fs_inst &that)
141 {
142 memcpy(this, &that, sizeof(that));
143
144 this->src = new fs_reg[MAX2(that.sources, 3)];
145
146 for (unsigned i = 0; i < that.sources; i++)
147 this->src[i] = that.src[i];
148 }
149
150 fs_inst::~fs_inst()
151 {
152 delete[] this->src;
153 }
154
155 void
156 fs_inst::resize_sources(uint8_t num_sources)
157 {
158 if (this->sources != num_sources) {
159 fs_reg *src = new fs_reg[MAX2(num_sources, 3)];
160
161 for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
162 src[i] = this->src[i];
163
164 delete[] this->src;
165 this->src = src;
166 this->sources = num_sources;
167 }
168 }
169
170 void
171 fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
172 const fs_reg &dst,
173 const fs_reg &surf_index,
174 const fs_reg &varying_offset,
175 uint32_t const_offset)
176 {
177 /* We have our constant surface use a pitch of 4 bytes, so our index can
178 * be any component of a vector, and then we load 4 contiguous
179 * components starting from that.
180 *
181 * We break down the const_offset to a portion added to the variable
182 * offset and a portion done using reg_offset, which means that if you
183 * have GLSL using something like "uniform vec4 a[20]; gl_FragColor =
184 * a[i]", we'll temporarily generate 4 vec4 loads from offset i * 4, and
185 * CSE can later notice that those loads are all the same and eliminate
186 * the redundant ones.
187 */
188 fs_reg vec4_offset = vgrf(glsl_type::int_type);
189 bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));
190
191 int scale = 1;
192 if (devinfo->gen == 4 && bld.dispatch_width() == 8) {
193 /* Pre-gen5, we can either use a SIMD8 message that requires (header,
194 * u, v, r) as parameters, or we can just use the SIMD16 message
195 * consisting of (header, u). We choose the second, at the cost of a
196 * longer return length.
197 */
198 scale = 2;
199 }
200
201 enum opcode op;
202 if (devinfo->gen >= 7)
203 op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
204 else
205 op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;
206
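/* The result of the pull load is a full vec4: 4 components, one GRF per
 * SIMD8 group of channels per component, doubled on Gen4 where we use the
 * SIMD16 message with twice the return length (see above).
 */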
207 int regs_written = 4 * (bld.dispatch_width() / 8) * scale;
208 fs_reg vec4_result = fs_reg(GRF, alloc.allocate(regs_written), dst.type);
209 fs_inst *inst = bld.emit(op, vec4_result, surf_index, vec4_offset);
210 inst->regs_written = regs_written;
211
212 if (devinfo->gen < 7) {
213 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen);
214 inst->header_size = 1;
215 if (devinfo->gen == 4)
216 inst->mlen = 3;
217 else
218 inst->mlen = 1 + bld.dispatch_width() / 8;
219 }
220
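/* Pick the requested component out of the vec4 result; with scale == 2
 * each component occupies two register-sized slots, hence the multiply.
 */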
221 bld.MOV(dst, offset(vec4_result, bld, (const_offset & 3) * scale));
222 }
223
224 /**
225 * A helper for MOV generation for fixing up broken hardware SEND dependency
226 * handling.
227 */
228 void
229 fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
230 {
231 /* The caller always wants an uncompressed instruction, both to emit the
232 * minimal extra dependencies and to avoid having to align its regs to 2.
233 */
234 const fs_builder ubld = bld.annotate("send dependency resolve")
235 .half(0);
236
237 ubld.MOV(ubld.null_reg_f(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
238 }
239
240 bool
241 fs_inst::equals(fs_inst *inst) const
242 {
243 return (opcode == inst->opcode &&
244 dst.equals(inst->dst) &&
245 src[0].equals(inst->src[0]) &&
246 src[1].equals(inst->src[1]) &&
247 src[2].equals(inst->src[2]) &&
248 saturate == inst->saturate &&
249 predicate == inst->predicate &&
250 conditional_mod == inst->conditional_mod &&
251 mlen == inst->mlen &&
252 base_mrf == inst->base_mrf &&
253 target == inst->target &&
254 eot == inst->eot &&
255 header_size == inst->header_size &&
256 shadow_compare == inst->shadow_compare &&
257 exec_size == inst->exec_size &&
258 offset == inst->offset);
259 }
260
261 bool
262 fs_inst::overwrites_reg(const fs_reg &reg) const
263 {
264 return reg.in_range(dst, regs_written);
265 }
266
267 bool
268 fs_inst::is_send_from_grf() const
269 {
270 switch (opcode) {
271 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
272 case SHADER_OPCODE_SHADER_TIME_ADD:
273 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
274 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
275 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
276 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
277 case SHADER_OPCODE_UNTYPED_ATOMIC:
278 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
279 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
280 case SHADER_OPCODE_TYPED_ATOMIC:
281 case SHADER_OPCODE_TYPED_SURFACE_READ:
282 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
283 case SHADER_OPCODE_URB_WRITE_SIMD8:
284 return true;
285 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
286 return src[1].file == GRF;
287 case FS_OPCODE_FB_WRITE:
288 return src[0].file == GRF;
289 default:
290 if (is_tex())
291 return src[0].file == GRF;
292
293 return false;
294 }
295 }
296
297 bool
298 fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
299 {
300 if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
301 return false;
302
303 fs_reg reg = this->src[0];
304 if (reg.file != GRF || reg.reg_offset != 0 || reg.stride == 0)
305 return false;
306
307 if (grf_alloc.sizes[reg.reg] != this->regs_written)
308 return false;
309
310 for (int i = 0; i < this->sources; i++) {
311 reg.type = this->src[i].type;
312 if (!this->src[i].equals(reg))
313 return false;
314
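/* Header sources occupy a single register each; the remaining payload
 * sources advance by one register per SIMD8 group of channels.
 */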
315 if (i < this->header_size) {
316 reg.reg_offset += 1;
317 } else {
318 reg.reg_offset += this->exec_size / 8;
319 }
320 }
321
322 return true;
323 }
324
325 bool
326 fs_inst::can_do_source_mods(const struct brw_device_info *devinfo)
327 {
328 if (devinfo->gen == 6 && is_math())
329 return false;
330
331 if (is_send_from_grf())
332 return false;
333
334 if (!backend_instruction::can_do_source_mods())
335 return false;
336
337 return true;
338 }
339
340 bool
341 fs_inst::has_side_effects() const
342 {
343 return this->eot || backend_instruction::has_side_effects();
344 }
345
346 void
347 fs_reg::init()
348 {
349 memset(this, 0, sizeof(*this));
350 stride = 1;
351 }
352
353 /** Generic unset register constructor. */
354 fs_reg::fs_reg()
355 {
356 init();
357 this->file = BAD_FILE;
358 }
359
360 /** Immediate value constructor. */
361 fs_reg::fs_reg(float f)
362 {
363 init();
364 this->file = IMM;
365 this->type = BRW_REGISTER_TYPE_F;
366 this->stride = 0;
367 this->fixed_hw_reg.dw1.f = f;
368 }
369
370 /** Immediate value constructor. */
371 fs_reg::fs_reg(int32_t i)
372 {
373 init();
374 this->file = IMM;
375 this->type = BRW_REGISTER_TYPE_D;
376 this->stride = 0;
377 this->fixed_hw_reg.dw1.d = i;
378 }
379
380 /** Immediate value constructor. */
381 fs_reg::fs_reg(uint32_t u)
382 {
383 init();
384 this->file = IMM;
385 this->type = BRW_REGISTER_TYPE_UD;
386 this->stride = 0;
387 this->fixed_hw_reg.dw1.ud = u;
388 }
389
390 /** Vector float immediate value constructor. */
391 fs_reg::fs_reg(uint8_t vf[4])
392 {
393 init();
394 this->file = IMM;
395 this->type = BRW_REGISTER_TYPE_VF;
396 memcpy(&this->fixed_hw_reg.dw1.ud, vf, sizeof(unsigned));
397 }
398
399 /** Vector float immediate value constructor. */
400 fs_reg::fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
401 {
402 init();
403 this->file = IMM;
404 this->type = BRW_REGISTER_TYPE_VF;
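/* Pack the four restricted 8-bit floats into a single dword, with vf0 in
 * the least significant byte.
 */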
405 this->fixed_hw_reg.dw1.ud = (vf0 << 0) |
406 (vf1 << 8) |
407 (vf2 << 16) |
408 (vf3 << 24);
409 }
410
411 /** Fixed brw_reg. */
412 fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
413 {
414 init();
415 this->file = HW_REG;
416 this->fixed_hw_reg = fixed_hw_reg;
417 this->type = fixed_hw_reg.type;
418 }
419
420 bool
421 fs_reg::equals(const fs_reg &r) const
422 {
423 return (file == r.file &&
424 reg == r.reg &&
425 reg_offset == r.reg_offset &&
426 subreg_offset == r.subreg_offset &&
427 type == r.type &&
428 negate == r.negate &&
429 abs == r.abs &&
430 !reladdr && !r.reladdr &&
431 ((file != HW_REG && file != IMM) ||
432 memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
433 sizeof(fixed_hw_reg)) == 0) &&
434 stride == r.stride);
435 }
436
437 fs_reg &
438 fs_reg::set_smear(unsigned subreg)
439 {
440 assert(file != HW_REG && file != IMM);
441 subreg_offset = subreg * type_sz(type);
442 stride = 0;
443 return *this;
444 }
445
446 bool
447 fs_reg::is_contiguous() const
448 {
449 return stride == 1;
450 }
451
452 unsigned
453 fs_reg::component_size(unsigned width) const
454 {
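/* For fixed HW registers the horizontal stride is stored in the EU's
 * log2-style encoding: hstride == 0 means a stride of 0, otherwise the
 * stride is 1 << (hstride - 1) elements.
 */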
455 const unsigned stride = (file != HW_REG ? this->stride :
456 fixed_hw_reg.hstride == 0 ? 0 :
457 1 << (fixed_hw_reg.hstride - 1));
458 return MAX2(width * stride, 1) * type_sz(type);
459 }
460
461 extern "C" int
462 type_size_scalar(const struct glsl_type *type)
463 {
464 unsigned int size, i;
465
466 switch (type->base_type) {
467 case GLSL_TYPE_UINT:
468 case GLSL_TYPE_INT:
469 case GLSL_TYPE_FLOAT:
470 case GLSL_TYPE_BOOL:
471 return type->components();
472 case GLSL_TYPE_ARRAY:
473 return type_size_scalar(type->fields.array) * type->length;
474 case GLSL_TYPE_STRUCT:
475 size = 0;
476 for (i = 0; i < type->length; i++) {
477 size += type_size_scalar(type->fields.structure[i].type);
478 }
479 return size;
480 case GLSL_TYPE_SAMPLER:
481 /* Samplers take up no register space, since they're baked in at
482 * link time.
483 */
484 return 0;
485 case GLSL_TYPE_ATOMIC_UINT:
486 return 0;
487 case GLSL_TYPE_SUBROUTINE:
488 return 1;
489 case GLSL_TYPE_IMAGE:
490 return BRW_IMAGE_PARAM_SIZE;
491 case GLSL_TYPE_VOID:
492 case GLSL_TYPE_ERROR:
493 case GLSL_TYPE_INTERFACE:
494 case GLSL_TYPE_DOUBLE:
495 unreachable("not reached");
496 }
497
498 return 0;
499 }
500
501 /**
502 * Emit a MOV to read the timestamp register and return its destination.
503 *
504 * The MOV is emitted through the given builder; the returned register is
505 * set up to read only the low 32 bits of the timestamp (see below).
506 */
507 fs_reg
508 fs_visitor::get_timestamp(const fs_builder &bld)
509 {
510 assert(devinfo->gen >= 7);
511
512 fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
513 BRW_ARF_TIMESTAMP,
514 0),
515 BRW_REGISTER_TYPE_UD));
516
517 fs_reg dst = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
518
519 /* We want to read the 3 fields we care about even if it's not enabled in
520 * the dispatch.
521 */
522 bld.group(4, 0).exec_all().MOV(dst, ts);
523
524 /* The caller wants the low 32 bits of the timestamp. Since it's running
525 * at the GPU clock rate of ~1.2GHz, it will roll over every ~3 seconds,
526 * which is plenty of time for our purposes. It is identical across the
527 * EUs, but since it's tracking GPU core speed it will increment at a
528 * varying rate as render P-states change.
529 *
530 * The caller could also check if render P-states have changed (or anything
531 * else that might disrupt timing) by setting smear to 2 and checking if
532 * that field is != 0.
533 */
534 dst.set_smear(0);
535
536 return dst;
537 }
538
539 void
540 fs_visitor::emit_shader_time_begin()
541 {
542 shader_start_time = get_timestamp(bld.annotate("shader time start"));
543 }
544
545 void
546 fs_visitor::emit_shader_time_end()
547 {
548 /* Insert our code just before the final SEND with EOT. */
549 exec_node *end = this->instructions.get_tail();
550 assert(end && ((fs_inst *) end)->eot);
551 const fs_builder ibld = bld.annotate("shader time end")
552 .exec_all().at(NULL, end);
553
554 fs_reg shader_end_time = get_timestamp(ibld);
555
556 /* Check that there weren't any timestamp reset events (assuming these
557 * were the only two timestamp reads that happened).
558 */
559 fs_reg reset = shader_end_time;
560 reset.set_smear(2);
561 set_condmod(BRW_CONDITIONAL_Z,
562 ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
563 ibld.IF(BRW_PREDICATE_NORMAL);
564
565 fs_reg start = shader_start_time;
566 start.negate = true;
567 fs_reg diff = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
568 diff.set_smear(0);
569
570 const fs_builder cbld = ibld.group(1, 0);
571 cbld.ADD(diff, start, shader_end_time);
572
573 /* If there were no instructions between the two timestamp gets, the diff
574 * is 2 cycles. Remove that overhead, so I can forget about that when
575 * trying to determine the time taken for single instructions.
576 */
577 cbld.ADD(diff, diff, fs_reg(-2u));
578 SHADER_TIME_ADD(cbld, 0, diff);
579 SHADER_TIME_ADD(cbld, 1, fs_reg(1u));
580 ibld.emit(BRW_OPCODE_ELSE);
581 SHADER_TIME_ADD(cbld, 2, fs_reg(1u));
582 ibld.emit(BRW_OPCODE_ENDIF);
583 }
584
585 void
586 fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
587 int shader_time_subindex,
588 fs_reg value)
589 {
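/* Each shader time index owns three consecutive accumulators in the
 * buffer: subindex 0 collects cycle deltas, 1 counts executions whose
 * timestamps were usable, and 2 counts executions where the timestamp
 * was reset (see emit_shader_time_end()).
 */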
590 int index = shader_time_index * 3 + shader_time_subindex;
591 fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);
592
593 fs_reg payload;
594 if (dispatch_width == 8)
595 payload = vgrf(glsl_type::uvec2_type);
596 else
597 payload = vgrf(glsl_type::uint_type);
598
599 bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
600 }
601
602 void
603 fs_visitor::vfail(const char *format, va_list va)
604 {
605 char *msg;
606
607 if (failed)
608 return;
609
610 failed = true;
611
612 msg = ralloc_vasprintf(mem_ctx, format, va);
613 msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);
614
615 this->fail_msg = msg;
616
617 if (debug_enabled) {
618 fprintf(stderr, "%s", msg);
619 }
620 }
621
622 void
623 fs_visitor::fail(const char *format, ...)
624 {
625 va_list va;
626
627 va_start(va, format);
628 vfail(format, va);
629 va_end(va);
630 }
631
632 /**
633 * Mark this program as impossible to compile in SIMD16 mode.
634 *
635 * During the SIMD8 compile (which happens first), we can detect and flag
636 * things that are unsupported in SIMD16 mode, so the compiler can skip
637 * the SIMD16 compile altogether.
638 *
639 * During a SIMD16 compile (if one happens anyway), this just calls fail().
640 */
641 void
642 fs_visitor::no16(const char *msg)
643 {
644 if (dispatch_width == 16) {
645 fail("%s", msg);
646 } else {
647 simd16_unsupported = true;
648
649 compiler->shader_perf_log(log_data,
650 "SIMD16 shader failed to compile: %s", msg);
651 }
652 }
653
654 /**
655 * Returns true if the instruction has a flag that means it won't
656 * update an entire destination register.
657 *
658 * For example, dead code elimination and live variable analysis want to know
659 * when a write to a variable screens off any preceding values that were in
660 * it.
661 */
662 bool
663 fs_inst::is_partial_write() const
664 {
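/* A write is partial if it's predicated (other than SEL), narrower than
 * a full 32-byte register, or strided.
 */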
665 return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
666 (this->exec_size * type_sz(this->dst.type)) < 32 ||
667 !this->dst.is_contiguous());
668 }
669
670 unsigned
671 fs_inst::components_read(unsigned i) const
672 {
673 switch (opcode) {
674 case FS_OPCODE_LINTERP:
675 if (i == 0)
676 return 2;
677 else
678 return 1;
679
680 case FS_OPCODE_PIXEL_X:
681 case FS_OPCODE_PIXEL_Y:
682 assert(i == 0);
683 return 2;
684
685 case FS_OPCODE_FB_WRITE_LOGICAL:
686 assert(src[6].file == IMM);
687 /* First/second FB write color. */
688 if (i < 2)
689 return src[6].fixed_hw_reg.dw1.ud;
690 else
691 return 1;
692
693 case SHADER_OPCODE_TEX_LOGICAL:
694 case SHADER_OPCODE_TXD_LOGICAL:
695 case SHADER_OPCODE_TXF_LOGICAL:
696 case SHADER_OPCODE_TXL_LOGICAL:
697 case SHADER_OPCODE_TXS_LOGICAL:
698 case FS_OPCODE_TXB_LOGICAL:
699 case SHADER_OPCODE_TXF_CMS_LOGICAL:
700 case SHADER_OPCODE_TXF_UMS_LOGICAL:
701 case SHADER_OPCODE_TXF_MCS_LOGICAL:
702 case SHADER_OPCODE_LOD_LOGICAL:
703 case SHADER_OPCODE_TG4_LOGICAL:
704 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
705 assert(src[8].file == IMM && src[9].file == IMM);
706 /* Texture coordinates. */
707 if (i == 0)
708 return src[8].fixed_hw_reg.dw1.ud;
709 /* Texture derivatives. */
710 else if ((i == 2 || i == 3) && opcode == SHADER_OPCODE_TXD_LOGICAL)
711 return src[9].fixed_hw_reg.dw1.ud;
712 /* Texture offset. */
713 else if (i == 7)
714 return 2;
715 else
716 return 1;
717
718 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
719 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
720 assert(src[3].file == IMM);
721 /* Surface coordinates. */
722 if (i == 0)
723 return src[3].fixed_hw_reg.dw1.ud;
724 /* Surface operation source (ignored for reads). */
725 else if (i == 1)
726 return 0;
727 else
728 return 1;
729
730 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
731 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
732 assert(src[3].file == IMM &&
733 src[4].file == IMM);
734 /* Surface coordinates. */
735 if (i == 0)
736 return src[3].fixed_hw_reg.dw1.ud;
737 /* Surface operation source. */
738 else if (i == 1)
739 return src[4].fixed_hw_reg.dw1.ud;
740 else
741 return 1;
742
743 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
744 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
745 assert(src[3].file == IMM &&
746 src[4].file == IMM);
747 const unsigned op = src[4].fixed_hw_reg.dw1.ud;
748 /* Surface coordinates. */
749 if (i == 0)
750 return src[3].fixed_hw_reg.dw1.ud;
751 /* Surface operation source. */
752 else if (i == 1 && op == BRW_AOP_CMPWR)
753 return 2;
754 else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
755 op == BRW_AOP_PREDEC))
756 return 0;
757 else
758 return 1;
759 }
760
761 default:
762 return 1;
763 }
764 }
765
766 int
767 fs_inst::regs_read(int arg) const
768 {
769 switch (opcode) {
770 case FS_OPCODE_FB_WRITE:
771 case SHADER_OPCODE_URB_WRITE_SIMD8:
772 case SHADER_OPCODE_UNTYPED_ATOMIC:
773 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
774 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
775 case SHADER_OPCODE_TYPED_ATOMIC:
776 case SHADER_OPCODE_TYPED_SURFACE_READ:
777 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
778 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
779 if (arg == 0)
780 return mlen;
781 break;
782
783 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
784 /* The payload is actually stored in src1 */
785 if (arg == 1)
786 return mlen;
787 break;
788
789 case FS_OPCODE_LINTERP:
790 if (arg == 1)
791 return 1;
792 break;
793
794 case SHADER_OPCODE_LOAD_PAYLOAD:
795 if (arg < this->header_size)
796 return 1;
797 break;
798
799 case CS_OPCODE_CS_TERMINATE:
800 case SHADER_OPCODE_BARRIER:
801 return 1;
802
803 default:
804 if (is_tex() && arg == 0 && src[0].file == GRF)
805 return mlen;
806 break;
807 }
808
809 switch (src[arg].file) {
810 case BAD_FILE:
811 return 0;
812 case UNIFORM:
813 case IMM:
814 return 1;
815 case GRF:
816 case ATTR:
817 case HW_REG:
818 return DIV_ROUND_UP(components_read(arg) *
819 src[arg].component_size(exec_size),
820 REG_SIZE);
821 case MRF:
822 unreachable("MRF registers are not allowed as sources");
823 default:
824 unreachable("Invalid register file");
825 }
826 }
827
828 bool
829 fs_inst::reads_flag() const
830 {
831 return predicate;
832 }
833
834 bool
835 fs_inst::writes_flag() const
836 {
837 return (conditional_mod && (opcode != BRW_OPCODE_SEL &&
838 opcode != BRW_OPCODE_IF &&
839 opcode != BRW_OPCODE_WHILE)) ||
840 opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;
841 }
842
843 /**
844 * Returns how many MRFs an FS opcode will write over.
845 *
846 * Note that this is not the 0 or 1 implied writes in an actual gen
847 * instruction -- the FS opcodes often generate MOVs in addition.
848 */
849 int
850 fs_visitor::implied_mrf_writes(fs_inst *inst)
851 {
852 if (inst->mlen == 0)
853 return 0;
854
855 if (inst->base_mrf == -1)
856 return 0;
857
858 switch (inst->opcode) {
859 case SHADER_OPCODE_RCP:
860 case SHADER_OPCODE_RSQ:
861 case SHADER_OPCODE_SQRT:
862 case SHADER_OPCODE_EXP2:
863 case SHADER_OPCODE_LOG2:
864 case SHADER_OPCODE_SIN:
865 case SHADER_OPCODE_COS:
866 return 1 * dispatch_width / 8;
867 case SHADER_OPCODE_POW:
868 case SHADER_OPCODE_INT_QUOTIENT:
869 case SHADER_OPCODE_INT_REMAINDER:
870 return 2 * dispatch_width / 8;
871 case SHADER_OPCODE_TEX:
872 case FS_OPCODE_TXB:
873 case SHADER_OPCODE_TXD:
874 case SHADER_OPCODE_TXF:
875 case SHADER_OPCODE_TXF_CMS:
876 case SHADER_OPCODE_TXF_MCS:
877 case SHADER_OPCODE_TG4:
878 case SHADER_OPCODE_TG4_OFFSET:
879 case SHADER_OPCODE_TXL:
880 case SHADER_OPCODE_TXS:
881 case SHADER_OPCODE_LOD:
882 case SHADER_OPCODE_SAMPLEINFO:
883 return 1;
884 case FS_OPCODE_FB_WRITE:
885 return 2;
886 case FS_OPCODE_GET_BUFFER_SIZE:
887 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
888 case SHADER_OPCODE_GEN4_SCRATCH_READ:
889 return 1;
890 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
891 return inst->mlen;
892 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
893 return inst->mlen;
894 case SHADER_OPCODE_UNTYPED_ATOMIC:
895 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
896 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
897 case SHADER_OPCODE_TYPED_ATOMIC:
898 case SHADER_OPCODE_TYPED_SURFACE_READ:
899 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
900 case SHADER_OPCODE_URB_WRITE_SIMD8:
901 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
902 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
903 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
904 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
905 return 0;
906 default:
907 unreachable("not reached");
908 }
909 }
910
911 fs_reg
912 fs_visitor::vgrf(const glsl_type *const type)
913 {
914 int reg_width = dispatch_width / 8;
915 return fs_reg(GRF, alloc.allocate(type_size_scalar(type) * reg_width),
916 brw_type_for_base_type(type));
917 }
918
919 /** Constructor for a register of the given file and index (float-typed). */
920 fs_reg::fs_reg(enum register_file file, int reg)
921 {
922 init();
923 this->file = file;
924 this->reg = reg;
925 this->type = BRW_REGISTER_TYPE_F;
926 this->stride = (file == UNIFORM ? 0 : 1);
927 }
928
929 /** Constructor for a register of the given file, index and type. */
930 fs_reg::fs_reg(enum register_file file, int reg, enum brw_reg_type type)
931 {
932 init();
933 this->file = file;
934 this->reg = reg;
935 this->type = type;
936 this->stride = (file == UNIFORM ? 0 : 1);
937 }
938
939 /* For SIMD16, we need to follow the uniform setup from the SIMD8 dispatch.
940 * This brings in those uniform definitions.
941 */
942 void
943 fs_visitor::import_uniforms(fs_visitor *v)
944 {
945 this->push_constant_loc = v->push_constant_loc;
946 this->pull_constant_loc = v->pull_constant_loc;
947 this->uniforms = v->uniforms;
948 this->param_size = v->param_size;
949 }
950
951 fs_reg *
952 fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
953 bool origin_upper_left)
954 {
955 assert(stage == MESA_SHADER_FRAGMENT);
956 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
957 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec4_type));
958 fs_reg wpos = *reg;
959 bool flip = !origin_upper_left ^ key->render_to_fbo;
960
961 /* gl_FragCoord.x */
962 if (pixel_center_integer) {
963 bld.MOV(wpos, this->pixel_x);
964 } else {
965 bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
966 }
967 wpos = offset(wpos, bld, 1);
968
969 /* gl_FragCoord.y */
970 if (!flip && pixel_center_integer) {
971 bld.MOV(wpos, this->pixel_y);
972 } else {
973 fs_reg pixel_y = this->pixel_y;
974 float offset = (pixel_center_integer ? 0.0f : 0.5f);
975
976 if (flip) {
977 pixel_y.negate = true;
978 offset += key->drawable_height - 1.0f;
979 }
980
981 bld.ADD(wpos, pixel_y, fs_reg(offset));
982 }
983 wpos = offset(wpos, bld, 1);
984
985 /* gl_FragCoord.z */
986 if (devinfo->gen >= 6) {
987 bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
988 } else {
989 bld.emit(FS_OPCODE_LINTERP, wpos,
990 this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
991 interp_reg(VARYING_SLOT_POS, 2));
992 }
993 wpos = offset(wpos, bld, 1);
994
995 /* gl_FragCoord.w: Already set up in emit_interpolation */
996 bld.MOV(wpos, this->wpos_w);
997
998 return reg;
999 }
1000
1001 fs_inst *
1002 fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
1003 glsl_interp_qualifier interpolation_mode,
1004 bool is_centroid, bool is_sample)
1005 {
1006 brw_wm_barycentric_interp_mode barycoord_mode;
1007 if (devinfo->gen >= 6) {
1008 if (is_centroid) {
1009 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1010 barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
1011 else
1012 barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
1013 } else if (is_sample) {
1014 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1015 barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
1016 else
1017 barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
1018 } else {
1019 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1020 barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
1021 else
1022 barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
1023 }
1024 } else {
1025 /* On Ironlake and below, there is only one interpolation mode.
1026 * Centroid interpolation doesn't mean anything on this hardware --
1027 * there is no multisampling.
1028 */
1029 barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
1030 }
1031 return bld.emit(FS_OPCODE_LINTERP, attr,
1032 this->delta_xy[barycoord_mode], interp);
1033 }
1034
1035 void
1036 fs_visitor::emit_general_interpolation(fs_reg attr, const char *name,
1037 const glsl_type *type,
1038 glsl_interp_qualifier interpolation_mode,
1039 int location, bool mod_centroid,
1040 bool mod_sample)
1041 {
1042 attr.type = brw_type_for_base_type(type->get_scalar_type());
1043
1044 assert(stage == MESA_SHADER_FRAGMENT);
1045 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1046 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1047
1048 unsigned int array_elements;
1049
1050 if (type->is_array()) {
1051 array_elements = type->length;
1052 if (array_elements == 0) {
1053 fail("dereferenced array '%s' has length 0\n", name);
1054 }
1055 type = type->fields.array;
1056 } else {
1057 array_elements = 1;
1058 }
1059
1060 if (interpolation_mode == INTERP_QUALIFIER_NONE) {
1061 bool is_gl_Color =
1062 location == VARYING_SLOT_COL0 || location == VARYING_SLOT_COL1;
1063 if (key->flat_shade && is_gl_Color) {
1064 interpolation_mode = INTERP_QUALIFIER_FLAT;
1065 } else {
1066 interpolation_mode = INTERP_QUALIFIER_SMOOTH;
1067 }
1068 }
1069
1070 for (unsigned int i = 0; i < array_elements; i++) {
1071 for (unsigned int j = 0; j < type->matrix_columns; j++) {
1072 if (prog_data->urb_setup[location] == -1) {
1073 /* If there's no incoming setup data for this slot, don't
1074 * emit interpolation for it.
1075 */
1076 attr = offset(attr, bld, type->vector_elements);
1077 location++;
1078 continue;
1079 }
1080
1081 if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
1082 /* Constant interpolation (flat shading) case. The SF has
1083 * handed us defined values in only the constant offset
1084 * field of the setup reg.
1085 */
1086 for (unsigned int k = 0; k < type->vector_elements; k++) {
1087 struct brw_reg interp = interp_reg(location, k);
1088 interp = suboffset(interp, 3);
1089 interp.type = attr.type;
1090 bld.emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
1091 attr = offset(attr, bld, 1);
1092 }
1093 } else {
1094 /* Smooth/noperspective interpolation case. */
1095 for (unsigned int k = 0; k < type->vector_elements; k++) {
1096 struct brw_reg interp = interp_reg(location, k);
1097 if (devinfo->needs_unlit_centroid_workaround && mod_centroid) {
1098 /* Get the pixel/sample mask into f0 so that we know
1099 * which pixels are lit. Then, for each channel that is
1100 * unlit, replace the centroid data with non-centroid
1101 * data.
1102 */
1103 bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
1104
1105 fs_inst *inst;
1106 inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
1107 false, false);
1108 inst->predicate = BRW_PREDICATE_NORMAL;
1109 inst->predicate_inverse = true;
1110 if (devinfo->has_pln)
1111 inst->no_dd_clear = true;
1112
1113 inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
1114 mod_centroid && !key->persample_shading,
1115 mod_sample || key->persample_shading);
1116 inst->predicate = BRW_PREDICATE_NORMAL;
1117 inst->predicate_inverse = false;
1118 if (devinfo->has_pln)
1119 inst->no_dd_check = true;
1120
1121 } else {
1122 emit_linterp(attr, fs_reg(interp), interpolation_mode,
1123 mod_centroid && !key->persample_shading,
1124 mod_sample || key->persample_shading);
1125 }
1126 if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
1127 bld.MUL(attr, attr, this->pixel_w);
1128 }
1129 attr = offset(attr, bld, 1);
1130 }
1131
1132 }
1133 location++;
1134 }
1135 }
1136 }
1137
1138 fs_reg *
1139 fs_visitor::emit_frontfacing_interpolation()
1140 {
1141 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));
1142
1143 if (devinfo->gen >= 6) {
1144 /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
1145 * a boolean result from this (~0/true or 0/false).
1146 *
1147 * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
1148 * this task in only one instruction:
1149 * - a negation source modifier will flip the bit; and
1150 * - a W -> D type conversion will sign extend the bit into the high
1151 * word of the destination.
1152 *
1153 * An ASR 15 fills the low word of the destination.
1154 */
1155 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
1156 g0.negate = true;
1157
1158 bld.ASR(*reg, g0, fs_reg(15));
1159 } else {
1160 /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
1161 * a boolean result from this (1/true or 0/false).
1162 *
1163 * Like in the above case, since the bit is the MSB of g1.6:UD we can use
1164 * the negation source modifier to flip it. Unfortunately the SHR
1165 * instruction only operates on UD (or D with an abs source modifier)
1166 * sources without negation.
1167 *
1168 * Instead, use ASR (which will give ~0/true or 0/false).
1169 */
1170 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
1171 g1_6.negate = true;
1172
1173 bld.ASR(*reg, g1_6, fs_reg(31));
1174 }
1175
1176 return reg;
1177 }
1178
1179 void
1180 fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
1181 {
1182 assert(stage == MESA_SHADER_FRAGMENT);
1183 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1184 assert(dst.type == BRW_REGISTER_TYPE_F);
1185
1186 if (key->compute_pos_offset) {
1187 /* Convert int_sample_pos to floating point */
1188 bld.MOV(dst, int_sample_pos);
1189 /* Scale to the range [0, 1] */
1190 bld.MUL(dst, dst, fs_reg(1 / 16.0f));
1191 }
1192 else {
1193 /* From ARB_sample_shading specification:
1194 * "When rendering to a non-multisample buffer, or if multisample
1195 * rasterization is disabled, gl_SamplePosition will always be
1196 * (0.5, 0.5)."
1197 */
1198 bld.MOV(dst, fs_reg(0.5f));
1199 }
1200 }
1201
1202 fs_reg *
1203 fs_visitor::emit_samplepos_setup()
1204 {
1205 assert(devinfo->gen >= 6);
1206
1207 const fs_builder abld = bld.annotate("compute sample position");
1208 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
1209 fs_reg pos = *reg;
1210 fs_reg int_sample_x = vgrf(glsl_type::int_type);
1211 fs_reg int_sample_y = vgrf(glsl_type::int_type);
1212
1213 /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
1214 * mode will be enabled.
1215 *
1216 * From the Ivy Bridge PRM, volume 2 part 1, page 344:
1217 * R31.1:0 Position Offset X/Y for Slot[3:0]
1218 * R31.3:2 Position Offset X/Y for Slot[7:4]
1219 * .....
1220 *
1221 * The X, Y sample positions come in as bytes in thread payload. So, read
1222 * the positions using vstride=16, width=8, hstride=2.
1223 */
1224 struct brw_reg sample_pos_reg =
1225 stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
1226 BRW_REGISTER_TYPE_B), 16, 8, 2);
1227
1228 if (dispatch_width == 8) {
1229 abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
1230 } else {
1231 abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
1232 abld.half(1).MOV(half(int_sample_x, 1),
1233 fs_reg(suboffset(sample_pos_reg, 16)));
1234 }
1235 /* Compute gl_SamplePosition.x */
1236 compute_sample_position(pos, int_sample_x);
1237 pos = offset(pos, abld, 1);
1238 if (dispatch_width == 8) {
1239 abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
1240 } else {
1241 abld.half(0).MOV(half(int_sample_y, 0),
1242 fs_reg(suboffset(sample_pos_reg, 1)));
1243 abld.half(1).MOV(half(int_sample_y, 1),
1244 fs_reg(suboffset(sample_pos_reg, 17)));
1245 }
1246 /* Compute gl_SamplePosition.y */
1247 compute_sample_position(pos, int_sample_y);
1248 return reg;
1249 }
1250
1251 fs_reg *
1252 fs_visitor::emit_sampleid_setup()
1253 {
1254 assert(stage == MESA_SHADER_FRAGMENT);
1255 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1256 assert(devinfo->gen >= 6);
1257
1258 const fs_builder abld = bld.annotate("compute sample id");
1259 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1260
1261 if (key->compute_sample_id) {
1262 fs_reg t1 = vgrf(glsl_type::int_type);
1263 fs_reg t2 = vgrf(glsl_type::int_type);
1264 t2.type = BRW_REGISTER_TYPE_UW;
1265
1266 /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
1267 * 8x multisampling, subspan 0 will represent sample N (where N
1268 * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
1269 * 7. We can find the value of N by looking at R0.0 bits 7:6
1270 * ("Starting Sample Pair Index (SSPI)") and multiplying by two
1271 * (since samples are always delivered in pairs). That is, we
1272 * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
1273 * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
1274 * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
1275 * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
1276 * populating a temporary variable with the sequence (0, 1, 2, 3),
1277 * and then reading from it using vstride=1, width=4, hstride=0.
1278 * These computations hold good for 4x multisampling as well.
1279 *
1280 * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
1281 * the first four slots are sample 0 of subspan 0; the next four
1282 * are sample 1 of subspan 0; the third group is sample 0 of
1283 * subspan 1, and finally sample 1 of subspan 1.
1284 */
1285 abld.exec_all()
1286 .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
1287 fs_reg(0xc0));
1288 abld.exec_all().SHR(t1, t1, fs_reg(5));
1289
1290 /* This works for both SIMD8 and SIMD16 */
1291 abld.exec_all()
1292 .MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210));
1293
1294 /* This special instruction takes care of setting vstride=1,
1295 * width=4, hstride=0 of t2 during an ADD instruction.
1296 */
1297 abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
1298 } else {
1299 /* As per GL_ARB_sample_shading specification:
1300 * "When rendering to a non-multisample buffer, or if multisample
1301 * rasterization is disabled, gl_SampleID will always be zero."
1302 */
1303 abld.MOV(*reg, fs_reg(0));
1304 }
1305
1306 return reg;
1307 }
1308
1309 fs_reg
1310 fs_visitor::resolve_source_modifiers(const fs_reg &src)
1311 {
1312 if (!src.abs && !src.negate)
1313 return src;
1314
1315 fs_reg temp = bld.vgrf(src.type);
1316 bld.MOV(temp, src);
1317
1318 return temp;
1319 }
1320
1321 void
1322 fs_visitor::emit_discard_jump()
1323 {
1324 assert(((brw_wm_prog_data*) this->prog_data)->uses_kill);
1325
1326 /* For performance, after a discard, jump to the end of the
1327 * shader if all relevant channels have been discarded.
1328 */
1329 fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
1330 discard_jump->flag_subreg = 1;
1331
1332 discard_jump->predicate = (dispatch_width == 8)
1333 ? BRW_PREDICATE_ALIGN1_ANY8H
1334 : BRW_PREDICATE_ALIGN1_ANY16H;
1335 discard_jump->predicate_inverse = true;
1336 }
1337
1338 void
1339 fs_visitor::assign_curb_setup()
1340 {
1341 if (dispatch_width == 8) {
1342 prog_data->dispatch_grf_start_reg = payload.num_regs;
1343 } else {
1344 if (stage == MESA_SHADER_FRAGMENT) {
1345 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1346 prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
1347 } else if (stage == MESA_SHADER_COMPUTE) {
1348 brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
1349 prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
1350 } else {
1351 unreachable("Unsupported shader type!");
1352 }
1353 }
1354
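/* Each CURBE register holds 8 32-bit constants, so round the push
 * constant count up to a whole number of registers.
 */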
1355 prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;
1356
1357 /* Map the offsets in the UNIFORM file to fixed HW regs. */
1358 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1359 for (unsigned int i = 0; i < inst->sources; i++) {
1360 if (inst->src[i].file == UNIFORM) {
1361 int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
1362 int constant_nr;
1363 if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
1364 constant_nr = push_constant_loc[uniform_nr];
1365 } else {
1366 /* Section 5.11 of the OpenGL 4.1 spec says:
1367 * "Out-of-bounds reads return undefined values, which include
1368 * values from other variables of the active program or zero."
1369 * Just return the first push constant.
1370 */
1371 constant_nr = 0;
1372 }
1373
1374 struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
1375 constant_nr / 8,
1376 constant_nr % 8);
1377
1378 assert(inst->src[i].stride == 0);
1379 inst->src[i].file = HW_REG;
1380 inst->src[i].fixed_hw_reg = byte_offset(
1381 retype(brw_reg, inst->src[i].type),
1382 inst->src[i].subreg_offset);
1383 }
1384 }
1385 }
1386
1387 /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
1388 this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
1389 }
1390
1391 void
1392 fs_visitor::calculate_urb_setup()
1393 {
1394 assert(stage == MESA_SHADER_FRAGMENT);
1395 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1396 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1397
1398 memset(prog_data->urb_setup, -1,
1399 sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
1400
1401 int urb_next = 0;
1402 /* Figure out where each of the incoming setup attributes lands. */
1403 if (devinfo->gen >= 6) {
1404 if (_mesa_bitcount_64(nir->info.inputs_read &
1405 BRW_FS_VARYING_INPUT_MASK) <= 16) {
1406 /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
1407 * first 16 varying inputs, so we can put them wherever we want.
1408 * Just put them in order.
1409 *
1410 * This is useful because it means that (a) inputs not used by the
1411 * fragment shader won't take up valuable register space, and (b) we
1412 * won't have to recompile the fragment shader if it gets paired with
1413 * a different vertex (or geometry) shader.
1414 */
1415 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1416 if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1417 BITFIELD64_BIT(i)) {
1418 prog_data->urb_setup[i] = urb_next++;
1419 }
1420 }
1421 } else {
1422 /* We have enough input varyings that the SF/SBE pipeline stage can't
1423 * arbitrarily rearrange them to suit our whim; we have to put them
1424 * in an order that matches the output of the previous pipeline stage
1425 * (geometry or vertex shader).
1426 */
1427 struct brw_vue_map prev_stage_vue_map;
1428 brw_compute_vue_map(devinfo, &prev_stage_vue_map,
1429 key->input_slots_valid,
1430 nir->info.separate_shader);
1431 int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
1432 assert(prev_stage_vue_map.num_slots <= first_slot + 32);
1433 for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
1434 slot++) {
1435 int varying = prev_stage_vue_map.slot_to_varying[slot];
1436 /* Note that varying == BRW_VARYING_SLOT_COUNT when a slot is
1437 * unused.
1438 */
1439 if (varying != BRW_VARYING_SLOT_COUNT &&
1440 (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1441 BITFIELD64_BIT(varying))) {
1442 prog_data->urb_setup[varying] = slot - first_slot;
1443 }
1444 }
1445 urb_next = prev_stage_vue_map.num_slots - first_slot;
1446 }
1447 } else {
1448 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
1449 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1450 /* Point size is packed into the header, not as a general attribute */
1451 if (i == VARYING_SLOT_PSIZ)
1452 continue;
1453
1454 if (key->input_slots_valid & BITFIELD64_BIT(i)) {
1455 /* The back color slot is skipped when the front color is
1456 * also written to. In addition, some slots can be
1457 * written in the vertex shader and not read in the
1458 * fragment shader. So the register number must always be
1459 * incremented, mapped or not.
1460 */
1461 if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
1462 prog_data->urb_setup[i] = urb_next;
1463 urb_next++;
1464 }
1465 }
1466
1467 /*
1468 * It's an FS-only attribute, and we did interpolation for this attribute
1469 * in the SF thread. So, count it here, too.
1470 *
1471 * See compile_sf_prog() for more info.
1472 */
1473 if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
1474 prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
1475 }
1476
1477 prog_data->num_varying_inputs = urb_next;
1478 }
1479
1480 void
1481 fs_visitor::assign_urb_setup()
1482 {
1483 assert(stage == MESA_SHADER_FRAGMENT);
1484 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1485
1486 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1487
1488 /* Offset all the urb_setup[] index by the actual position of the
1489 * setup regs, now that the location of the constants has been chosen.
1490 */
1491 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1492 if (inst->opcode == FS_OPCODE_LINTERP) {
1493 assert(inst->src[1].file == HW_REG);
1494 inst->src[1].fixed_hw_reg.nr += urb_start;
1495 }
1496
1497 if (inst->opcode == FS_OPCODE_CINTERP) {
1498 assert(inst->src[0].file == HW_REG);
1499 inst->src[0].fixed_hw_reg.nr += urb_start;
1500 }
1501 }
1502
1503 /* Each attribute is 4 setup channels, each of which is half a reg. */
1504 this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
1505 }
1506
1507 void
1508 fs_visitor::assign_vs_urb_setup()
1509 {
1510 brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;
1511 int grf, count, slot, channel, attr;
1512
1513 assert(stage == MESA_SHADER_VERTEX);
1514 count = _mesa_bitcount_64(vs_prog_data->inputs_read);
1515 if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid)
1516 count++;
1517
1518 /* Each attribute is 4 regs. */
1519 this->first_non_payload_grf += count * 4;
1520
1521 unsigned vue_entries =
1522 MAX2(count, vs_prog_data->base.vue_map.num_slots);
1523
1524 vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4;
1525 vs_prog_data->base.urb_read_length = (count + 1) / 2;
1526
1527 assert(vs_prog_data->base.urb_read_length <= 15);
1528
1529 /* Rewrite all ATTR file references to the hw grf that they land in. */
1530 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1531 for (int i = 0; i < inst->sources; i++) {
1532 if (inst->src[i].file == ATTR) {
1533
1534 if (inst->src[i].reg == VERT_ATTRIB_MAX) {
1535 slot = count - 1;
1536 } else {
1537 /* Attributes come in as a contiguous block, ordered by their
1538 * gl_vert_attrib value. That means we can compute the slot
1539 * number for an attribute by masking out the enabled
1540 * attributes before it and counting the bits.
1541 */
1542 attr = inst->src[i].reg + inst->src[i].reg_offset / 4;
1543 slot = _mesa_bitcount_64(vs_prog_data->inputs_read &
1544 BITFIELD64_MASK(attr));
1545 }
1546
1547 channel = inst->src[i].reg_offset & 3;
1548
1549 grf = payload.num_regs +
1550 prog_data->curb_read_length +
1551 slot * 4 + channel;
1552
1553 inst->src[i].file = HW_REG;
1554 inst->src[i].fixed_hw_reg =
1555 stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1556 inst->src[i].subreg_offset),
1557 inst->exec_size * inst->src[i].stride,
1558 inst->exec_size, inst->src[i].stride);
1559 }
1560 }
1561 }
1562 }
1563
1564 /**
1565 * Split large virtual GRFs into separate components if we can.
1566 *
1567 * This mostly duplicates what brw_fs_vector_splitting does,
1568 * but that pass is very conservative because it's afraid of doing
1569 * splitting that doesn't result in real progress after the rest of
1570 * the optimization phases, which would cause infinite looping in
1571 * optimization. We can do it once here, safely. This also has the
1572 * opportunity to split interpolated values, or maybe even uniforms,
1573 * which we don't have at the IR level.
1574 *
1575 * We want to split, because virtual GRFs are what we register
1576 * allocate and spill (due to contiguousness requirements for some
1577 * instructions), and they're what we naturally generate in the
1578 * codegen process, but most virtual GRFs don't actually need to be
1579 * contiguous sets of GRFs. If we split, we'll end up with reduced
1580 * live intervals and better dead code elimination and coalescing.
1581 */
1582 void
1583 fs_visitor::split_virtual_grfs()
1584 {
1585 int num_vars = this->alloc.count;
1586
1587 /* Count the total number of registers */
1588 int reg_count = 0;
1589 int vgrf_to_reg[num_vars];
1590 for (int i = 0; i < num_vars; i++) {
1591 vgrf_to_reg[i] = reg_count;
1592 reg_count += alloc.sizes[i];
1593 }
1594
1595 /* An array of "split points". For each register slot, this indicates
1596 * if this slot can be separated from the previous slot. Every time an
1597 * instruction uses multiple elements of a register (as a source or
1598 * destination), we mark the used slots as inseparable. Then we go
1599 * through and split the registers into the smallest pieces we can.
1600 */
1601 bool split_points[reg_count];
1602 memset(split_points, 0, sizeof(split_points));
1603
1604 /* Mark all used registers as fully splittable */
1605 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1606 if (inst->dst.file == GRF) {
1607 int reg = vgrf_to_reg[inst->dst.reg];
1608 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.reg]; j++)
1609 split_points[reg + j] = true;
1610 }
1611
1612 for (int i = 0; i < inst->sources; i++) {
1613 if (inst->src[i].file == GRF) {
1614 int reg = vgrf_to_reg[inst->src[i].reg];
1615 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].reg]; j++)
1616 split_points[reg + j] = true;
1617 }
1618 }
1619 }
1620
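/* Now mark the slots covered by any multi-register write or read as
 * inseparable, so those spans stay contiguous after splitting.
 */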
1621 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1622 if (inst->dst.file == GRF) {
1623 int reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
1624 for (int j = 1; j < inst->regs_written; j++)
1625 split_points[reg + j] = false;
1626 }
1627 for (int i = 0; i < inst->sources; i++) {
1628 if (inst->src[i].file == GRF) {
1629 int reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
1630 for (int j = 1; j < inst->regs_read(i); j++)
1631 split_points[reg + j] = false;
1632 }
1633 }
1634 }
1635
1636 int new_virtual_grf[reg_count];
1637 int new_reg_offset[reg_count];
1638
1639 int reg = 0;
1640 for (int i = 0; i < num_vars; i++) {
1641 /* The first one should always be 0 as a quick sanity check. */
1642 assert(split_points[reg] == false);
1643
1644 /* j = 0 case */
1645 new_reg_offset[reg] = 0;
1646 reg++;
1647 int offset = 1;
1648
1649 /* j > 0 case */
1650 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1651 /* If this is a split point, reset the offset to 0 and allocate a
1652 * new virtual GRF covering the previous 'offset' registers.
1653 */
1654 if (split_points[reg]) {
1655 assert(offset <= MAX_VGRF_SIZE);
1656 int grf = alloc.allocate(offset);
1657 for (int k = reg - offset; k < reg; k++)
1658 new_virtual_grf[k] = grf;
1659 offset = 0;
1660 }
1661 new_reg_offset[reg] = offset;
1662 offset++;
1663 reg++;
1664 }
1665
1666 /* The last one gets the original register number */
1667 assert(offset <= MAX_VGRF_SIZE);
1668 alloc.sizes[i] = offset;
1669 for (int k = reg - offset; k < reg; k++)
1670 new_virtual_grf[k] = i;
1671 }
1672 assert(reg == reg_count);
1673
1674 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1675 if (inst->dst.file == GRF) {
1676 reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
1677 inst->dst.reg = new_virtual_grf[reg];
1678 inst->dst.reg_offset = new_reg_offset[reg];
1679 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1680 }
1681 for (int i = 0; i < inst->sources; i++) {
1682 if (inst->src[i].file == GRF) {
1683 reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
1684 inst->src[i].reg = new_virtual_grf[reg];
1685 inst->src[i].reg_offset = new_reg_offset[reg];
1686 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1687 }
1688 }
1689 }
1690 invalidate_live_intervals();
1691 }
1692
1693 /**
1694 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1695 *
1696 * During code generation, we create tons of temporary variables, many of
1697 * which get immediately killed and are never used again. Yet, in later
1698 * optimization and analysis passes, such as compute_live_intervals, we need
1699 * to loop over all the virtual GRFs. Compacting them can save a lot of
1700 * overhead.
1701 */
1702 bool
1703 fs_visitor::compact_virtual_grfs()
1704 {
1705 bool progress = false;
1706 int remap_table[this->alloc.count];
1707 memset(remap_table, -1, sizeof(remap_table));
1708
1709 /* Mark which virtual GRFs are used. */
1710 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1711 if (inst->dst.file == GRF)
1712 remap_table[inst->dst.reg] = 0;
1713
1714 for (int i = 0; i < inst->sources; i++) {
1715 if (inst->src[i].file == GRF)
1716 remap_table[inst->src[i].reg] = 0;
1717 }
1718 }
1719
1720 /* Compact the GRF arrays. */
1721 int new_index = 0;
1722 for (unsigned i = 0; i < this->alloc.count; i++) {
1723 if (remap_table[i] == -1) {
1724 /* We just found an unused register. This means that we are
1725 * actually going to compact something.
1726 */
1727 progress = true;
1728 } else {
1729 remap_table[i] = new_index;
1730 alloc.sizes[new_index] = alloc.sizes[i];
1731 invalidate_live_intervals();
1732 ++new_index;
1733 }
1734 }
1735
1736 this->alloc.count = new_index;
1737
1738 /* Patch all the instructions to use the newly renumbered registers */
1739 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1740 if (inst->dst.file == GRF)
1741 inst->dst.reg = remap_table[inst->dst.reg];
1742
1743 for (int i = 0; i < inst->sources; i++) {
1744 if (inst->src[i].file == GRF)
1745 inst->src[i].reg = remap_table[inst->src[i].reg];
1746 }
1747 }
1748
1749 /* Patch all the references to delta_xy, since they're used in register
1750 * allocation. If they're unused, switch them to BAD_FILE so we don't
1751 * think some random VGRF is delta_xy.
1752 */
1753 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
1754 if (delta_xy[i].file == GRF) {
1755 if (remap_table[delta_xy[i].reg] != -1) {
1756 delta_xy[i].reg = remap_table[delta_xy[i].reg];
1757 } else {
1758 delta_xy[i].file = BAD_FILE;
1759 }
1760 }
1761 }
1762
1763 return progress;
1764 }
1765
1766 /**
1767 * Assign UNIFORM file registers to either push constants or pull constants.
1768 *
1769 * We allow a fragment shader to have more than the specified minimum
1770 * maximum number of fragment shader uniform components (64). If
1771 * there are too many of these, they'd fill up all of register space.
1772 * So, this will push some of them out to the pull constant buffer and
1773 * update the program to load them. We also use pull constants for all
1774 * indirect constant loads because we don't support indirect accesses in
1775 * registers yet.
1776 */
1777 void
1778 fs_visitor::assign_constant_locations()
1779 {
1780 /* Only the first compile (SIMD8 mode) gets to decide on locations. */
1781 if (dispatch_width != 8)
1782 return;
1783
1784 unsigned int num_pull_constants = 0;
1785
1786 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
1787 memset(pull_constant_loc, -1, sizeof(pull_constant_loc[0]) * uniforms);
1788
1789 bool is_live[uniforms];
1790 memset(is_live, 0, sizeof(is_live));
1791
1792 /* First, we walk through the instructions and do two things:
1793 *
1794 * 1) Figure out which uniforms are live.
1795 *
1796 * 2) Find all indirect accesses of uniform arrays and flag them as needing
1797 * to go into the pull constant buffer.
1798 *
1799 * Note that we don't move constant-indexed accesses to arrays. No
1800 * testing has been done of the performance impact of this choice.
1801 */
1802 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
1803 for (int i = 0 ; i < inst->sources; i++) {
1804 if (inst->src[i].file != UNIFORM)
1805 continue;
1806
1807 if (inst->src[i].reladdr) {
1808 int uniform = inst->src[i].reg;
1809
1810 /* If this array isn't already present in the pull constant buffer,
1811 * add it.
1812 */
1813 if (pull_constant_loc[uniform] == -1) {
1814 assert(param_size[uniform]);
1815 for (int j = 0; j < param_size[uniform]; j++)
1816 pull_constant_loc[uniform + j] = num_pull_constants++;
1817 }
1818 } else {
1819 /* Mark the one accessed uniform as live */
1820 int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
1821 if (constant_nr >= 0 && constant_nr < (int) uniforms)
1822 is_live[constant_nr] = true;
1823 }
1824 }
1825 }
1826
1827 /* Only allow 16 registers (128 uniform components) as push constants.
1828 *
1829 * Just demote the end of the list. We could probably do better
1830 * here, demoting things that are rarely used in the program first.
1831 *
1832 * If changing this value, note the limitation about total_regs in
1833 * brw_curbe.c.
1834 */
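/* For example, a shader with 200 live uniform components keeps the first
 * 128 of them as push constants and demotes the remaining 72 to the pull
 * buffer in the loop below.
 */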
1835 unsigned int max_push_components = 16 * 8;
1836 unsigned int num_push_constants = 0;
1837
1838 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
1839
1840 for (unsigned int i = 0; i < uniforms; i++) {
1841 if (!is_live[i] || pull_constant_loc[i] != -1) {
1842 /* This UNIFORM register is either dead, or has already been demoted
1843 * to a pull const. Mark it as no longer living in the param[] array.
1844 */
1845 push_constant_loc[i] = -1;
1846 continue;
1847 }
1848
1849 if (num_push_constants < max_push_components) {
1850 /* Retain as a push constant. Record the location in the params[]
1851 * array.
1852 */
1853 push_constant_loc[i] = num_push_constants++;
1854 } else {
1855 /* Demote to a pull constant. */
1856 push_constant_loc[i] = -1;
1857 pull_constant_loc[i] = num_pull_constants++;
1858 }
1859 }
1860
1861 stage_prog_data->nr_params = num_push_constants;
1862 stage_prog_data->nr_pull_params = num_pull_constants;
1863
1864 /* Up until now, the param[] array has been indexed by reg + reg_offset
1865 * of UNIFORM registers. Move pull constants into pull_param[] and
1866 * condense param[] to only contain the uniforms we chose to push.
1867 *
1868 * NOTE: Because we are condensing the params[] array, we know that
1869 * push_constant_loc[i] <= i and we can do it in one smooth loop without
1870 * having to make a copy.
1871 */
1872 for (unsigned int i = 0; i < uniforms; i++) {
1873 const gl_constant_value *value = stage_prog_data->param[i];
1874
1875 if (pull_constant_loc[i] != -1) {
1876 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
1877 } else if (push_constant_loc[i] != -1) {
1878 stage_prog_data->param[push_constant_loc[i]] = value;
1879 }
1880 }
1881 }
1882
1883 /**
1884 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
1885 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
1886 */
1887 void
1888 fs_visitor::demote_pull_constants()
1889 {
1890 foreach_block_and_inst (block, fs_inst, inst, cfg) {
1891 for (int i = 0; i < inst->sources; i++) {
1892 if (inst->src[i].file != UNIFORM)
1893 continue;
1894
1895 int pull_index;
1896 unsigned location = inst->src[i].reg + inst->src[i].reg_offset;
1897 if (location >= uniforms) /* Out of bounds access */
1898 pull_index = -1;
1899 else
1900 pull_index = pull_constant_loc[location];
1901
1902 if (pull_index == -1)
1903 continue;
1904
1905 /* Set up the annotation tracking for newly generated instructions. */
1906 const fs_builder ibld(this, block, inst);
1907 fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
1908 fs_reg dst = vgrf(glsl_type::float_type);
1909
1910 assert(inst->src[i].stride == 0);
1911
1912 /* Generate a pull load into dst. */
1913 if (inst->src[i].reladdr) {
1914 VARYING_PULL_CONSTANT_LOAD(ibld, dst,
1915 surf_index,
1916 *inst->src[i].reladdr,
1917 pull_index);
1918 inst->src[i].reladdr = NULL;
1919 inst->src[i].stride = 1;
1920 } else {
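/* Illustration: pull_index 7 loads the vec4 at byte offset
 * (7 * 4) & ~15 == 16, and set_smear(7 & 3) below picks component 3 of
 * that vec4.
 */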
1921 const fs_builder ubld = ibld.exec_all().group(8, 0);
1922 fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
1923 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
1924 dst, surf_index, offset);
1925 inst->src[i].set_smear(pull_index & 3);
1926 }
1927
1928 /* Rewrite the instruction to use the temporary VGRF. */
1929 inst->src[i].file = GRF;
1930 inst->src[i].reg = dst.reg;
1931 inst->src[i].reg_offset = 0;
1932 }
1933 }
1934 invalidate_live_intervals();
1935 }
1936
1937 bool
1938 fs_visitor::opt_algebraic()
1939 {
1940 bool progress = false;
1941
1942 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1943 switch (inst->opcode) {
1944 case BRW_OPCODE_MOV:
1945 if (inst->src[0].file != IMM)
1946 break;
1947
1948 if (inst->saturate) {
1949 if (inst->dst.type != inst->src[0].type)
1950 assert(!"unimplemented: saturate mixed types");
1951
1952 if (brw_saturate_immediate(inst->dst.type,
1953 &inst->src[0].fixed_hw_reg)) {
1954 inst->saturate = false;
1955 progress = true;
1956 }
1957 }
1958 break;
1959
1960 case BRW_OPCODE_MUL:
1961 if (inst->src[1].file != IMM)
1962 continue;
1963
1964 /* a * 1.0 = a */
1965 if (inst->src[1].is_one()) {
1966 inst->opcode = BRW_OPCODE_MOV;
1967 inst->src[1] = reg_undef;
1968 progress = true;
1969 break;
1970 }
1971
1972 /* a * -1.0 = -a */
1973 if (inst->src[1].is_negative_one()) {
1974 inst->opcode = BRW_OPCODE_MOV;
1975 inst->src[0].negate = !inst->src[0].negate;
1976 inst->src[1] = reg_undef;
1977 progress = true;
1978 break;
1979 }
1980
1981 /* a * 0.0 = 0.0 */
1982 if (inst->src[1].is_zero()) {
1983 inst->opcode = BRW_OPCODE_MOV;
1984 inst->src[0] = inst->src[1];
1985 inst->src[1] = reg_undef;
1986 progress = true;
1987 break;
1988 }
1989
1990 if (inst->src[0].file == IMM) {
1991 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
1992 inst->opcode = BRW_OPCODE_MOV;
1993 inst->src[0].fixed_hw_reg.dw1.f *= inst->src[1].fixed_hw_reg.dw1.f;
1994 inst->src[1] = reg_undef;
1995 progress = true;
1996 break;
1997 }
1998 break;
1999 case BRW_OPCODE_ADD:
2000 if (inst->src[1].file != IMM)
2001 continue;
2002
2003 /* a + 0.0 = a */
2004 if (inst->src[1].is_zero()) {
2005 inst->opcode = BRW_OPCODE_MOV;
2006 inst->src[1] = reg_undef;
2007 progress = true;
2008 break;
2009 }
2010
2011 if (inst->src[0].file == IMM) {
2012 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2013 inst->opcode = BRW_OPCODE_MOV;
2014 inst->src[0].fixed_hw_reg.dw1.f += inst->src[1].fixed_hw_reg.dw1.f;
2015 inst->src[1] = reg_undef;
2016 progress = true;
2017 break;
2018 }
2019 break;
2020 case BRW_OPCODE_OR:
2021 if (inst->src[0].equals(inst->src[1])) {
2022 inst->opcode = BRW_OPCODE_MOV;
2023 inst->src[1] = reg_undef;
2024 progress = true;
2025 break;
2026 }
2027 break;
2028 case BRW_OPCODE_LRP:
2029 if (inst->src[1].equals(inst->src[2])) {
2030 inst->opcode = BRW_OPCODE_MOV;
2031 inst->src[0] = inst->src[1];
2032 inst->src[1] = reg_undef;
2033 inst->src[2] = reg_undef;
2034 progress = true;
2035 break;
2036 }
2037 break;
2038 case BRW_OPCODE_CMP:
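/* -|x| >= 0 can only hold when x == 0, so the source modifiers can be
 * dropped and the comparison turned into a test for equality with zero.
 */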
2039 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2040 inst->src[0].abs &&
2041 inst->src[0].negate &&
2042 inst->src[1].is_zero()) {
2043 inst->src[0].abs = false;
2044 inst->src[0].negate = false;
2045 inst->conditional_mod = BRW_CONDITIONAL_Z;
2046 progress = true;
2047 break;
2048 }
2049 break;
2050 case BRW_OPCODE_SEL:
2051 if (inst->src[0].equals(inst->src[1])) {
2052 inst->opcode = BRW_OPCODE_MOV;
2053 inst->src[1] = reg_undef;
2054 inst->predicate = BRW_PREDICATE_NONE;
2055 inst->predicate_inverse = false;
2056 progress = true;
2057 } else if (inst->saturate && inst->src[1].file == IMM) {
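/* With saturate, sel.l(e) dst, x, imm computes clamp(min(x, imm)); if
 * imm >= 1.0 that is just clamp(x), so the SEL reduces to a saturating
 * MOV. The same holds for sel.g(e) with imm <= 0.0.
 */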
2058 switch (inst->conditional_mod) {
2059 case BRW_CONDITIONAL_LE:
2060 case BRW_CONDITIONAL_L:
2061 switch (inst->src[1].type) {
2062 case BRW_REGISTER_TYPE_F:
2063 if (inst->src[1].fixed_hw_reg.dw1.f >= 1.0f) {
2064 inst->opcode = BRW_OPCODE_MOV;
2065 inst->src[1] = reg_undef;
2066 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2067 progress = true;
2068 }
2069 break;
2070 default:
2071 break;
2072 }
2073 break;
2074 case BRW_CONDITIONAL_GE:
2075 case BRW_CONDITIONAL_G:
2076 switch (inst->src[1].type) {
2077 case BRW_REGISTER_TYPE_F:
2078 if (inst->src[1].fixed_hw_reg.dw1.f <= 0.0f) {
2079 inst->opcode = BRW_OPCODE_MOV;
2080 inst->src[1] = reg_undef;
2081 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2082 progress = true;
2083 }
2084 break;
2085 default:
2086 break;
2087 }
2088 default:
2089 break;
2090 }
2091 }
2092 break;
2093 case BRW_OPCODE_MAD:
2094 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2095 inst->opcode = BRW_OPCODE_MOV;
2096 inst->src[1] = reg_undef;
2097 inst->src[2] = reg_undef;
2098 progress = true;
2099 } else if (inst->src[0].is_zero()) {
2100 inst->opcode = BRW_OPCODE_MUL;
2101 inst->src[0] = inst->src[2];
2102 inst->src[2] = reg_undef;
2103 progress = true;
2104 } else if (inst->src[1].is_one()) {
2105 inst->opcode = BRW_OPCODE_ADD;
2106 inst->src[1] = inst->src[2];
2107 inst->src[2] = reg_undef;
2108 progress = true;
2109 } else if (inst->src[2].is_one()) {
2110 inst->opcode = BRW_OPCODE_ADD;
2111 inst->src[2] = reg_undef;
2112 progress = true;
2113 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2114 inst->opcode = BRW_OPCODE_ADD;
2115 inst->src[1].fixed_hw_reg.dw1.f *= inst->src[2].fixed_hw_reg.dw1.f;
2116 inst->src[2] = reg_undef;
2117 progress = true;
2118 }
2119 break;
2120 case SHADER_OPCODE_RCP: {
2121 fs_inst *prev = (fs_inst *)inst->prev;
2122 if (prev->opcode == SHADER_OPCODE_SQRT) {
2123 if (inst->src[0].equals(prev->dst)) {
2124 inst->opcode = SHADER_OPCODE_RSQ;
2125 inst->src[0] = prev->src[0];
2126 progress = true;
2127 }
2128 }
2129 break;
2130 }
2131 case SHADER_OPCODE_BROADCAST:
2132 if (is_uniform(inst->src[0])) {
2133 inst->opcode = BRW_OPCODE_MOV;
2134 inst->sources = 1;
2135 inst->force_writemask_all = true;
2136 progress = true;
2137 } else if (inst->src[1].file == IMM) {
2138 inst->opcode = BRW_OPCODE_MOV;
2139 inst->src[0] = component(inst->src[0],
2140 inst->src[1].fixed_hw_reg.dw1.ud);
2141 inst->sources = 1;
2142 inst->force_writemask_all = true;
2143 progress = true;
2144 }
2145 break;
2146
2147 default:
2148 break;
2149 }
2150
2151 /* Swap if src[0] is immediate. */
2152 if (progress && inst->is_commutative()) {
2153 if (inst->src[0].file == IMM) {
2154 fs_reg tmp = inst->src[1];
2155 inst->src[1] = inst->src[0];
2156 inst->src[0] = tmp;
2157 }
2158 }
2159 }
2160 return progress;
2161 }
2162
2163 /**
2164 * Optimize sample messages that have constant zero values for the trailing
2165 * texture coordinates. We can just reduce the message length for these
2166 * instructions instead of reserving a register for it. Trailing parameters
2167 * that aren't sent default to zero anyway. This will cause the dead code
2168 * eliminator to remove the MOV instruction that would otherwise be emitted to
2169 * set up the zero value.
2170 */
2171 bool
2172 fs_visitor::opt_zero_samples()
2173 {
2174 /* Gen4 infers the texturing opcode based on the message length so we can't
2175 * change it.
2176 */
2177 if (devinfo->gen < 5)
2178 return false;
2179
2180 bool progress = false;
2181
2182 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2183 if (!inst->is_tex())
2184 continue;
2185
2186 fs_inst *load_payload = (fs_inst *) inst->prev;
2187
2188 if (load_payload->is_head_sentinel() ||
2189 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2190 continue;
2191
2192 /* We don't want to remove the message header or the first parameter.
2193 * Removing the first parameter is not allowed, see the Haswell PRM
2194 * volume 7, page 149:
2195 *
2196 * "Parameter 0 is required except for the sampleinfo message, which
2197 * has no parameter 0"
2198 */
2199 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2200 load_payload->src[(inst->mlen - inst->header_size) /
2201 (inst->exec_size / 8) +
2202 inst->header_size - 1].is_zero()) {
2203 inst->mlen -= inst->exec_size / 8;
2204 progress = true;
2205 }
2206 }
2207
2208 if (progress)
2209 invalidate_live_intervals();
2210
2211 return progress;
2212 }
2213
2214 /**
2215 * Optimize sample messages which are followed by the final RT write.
2216 *
2217 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2218 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2219 * final texturing results copied to the framebuffer write payload and modify
2220 * them to write to the framebuffer directly.
2221 */
2222 bool
2223 fs_visitor::opt_sampler_eot()
2224 {
2225 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2226
2227 if (stage != MESA_SHADER_FRAGMENT)
2228 return false;
2229
2230 if (devinfo->gen < 9 && !devinfo->is_cherryview)
2231 return false;
2232
2233 /* FINISHME: It should be possible to implement this optimization when there
2234 * are multiple drawbuffers.
2235 */
2236 if (key->nr_color_regions != 1)
2237 return false;
2238
2239 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2240 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2241 fs_inst *fb_write = (fs_inst *)block->end();
2242 assert(fb_write->eot);
2243 assert(fb_write->opcode == FS_OPCODE_FB_WRITE);
2244
2245 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2246
2247 /* There wasn't one; nothing to do. */
2248 if (unlikely(tex_inst->is_head_sentinel()) || !tex_inst->is_tex())
2249 return false;
2250
2251 /* This optimization doesn't seem to work for textureGather for some
2252 * reason. I can't find any documentation or known workarounds to indicate
2253 * that this is expected, but considering that it is probably pretty
2254 * unlikely that a shader would directly write out the results from
2255 * textureGather we might as well just disable it.
2256 */
2257 if (tex_inst->opcode == SHADER_OPCODE_TG4 ||
2258 tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
2259 return false;
2260
2261 /* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
2262 * It's very likely to be the previous instruction.
2263 */
2264 fs_inst *load_payload = (fs_inst *) tex_inst->prev;
2265 if (load_payload->is_head_sentinel() ||
2266 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2267 return false;
2268
2269 assert(!tex_inst->eot); /* We can't get here twice */
2270 assert((tex_inst->offset & (0xff << 24)) == 0);
2271
2272 const fs_builder ibld(this, block, tex_inst);
2273
2274 tex_inst->offset |= fb_write->target << 24;
2275 tex_inst->eot = true;
2276 tex_inst->dst = ibld.null_reg_ud();
2277 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2278
2279 /* If a header is present, marking the eot is sufficient. Otherwise, we need
2280 * to create a new LOAD_PAYLOAD command with the same sources and a space
2281 * saved for the header. Using a new destination register not only makes sure
2282 * we have enough space, but it will make sure the dead code eliminator kills
2283 * the instruction that this will replace.
2284 */
2285 if (tex_inst->header_size != 0)
2286 return true;
2287
2288 fs_reg send_header = ibld.vgrf(BRW_REGISTER_TYPE_F,
2289 load_payload->sources + 1);
2290 fs_reg *new_sources =
2291 ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
2292
2293 new_sources[0] = fs_reg();
2294 for (int i = 0; i < load_payload->sources; i++)
2295 new_sources[i+1] = load_payload->src[i];
2296
2297 /* The LOAD_PAYLOAD helper seems like the obvious choice here. However, it
2298 * requires a lot of information about the sources to appropriately figure
2299 * out the number of registers needed to be used. Given this stage in our
2300 * optimization, we may not have the appropriate GRFs required by
2301 * LOAD_PAYLOAD at this point (copy propagation). Therefore, we need to
2302 * manually emit the instruction.
2303 */
2304 fs_inst *new_load_payload = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD,
2305 load_payload->exec_size,
2306 send_header,
2307 new_sources,
2308 load_payload->sources + 1);
2309
2310 new_load_payload->regs_written = load_payload->regs_written + 1;
2311 new_load_payload->header_size = 1;
2312 tex_inst->mlen++;
2313 tex_inst->header_size = 1;
2314 tex_inst->insert_before(cfg->blocks[cfg->num_blocks - 1], new_load_payload);
2315 tex_inst->src[0] = send_header;
2316
2317 return true;
2318 }
2319
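/**
 * Give a fresh VGRF number to any value that completely overwrites an
 * existing VGRF outside of control flow, so that the old and new values end
 * up with independent live intervals.  References to delta_xy are remapped
 * along with everything else since register allocation inspects them
 * directly.
 */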
2320 bool
2321 fs_visitor::opt_register_renaming()
2322 {
2323 bool progress = false;
2324 int depth = 0;
2325
2326 int remap[alloc.count];
2327 memset(remap, -1, sizeof(int) * alloc.count);
2328
2329 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2330 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2331 depth++;
2332 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2333 inst->opcode == BRW_OPCODE_WHILE) {
2334 depth--;
2335 }
2336
2337 /* Rewrite instruction sources. */
2338 for (int i = 0; i < inst->sources; i++) {
2339 if (inst->src[i].file == GRF &&
2340 remap[inst->src[i].reg] != -1 &&
2341 remap[inst->src[i].reg] != inst->src[i].reg) {
2342 inst->src[i].reg = remap[inst->src[i].reg];
2343 progress = true;
2344 }
2345 }
2346
2347 const int dst = inst->dst.reg;
2348
2349 if (depth == 0 &&
2350 inst->dst.file == GRF &&
2351 alloc.sizes[inst->dst.reg] == inst->exec_size / 8 &&
2352 !inst->is_partial_write()) {
2353 if (remap[dst] == -1) {
2354 remap[dst] = dst;
2355 } else {
2356 remap[dst] = alloc.allocate(inst->exec_size / 8);
2357 inst->dst.reg = remap[dst];
2358 progress = true;
2359 }
2360 } else if (inst->dst.file == GRF &&
2361 remap[dst] != -1 &&
2362 remap[dst] != dst) {
2363 inst->dst.reg = remap[dst];
2364 progress = true;
2365 }
2366 }
2367
2368 if (progress) {
2369 invalidate_live_intervals();
2370
2371 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2372 if (delta_xy[i].file == GRF && remap[delta_xy[i].reg] != -1) {
2373 delta_xy[i].reg = remap[delta_xy[i].reg];
2374 }
2375 }
2376 }
2377
2378 return progress;
2379 }
2380
2381 /**
2382 * Remove redundant or useless discard jumps.
2383 *
2384 * For example, we can eliminate jumps in the following sequence:
2385 *
2386 * discard-jump (redundant with the next jump)
2387 * discard-jump (useless; jumps to the next instruction)
2388 * placeholder-halt
2389 */
2390 bool
2391 fs_visitor::opt_redundant_discard_jumps()
2392 {
2393 bool progress = false;
2394
2395 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2396
2397 fs_inst *placeholder_halt = NULL;
2398 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2399 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2400 placeholder_halt = inst;
2401 break;
2402 }
2403 }
2404
2405 if (!placeholder_halt)
2406 return false;
2407
2408 /* Delete any discard jumps (which become HALTs) immediately before the placeholder halt. */
2409 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2410 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2411 prev = (fs_inst *) placeholder_halt->prev) {
2412 prev->remove(last_bblock);
2413 progress = true;
2414 }
2415
2416 if (progress)
2417 invalidate_live_intervals();
2418
2419 return progress;
2420 }
2421
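/**
 * Try to rewrite the instruction that computed a GRF temporary so that it
 * writes directly into the MRF that a following MOV copies it to, allowing
 * the MOV to be deleted.  Only relevant before Gen7, which has no MRFs.
 */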
2422 bool
2423 fs_visitor::compute_to_mrf()
2424 {
2425 bool progress = false;
2426 int next_ip = 0;
2427
2428 /* No MRFs on Gen >= 7. */
2429 if (devinfo->gen >= 7)
2430 return false;
2431
2432 calculate_live_intervals();
2433
2434 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2435 int ip = next_ip;
2436 next_ip++;
2437
2438 if (inst->opcode != BRW_OPCODE_MOV ||
2439 inst->is_partial_write() ||
2440 inst->dst.file != MRF || inst->src[0].file != GRF ||
2441 inst->dst.type != inst->src[0].type ||
2442 inst->src[0].abs || inst->src[0].negate ||
2443 !inst->src[0].is_contiguous() ||
2444 inst->src[0].subreg_offset)
2445 continue;
2446
2447 /* Work out which hardware MRF registers are written by this
2448 * instruction.
2449 */
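/* A SIMD16 write touches two adjacent MRFs (m and m + 1); a COMPR4 write
 * touches m and m + 4.
 */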
2450 int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
2451 int mrf_high;
2452 if (inst->dst.reg & BRW_MRF_COMPR4) {
2453 mrf_high = mrf_low + 4;
2454 } else if (inst->exec_size == 16) {
2455 mrf_high = mrf_low + 1;
2456 } else {
2457 mrf_high = mrf_low;
2458 }
2459
2460 /* Can't compute-to-MRF this GRF if someone else was going to
2461 * read it later.
2462 */
2463 if (this->virtual_grf_end[inst->src[0].reg] > ip)
2464 continue;
2465
2466 /* Found a move of a GRF to a MRF. Let's see if we can go
2467 * rewrite the thing that made this GRF to write into the MRF.
2468 */
2469 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
2470 if (scan_inst->dst.file == GRF &&
2471 scan_inst->dst.reg == inst->src[0].reg) {
2472 /* Found the last thing to write our reg we want to turn
2473 * into a compute-to-MRF.
2474 */
2475
2476 /* If this one instruction didn't populate all the
2477 * channels, bail. We might be able to rewrite everything
2478 * that writes that reg, but it would require smarter
2479 * tracking to delay the rewriting until complete success.
2480 */
2481 if (scan_inst->is_partial_write())
2482 break;
2483
2484 /* Things returning more than one register would need us to
2485 * understand coalescing out more than one MOV at a time.
2486 */
2487 if (scan_inst->regs_written > scan_inst->exec_size / 8)
2488 break;
2489
2490 /* SEND instructions can't have MRF as a destination. */
2491 if (scan_inst->mlen)
2492 break;
2493
2494 if (devinfo->gen == 6) {
2495 /* gen6 math instructions must have the destination be
2496 * GRF, so no compute-to-MRF for them.
2497 */
2498 if (scan_inst->is_math()) {
2499 break;
2500 }
2501 }
2502
2503 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
2504 /* Found the creator of our MRF's source value. */
2505 scan_inst->dst.file = MRF;
2506 scan_inst->dst.reg = inst->dst.reg;
2507 scan_inst->saturate |= inst->saturate;
2508 inst->remove(block);
2509 progress = true;
2510 }
2511 break;
2512 }
2513
2514 /* We don't handle control flow here. Most computation of
2515 * values that end up in MRFs are shortly before the MRF
2516 * write anyway.
2517 */
2518 if (block->start() == scan_inst)
2519 break;
2520
2521 /* You can't read from an MRF, so if someone else reads our
2522 * MRF's source GRF that we wanted to rewrite, that stops us.
2523 */
2524 bool interfered = false;
2525 for (int i = 0; i < scan_inst->sources; i++) {
2526 if (scan_inst->src[i].file == GRF &&
2527 scan_inst->src[i].reg == inst->src[0].reg &&
2528 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
2529 interfered = true;
2530 }
2531 }
2532 if (interfered)
2533 break;
2534
2535 if (scan_inst->dst.file == MRF) {
2536 /* If somebody else writes our MRF here, we can't
2537 * compute-to-MRF before that.
2538 */
2539 int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
2540 int scan_mrf_high;
2541
2542 if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
2543 scan_mrf_high = scan_mrf_low + 4;
2544 } else if (scan_inst->exec_size == 16) {
2545 scan_mrf_high = scan_mrf_low + 1;
2546 } else {
2547 scan_mrf_high = scan_mrf_low;
2548 }
2549
2550 if (mrf_low == scan_mrf_low ||
2551 mrf_low == scan_mrf_high ||
2552 mrf_high == scan_mrf_low ||
2553 mrf_high == scan_mrf_high) {
2554 break;
2555 }
2556 }
2557
2558 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
2559 /* Found a SEND instruction, which means that there are
2560 * live values in MRFs from base_mrf to base_mrf +
2561 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2562 * above it.
2563 */
2564 if (mrf_low >= scan_inst->base_mrf &&
2565 mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
2566 break;
2567 }
2568 if (mrf_high >= scan_inst->base_mrf &&
2569 mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
2570 break;
2571 }
2572 }
2573 }
2574 }
2575
2576 if (progress)
2577 invalidate_live_intervals();
2578
2579 return progress;
2580 }
2581
2582 /**
2583 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2584 * flow. We could probably do better here with some form of divergence
2585 * analysis.
2586 */
2587 bool
2588 fs_visitor::eliminate_find_live_channel()
2589 {
2590 bool progress = false;
2591 unsigned depth = 0;
2592
2593 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2594 switch (inst->opcode) {
2595 case BRW_OPCODE_IF:
2596 case BRW_OPCODE_DO:
2597 depth++;
2598 break;
2599
2600 case BRW_OPCODE_ENDIF:
2601 case BRW_OPCODE_WHILE:
2602 depth--;
2603 break;
2604
2605 case FS_OPCODE_DISCARD_JUMP:
2606 /* This can potentially make control flow non-uniform until the end
2607 * of the program.
2608 */
2609 return progress;
2610
2611 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2612 if (depth == 0) {
2613 inst->opcode = BRW_OPCODE_MOV;
2614 inst->src[0] = fs_reg(0);
2615 inst->sources = 1;
2616 inst->force_writemask_all = true;
2617 progress = true;
2618 }
2619 break;
2620
2621 default:
2622 break;
2623 }
2624 }
2625
2626 return progress;
2627 }
2628
2629 /**
2630 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2631 * instructions to FS_OPCODE_REP_FB_WRITE.
2632 */
2633 void
2634 fs_visitor::emit_repclear_shader()
2635 {
2636 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2637 int base_mrf = 1;
2638 int color_mrf = base_mrf + 2;
2639
2640 fs_inst *mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
2641 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
2642
2643 fs_inst *write;
2644 if (key->nr_color_regions == 1) {
2645 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2646 write->saturate = key->clamp_fragment_color;
2647 write->base_mrf = color_mrf;
2648 write->target = 0;
2649 write->header_size = 0;
2650 write->mlen = 1;
2651 } else {
2652 assume(key->nr_color_regions > 0);
2653 for (int i = 0; i < key->nr_color_regions; ++i) {
2654 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2655 write->saturate = key->clamp_fragment_color;
2656 write->base_mrf = base_mrf;
2657 write->target = i;
2658 write->header_size = 2;
2659 write->mlen = 3;
2660 }
2661 }
2662 write->eot = true;
2663
2664 calculate_cfg();
2665
2666 assign_constant_locations();
2667 assign_curb_setup();
2668
2669 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
2670 assert(mov->src[0].file == HW_REG);
2671 mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
2672 }
2673
2674 /**
2675 * Walks through basic blocks, looking for repeated MRF writes and
2676 * removing the later ones.
2677 */
2678 bool
2679 fs_visitor::remove_duplicate_mrf_writes()
2680 {
2681 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
2682 bool progress = false;
2683
2684 /* Need to update the MRF tracking for compressed instructions. */
2685 if (dispatch_width == 16)
2686 return false;
2687
2688 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2689
2690 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2691 if (inst->is_control_flow()) {
2692 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2693 }
2694
2695 if (inst->opcode == BRW_OPCODE_MOV &&
2696 inst->dst.file == MRF) {
2697 fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
2698 if (prev_inst && inst->equals(prev_inst)) {
2699 inst->remove(block);
2700 progress = true;
2701 continue;
2702 }
2703 }
2704
2705 /* Clear out the last-write records for MRFs that were overwritten. */
2706 if (inst->dst.file == MRF) {
2707 last_mrf_move[inst->dst.reg] = NULL;
2708 }
2709
2710 if (inst->mlen > 0 && inst->base_mrf != -1) {
2711 /* Found a SEND instruction, which will include two or fewer
2712 * implied MRF writes. We could do better here.
2713 */
2714 for (int i = 0; i < implied_mrf_writes(inst); i++) {
2715 last_mrf_move[inst->base_mrf + i] = NULL;
2716 }
2717 }
2718
2719 /* Clear out any MRF move records whose sources got overwritten. */
2720 if (inst->dst.file == GRF) {
2721 for (unsigned int i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
2722 if (last_mrf_move[i] &&
2723 last_mrf_move[i]->src[0].reg == inst->dst.reg) {
2724 last_mrf_move[i] = NULL;
2725 }
2726 }
2727 }
2728
2729 if (inst->opcode == BRW_OPCODE_MOV &&
2730 inst->dst.file == MRF &&
2731 inst->src[0].file == GRF &&
2732 !inst->is_partial_write()) {
2733 last_mrf_move[inst->dst.reg] = inst;
2734 }
2735 }
2736
2737 if (progress)
2738 invalidate_live_intervals();
2739
2740 return progress;
2741 }
2742
2743 static void
2744 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
2745 {
2746 /* Clear the flag for registers that actually got read (as expected). */
2747 for (int i = 0; i < inst->sources; i++) {
2748 int grf;
2749 if (inst->src[i].file == GRF) {
2750 grf = inst->src[i].reg;
2751 } else if (inst->src[i].file == HW_REG &&
2752 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
2753 grf = inst->src[i].fixed_hw_reg.nr;
2754 } else {
2755 continue;
2756 }
2757
2758 if (grf >= first_grf &&
2759 grf < first_grf + grf_len) {
2760 deps[grf - first_grf] = false;
2761 if (inst->exec_size == 16)
2762 deps[grf - first_grf + 1] = false;
2763 }
2764 }
2765 }
2766
2767 /**
2768 * Implements this workaround for the original 965:
2769 *
2770 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
2771 * check for post destination dependencies on this instruction, software
2772 * must ensure that there is no destination hazard for the case of ‘write
2773 * followed by a posted write’ shown in the following example.
2774 *
2775 * 1. mov r3 0
2776 * 2. send r3.xy <rest of send instruction>
2777 * 3. mov r2 r3
2778 *
2779 * Due to no post-destination dependency check on the ‘send’, the above
2780 * code sequence could have two instructions (1 and 2) in flight at the
2781 * same time that both consider ‘r3’ as the target of their final writes.
2782 */
2783 void
2784 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
2785 fs_inst *inst)
2786 {
2787 int write_len = inst->regs_written;
2788 int first_write_grf = inst->dst.reg;
2789 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
2790 assert(write_len < (int)sizeof(needs_dep) - 1);
2791
2792 memset(needs_dep, false, sizeof(needs_dep));
2793 memset(needs_dep, true, write_len);
2794
2795 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
2796
2797 /* Walk backwards looking for writes to registers we're writing which
2798 * aren't read since being written. If we hit the start of the program,
2799 * we assume that there are no outstanding dependencies on entry to the
2800 * program.
2801 */
2802 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
2803 /* If we hit control flow, assume that there *are* outstanding
2804 * dependencies, and force their cleanup before our instruction.
2805 */
2806 if (block->start() == scan_inst) {
2807 for (int i = 0; i < write_len; i++) {
2808 if (needs_dep[i])
2809 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
2810 first_write_grf + i);
2811 }
2812 return;
2813 }
2814
2815 /* We insert our reads as late as possible on the assumption that any
2816 * instruction but a MOV that might have left us an outstanding
2817 * dependency has more latency than a MOV.
2818 */
2819 if (scan_inst->dst.file == GRF) {
2820 for (int i = 0; i < scan_inst->regs_written; i++) {
2821 int reg = scan_inst->dst.reg + i;
2822
2823 if (reg >= first_write_grf &&
2824 reg < first_write_grf + write_len &&
2825 needs_dep[reg - first_write_grf]) {
2826 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
2827 needs_dep[reg - first_write_grf] = false;
2828 if (scan_inst->exec_size == 16)
2829 needs_dep[reg - first_write_grf + 1] = false;
2830 }
2831 }
2832 }
2833
2834 /* Clear the flag for registers that actually got read (as expected). */
2835 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
2836
2837 /* Continue the loop only if we haven't resolved all the dependencies */
2838 int i;
2839 for (i = 0; i < write_len; i++) {
2840 if (needs_dep[i])
2841 break;
2842 }
2843 if (i == write_len)
2844 return;
2845 }
2846 }
2847
2848 /**
2849 * Implements this workaround for the original 965:
2850 *
2851 * "[DevBW, DevCL] Errata: A destination register from a send can not be
2852 * used as a destination register until after it has been sourced by an
2853 * instruction with a different destination register.
2854 */
2855 void
2856 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
2857 {
2858 int write_len = inst->regs_written;
2859 int first_write_grf = inst->dst.reg;
2860 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
2861 assert(write_len < (int)sizeof(needs_dep) - 1);
2862
2863 memset(needs_dep, false, sizeof(needs_dep));
2864 memset(needs_dep, true, write_len);
2865 /* Walk forwards looking for writes to registers we're writing which aren't
2866 * read before being written.
2867 */
2868 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst, block) {
2869 /* If we hit control flow, force resolve all remaining dependencies. */
2870 if (block->end() == scan_inst) {
2871 for (int i = 0; i < write_len; i++) {
2872 if (needs_dep[i])
2873 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
2874 first_write_grf + i);
2875 }
2876 return;
2877 }
2878
2879 /* Clear the flag for registers that actually got read (as expected). */
2880 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
2881
2882 /* We insert our reads as late as possible since they're reading the
2883 * result of a SEND, which has massive latency.
2884 */
2885 if (scan_inst->dst.file == GRF &&
2886 scan_inst->dst.reg >= first_write_grf &&
2887 scan_inst->dst.reg < first_write_grf + write_len &&
2888 needs_dep[scan_inst->dst.reg - first_write_grf]) {
2889 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
2890 scan_inst->dst.reg);
2891 needs_dep[scan_inst->dst.reg - first_write_grf] = false;
2892 }
2893
2894 /* Continue the loop only if we haven't resolved all the dependencies */
2895 int i;
2896 for (i = 0; i < write_len; i++) {
2897 if (needs_dep[i])
2898 break;
2899 }
2900 if (i == write_len)
2901 return;
2902 }
2903 }
2904
2905 void
2906 fs_visitor::insert_gen4_send_dependency_workarounds()
2907 {
2908 if (devinfo->gen != 4 || devinfo->is_g4x)
2909 return;
2910
2911 bool progress = false;
2912
2913 /* Note that we're done with register allocation, so GRF fs_regs always
2914 * have a .reg_offset of 0.
2915 */
2916
2917 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2918 if (inst->mlen != 0 && inst->dst.file == GRF) {
2919 insert_gen4_pre_send_dependency_workarounds(block, inst);
2920 insert_gen4_post_send_dependency_workarounds(block, inst);
2921 progress = true;
2922 }
2923 }
2924
2925 if (progress)
2926 invalidate_live_intervals();
2927 }
2928
2929 /**
2930 * Turns the generic expression-style uniform pull constant load instruction
2931 * into a hardware-specific series of instructions for loading a pull
2932 * constant.
2933 *
2934 * The expression style allows the CSE pass before this to optimize out
2935 * repeated loads from the same offset, and gives the pre-register-allocation
2936 * scheduling full flexibility, while the conversion to native instructions
2937 * allows the post-register-allocation scheduler the best information
2938 * possible.
2939 *
2940 * Note that execution masking for setting up pull constant loads is special:
2941 * the channels that need to be written are unrelated to the current execution
2942 * mask, since a later instruction will use one of the result channels as a
2943 * source operand for all 8 or 16 of its channels.
2944 */
2945 void
2946 fs_visitor::lower_uniform_pull_constant_loads()
2947 {
2948 foreach_block_and_inst (block, fs_inst, inst, cfg) {
2949 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
2950 continue;
2951
2952 if (devinfo->gen >= 7) {
2953 /* The offset arg before was a vec4-aligned byte offset. We need to
2954 * turn it into a dword offset.
2955 */
2956 fs_reg const_offset_reg = inst->src[1];
2957 assert(const_offset_reg.file == IMM &&
2958 const_offset_reg.type == BRW_REGISTER_TYPE_UD);
2959 const_offset_reg.fixed_hw_reg.dw1.ud /= 4;
2960
2961 fs_reg payload, offset;
2962 if (devinfo->gen >= 9) {
2963 /* We have to use a message header on Skylake to get SIMD4x2
2964 * mode. Reserve space for the register.
2965 */
2966 offset = payload = fs_reg(GRF, alloc.allocate(2));
2967 offset.reg_offset++;
2968 inst->mlen = 2;
2969 } else {
2970 offset = payload = fs_reg(GRF, alloc.allocate(1));
2971 inst->mlen = 1;
2972 }
2973
2974 /* This is actually going to be a MOV, but since only the first dword
2975 * is accessed, we have a special opcode to do just that one. Note
2976 * that this needs to be an operation that will be considered a def
2977 * by live variable analysis, or register allocation will explode.
2978 */
2979 fs_inst *setup = new(mem_ctx) fs_inst(FS_OPCODE_SET_SIMD4X2_OFFSET,
2980 8, offset, const_offset_reg);
2981 setup->force_writemask_all = true;
2982
2983 setup->ir = inst->ir;
2984 setup->annotation = inst->annotation;
2985 inst->insert_before(block, setup);
2986
2987 /* Similarly, this will only populate the first 4 channels of the
2988 * result register (since we only use smear values from 0-3), but we
2989 * don't tell the optimizer.
2990 */
2991 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
2992 inst->src[1] = payload;
2993 inst->base_mrf = -1;
2994
2995 invalidate_live_intervals();
2996 } else {
2997 /* Before register allocation, we didn't tell the scheduler about the
2998 * MRF we use. We know it's safe to use this MRF because nothing
2999 * else does except for register spill/unspill, which generates and
3000 * uses its MRF within a single IR instruction.
3001 */
3002 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3003 inst->mlen = 1;
3004 }
3005 }
3006 }
3007
3008 bool
3009 fs_visitor::lower_load_payload()
3010 {
3011 bool progress = false;
3012
3013 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3014 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3015 continue;
3016
3017 assert(inst->dst.file == MRF || inst->dst.file == GRF);
3018 assert(inst->saturate == false);
3019 fs_reg dst = inst->dst;
3020
3021 /* Get rid of COMPR4. We'll add it back in if we need it */
3022 if (dst.file == MRF)
3023 dst.reg = dst.reg & ~BRW_MRF_COMPR4;
3024
3025 const fs_builder ibld(this, block, inst);
3026 const fs_builder hbld = ibld.exec_all().group(8, 0);
3027
3028 for (uint8_t i = 0; i < inst->header_size; i++) {
3029 if (inst->src[i].file != BAD_FILE) {
3030 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3031 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3032 hbld.MOV(mov_dst, mov_src);
3033 }
3034 dst = offset(dst, hbld, 1);
3035 }
3036
3037 if (inst->dst.file == MRF && (inst->dst.reg & BRW_MRF_COMPR4) &&
3038 inst->exec_size > 8) {
3039 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3040 * a straightforward copy. Instead, the result of the
3041 * LOAD_PAYLOAD is treated as interleaved and the first four
3042 * non-header sources are unpacked as:
3043 *
3044 * m + 0: r0
3045 * m + 1: g0
3046 * m + 2: b0
3047 * m + 3: a0
3048 * m + 4: r1
3049 * m + 5: g1
3050 * m + 6: b1
3051 * m + 7: a1
3052 *
3053 * This is used for gen <= 5 fb writes.
3054 */
3055 assert(inst->exec_size == 16);
3056 assert(inst->header_size + 4 <= inst->sources);
3057 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3058 if (inst->src[i].file != BAD_FILE) {
3059 if (devinfo->has_compr4) {
3060 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3061 compr4_dst.reg |= BRW_MRF_COMPR4;
3062 ibld.MOV(compr4_dst, inst->src[i]);
3063 } else {
3064 /* Platform doesn't have COMPR4. We have to fake it */
3065 fs_reg mov_dst = retype(dst, inst->src[i].type);
3066 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3067 mov_dst.reg += 4;
3068 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3069 }
3070 }
3071
3072 dst.reg++;
3073 }
3074
3075 /* The loop above only ever incremented us through the first set
3076 * of 4 registers. However, thanks to the magic of COMPR4, we
3077 * actually wrote to the first 8 registers, so we need to take
3078 * that into account now.
3079 */
3080 dst.reg += 4;
3081
3082 /* The COMPR4 code took care of the first 4 sources. We'll let
3083 * the regular path handle any remaining sources. Yes, we are
3084 * modifying the instruction but we're about to delete it so
3085 * this really doesn't hurt anything.
3086 */
3087 inst->header_size += 4;
3088 }
3089
3090 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3091 if (inst->src[i].file != BAD_FILE)
3092 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3093 dst = offset(dst, ibld, 1);
3094 }
3095
3096 inst->remove(block);
3097 progress = true;
3098 }
3099
3100 if (progress)
3101 invalidate_live_intervals();
3102
3103 return progress;
3104 }
3105
3106 bool
3107 fs_visitor::lower_integer_multiplication()
3108 {
3109 bool progress = false;
3110
3111 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3112 const fs_builder ibld(this, block, inst);
3113
3114 if (inst->opcode == BRW_OPCODE_MUL) {
3115 if (inst->dst.is_accumulator() ||
3116 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3117 inst->dst.type != BRW_REGISTER_TYPE_UD))
3118 continue;
3119
3120 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3121 * operation directly, but CHV/BXT cannot.
3122 */
3123 if (devinfo->gen >= 8 &&
3124 !devinfo->is_cherryview && !devinfo->is_broxton)
3125 continue;
3126
3127 if (inst->src[1].file == IMM &&
3128 inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
3129 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3130 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3131 * src1 are used.
3132 *
3133 * If multiplying by an immediate value that fits in 16-bits, do a
3134 * single MUL instruction with that value in the proper location.
3135 */
3136 if (devinfo->gen < 7) {
3137 fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
3138 inst->dst.type);
3139 ibld.MOV(imm, inst->src[1]);
3140 ibld.MUL(inst->dst, imm, inst->src[0]);
3141 } else {
3142 ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
3143 }
3144 } else {
3145 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3146 * do 32-bit integer multiplication in one instruction, but instead
3147 * must do a sequence (which actually calculates a 64-bit result):
3148 *
3149 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3150 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3151 * mov(8) g2<1>D acc0<8,8,1>D
3152 *
3153 * But on Gen > 6, the ability to use second accumulator register
3154 * (acc1) for non-float data types was removed, preventing a simple
3155 * implementation in SIMD16. A 16-channel result can be calculated by
3156 * executing the three instructions twice in SIMD8, once with quarter
3157 * control of 1Q for the first eight channels and again with 2Q for
3158 * the second eight channels.
3159 *
3160 * Which accumulator register is implicitly accessed (by AccWrEnable
3161 * for instance) is determined by the quarter control. Unfortunately
3162 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3163 * implicit accumulator access by an instruction with 2Q will access
3164 * acc1 regardless of whether the data type is usable in acc1.
3165 *
3166 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3167 * integer data types.
3168 *
3169 * Since we only want the low 32-bits of the result, we can do two
3170 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3171 * adjust the high result and add them (like the mach is doing):
3172 *
3173 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3174 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3175 * shl(8) g9<1>D g8<8,8,1>D 16D
3176 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3177 *
3178 * We avoid the shl instruction by realizing that we only want to add
3179 * the low 16-bits of the "high" result to the high 16-bits of the
3180 * "low" result and using proper regioning on the add:
3181 *
3182 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3183 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3184 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3185 *
3186 * Since it does not use the (single) accumulator register, we can
3187 * schedule multi-component multiplications much better.
3188 */
3189
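/* Concretely, the code below splits one 32-bit operand into 16-bit halves
 * (src1 on Gen7+, src0 on earlier parts), computes low = a * b_lo and
 * high = a * b_hi, and the strided ADD then folds the low 16 bits of high
 * into the high 16 bits of low, yielding the low 32 bits of the full
 * 32 x 32 product.
 */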
3190 fs_reg orig_dst = inst->dst;
3191 if (orig_dst.is_null() || orig_dst.file == MRF) {
3192 inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
3193 inst->dst.type);
3194 }
3195 fs_reg low = inst->dst;
3196 fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
3197 inst->dst.type);
3198
3199 if (devinfo->gen >= 7) {
3200 fs_reg src1_0_w = inst->src[1];
3201 fs_reg src1_1_w = inst->src[1];
3202
3203 if (inst->src[1].file == IMM) {
3204 src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
3205 src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
3206 } else {
3207 src1_0_w.type = BRW_REGISTER_TYPE_UW;
3208 if (src1_0_w.stride != 0) {
3209 assert(src1_0_w.stride == 1);
3210 src1_0_w.stride = 2;
3211 }
3212
3213 src1_1_w.type = BRW_REGISTER_TYPE_UW;
3214 if (src1_1_w.stride != 0) {
3215 assert(src1_1_w.stride == 1);
3216 src1_1_w.stride = 2;
3217 }
3218 src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3219 }
3220 ibld.MUL(low, inst->src[0], src1_0_w);
3221 ibld.MUL(high, inst->src[0], src1_1_w);
3222 } else {
3223 fs_reg src0_0_w = inst->src[0];
3224 fs_reg src0_1_w = inst->src[0];
3225
3226 src0_0_w.type = BRW_REGISTER_TYPE_UW;
3227 if (src0_0_w.stride != 0) {
3228 assert(src0_0_w.stride == 1);
3229 src0_0_w.stride = 2;
3230 }
3231
3232 src0_1_w.type = BRW_REGISTER_TYPE_UW;
3233 if (src0_1_w.stride != 0) {
3234 assert(src0_1_w.stride == 1);
3235 src0_1_w.stride = 2;
3236 }
3237 src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3238
3239 ibld.MUL(low, src0_0_w, inst->src[1]);
3240 ibld.MUL(high, src0_1_w, inst->src[1]);
3241 }
3242
3243 fs_reg dst = inst->dst;
3244 dst.type = BRW_REGISTER_TYPE_UW;
3245 dst.subreg_offset = 2;
3246 dst.stride = 2;
3247
3248 high.type = BRW_REGISTER_TYPE_UW;
3249 high.stride = 2;
3250
3251 low.type = BRW_REGISTER_TYPE_UW;
3252 low.subreg_offset = 2;
3253 low.stride = 2;
3254
3255 ibld.ADD(dst, low, high);
3256
3257 if (inst->conditional_mod || orig_dst.file == MRF) {
3258 set_condmod(inst->conditional_mod,
3259 ibld.MOV(orig_dst, inst->dst));
3260 }
3261 }
3262
3263 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3264 /* Should have been lowered to 8-wide. */
3265 assert(inst->exec_size <= 8);
3266 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3267 inst->dst.type);
3268 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3269 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3270
3271 if (devinfo->gen >= 8) {
3272 /* Until Gen8, integer multiplies read 32-bits from one source,
3273 * and 16-bits from the other, relying on the MACH instruction
3274 * to generate the high bits of the result.
3275 *
3276 * On Gen8, the multiply instruction does a full 32x32-bit
3277 * multiply, but in order to do a 64-bit multiply we can simulate
3278 * the previous behavior and then use a MACH instruction.
3279 *
3280 * FINISHME: Don't use source modifiers on src1.
3281 */
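/* Narrowing src1 to word type with doubled stride makes the MUL read only
 * the low 16 bits of each 32-bit channel, recreating the pre-Gen8 32 x 16
 * multiply that MACH expects to pair with.
 */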
3282 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3283 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3284 mul->src[1].type = (type_is_signed(mul->src[1].type) ?
3285 BRW_REGISTER_TYPE_W : BRW_REGISTER_TYPE_UW);
3286 mul->src[1].stride *= 2;
3287
3288 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3289 inst->force_sechalf) {
3290 /* Among other things the quarter control bits influence which
3291 * accumulator register is used by the hardware for instructions
3292 * that access the accumulator implicitly (e.g. MACH). A
3293 * second-half instruction would normally map to acc1, which
3294 * doesn't exist on Gen7 and up (the hardware does emulate it for
3295 * floating-point instructions *only* by taking advantage of the
3296 * extra precision of acc0 not normally used for floating point
3297 * arithmetic).
3298 *
3299 * HSW and up are careful enough not to try to access an
3300 * accumulator register that doesn't exist, but on earlier Gen7
3301 * hardware we need to make sure that the quarter control bits are
3302 * zero to avoid non-deterministic behaviour and emit an extra MOV
3303 * to get the result masked correctly according to the current
3304 * channel enables.
3305 */
3306 mach->force_sechalf = false;
3307 mach->force_writemask_all = true;
3308 mach->dst = ibld.vgrf(inst->dst.type);
3309 ibld.MOV(inst->dst, mach->dst);
3310 }
3311 } else {
3312 continue;
3313 }
3314
3315 inst->remove(block);
3316 progress = true;
3317 }
3318
3319 if (progress)
3320 invalidate_live_intervals();
3321
3322 return progress;
3323 }
3324
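/* Fan the components of a color value out into consecutive entries of an
 * FB-write source array, first clamping each component to [0, 1] if the key
 * requests fragment color clamping.
 */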
3325 static void
3326 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3327 fs_reg *dst, fs_reg color, unsigned components)
3328 {
3329 if (key->clamp_fragment_color) {
3330 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3331 assert(color.type == BRW_REGISTER_TYPE_F);
3332
3333 for (unsigned i = 0; i < components; i++)
3334 set_saturate(true,
3335 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3336
3337 color = tmp;
3338 }
3339
3340 for (unsigned i = 0; i < components; i++)
3341 dst[i] = offset(color, bld, i);
3342 }
3343
3344 static void
3345 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3346 const brw_wm_prog_data *prog_data,
3347 const brw_wm_prog_key *key,
3348 const fs_visitor::thread_payload &payload)
3349 {
3350 assert(inst->src[6].file == IMM);
3351 const brw_device_info *devinfo = bld.shader->devinfo;
3352 const fs_reg &color0 = inst->src[0];
3353 const fs_reg &color1 = inst->src[1];
3354 const fs_reg &src0_alpha = inst->src[2];
3355 const fs_reg &src_depth = inst->src[3];
3356 const fs_reg &dst_depth = inst->src[4];
3357 fs_reg sample_mask = inst->src[5];
3358 const unsigned components = inst->src[6].fixed_hw_reg.dw1.ud;
3359
3360 /* We can potentially have a message length of up to 15, so we have to set
3361 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3362 */
3363 fs_reg sources[15];
3364 int header_size = 2, payload_header_size;
3365 unsigned length = 0;
3366
3367 /* From the Sandy Bridge PRM, volume 4, page 198:
3368 *
3369 * "Dispatched Pixel Enables. One bit per pixel indicating
3370 * which pixels were originally enabled when the thread was
3371 * dispatched. This field is only required for the end-of-
3372 * thread message and on all dual-source messages."
3373 */
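/* In other words, we can go headerless only for a single-RT, non-dual-source
 * write on Gen6+, and only when either the platform (HSW or Gen8+) or the
 * absence of discard makes the dispatched pixel enables unnecessary.
 */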
3374 if (devinfo->gen >= 6 &&
3375 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3376 color1.file == BAD_FILE &&
3377 key->nr_color_regions == 1) {
3378 header_size = 0;
3379 }
3380
3381 if (header_size != 0) {
3382 assert(header_size == 2);
3383 /* Allocate 2 registers for a header */
3384 length += 2;
3385 }
3386
3387 if (payload.aa_dest_stencil_reg) {
3388 sources[length] = fs_reg(GRF, bld.shader->alloc.allocate(1));
3389 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3390 .MOV(sources[length],
3391 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3392 length++;
3393 }
3394
3395 if (prog_data->uses_omask) {
3396 sources[length] = fs_reg(GRF, bld.shader->alloc.allocate(1),
3397 BRW_REGISTER_TYPE_UD);
3398
3399 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3400 * relevant. Since it holds unsigned single words, one VGRF is always
3401 * 16-wide, but only the lower or higher 8 channels will be used by the
3402 * hardware when doing a SIMD8 write depending on whether we have
3403 * selected the subspans for the first or second half respectively.
3404 */
3405 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3406 sample_mask.type = BRW_REGISTER_TYPE_UW;
3407 sample_mask.stride *= 2;
3408
3409 bld.exec_all().annotate("FB write oMask")
3410 .MOV(half(retype(sources[length], BRW_REGISTER_TYPE_UW),
3411 inst->force_sechalf),
3412 sample_mask);
3413 length++;
3414 }
3415
3416 payload_header_size = length;
3417
3418 if (src0_alpha.file != BAD_FILE) {
3419 /* FIXME: This is being passed at the wrong location in the payload and
3420 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3421 * It's supposed to be immediately before oMask but there seems to be no
3422 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3423 * requires header sources to form a contiguous segment at the beginning
3424 * of the message and src0_alpha has per-channel semantics.
3425 */
3426 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3427 length++;
3428 }
3429
3430 setup_color_payload(bld, key, &sources[length], color0, components);
3431 length += 4;
3432
3433 if (color1.file != BAD_FILE) {
3434 setup_color_payload(bld, key, &sources[length], color1, components);
3435 length += 4;
3436 }
3437
3438 if (src_depth.file != BAD_FILE) {
3439 sources[length] = src_depth;
3440 length++;
3441 }
3442
3443 if (dst_depth.file != BAD_FILE) {
3444 sources[length] = dst_depth;
3445 length++;
3446 }
3447
3448 fs_inst *load;
3449 if (devinfo->gen >= 7) {
3450 /* Send from the GRF */
3451 fs_reg payload = fs_reg(GRF, -1, BRW_REGISTER_TYPE_F);
3452 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3453 payload.reg = bld.shader->alloc.allocate(load->regs_written);
3454 load->dst = payload;
3455
3456 inst->src[0] = payload;
3457 inst->resize_sources(1);
3458 inst->base_mrf = -1;
3459 } else {
3460 /* Send from the MRF */
3461 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3462 sources, length, payload_header_size);
3463
3464 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3465 * will do this for us if we just give it a COMPR4 destination.
3466 */
3467 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3468 load->dst.reg |= BRW_MRF_COMPR4;
3469
3470 inst->resize_sources(0);
3471 inst->base_mrf = 1;
3472 }
3473
3474 inst->opcode = FS_OPCODE_FB_WRITE;
3475 inst->mlen = load->regs_written;
3476 inst->header_size = header_size;
3477 }
3478
3479 static void
3480 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3481 const fs_reg &coordinate,
3482 const fs_reg &shadow_c,
3483 const fs_reg &lod, const fs_reg &lod2,
3484 const fs_reg &sampler,
3485 unsigned coord_components,
3486 unsigned grad_components)
3487 {
3488 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3489 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3490 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3491 fs_reg msg_end = msg_begin;
3492
3493 /* g0 header. */
3494 msg_end = offset(msg_end, bld.group(8, 0), 1);
3495
3496 for (unsigned i = 0; i < coord_components; i++)
3497 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3498 offset(coordinate, bld, i));
3499
3500 msg_end = offset(msg_end, bld, coord_components);
3501
3502 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
3503 * require all three components to be present and zero if they are unused.
3504 */
3505 if (coord_components > 0 &&
3506 (has_lod || shadow_c.file != BAD_FILE ||
3507 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3508 for (unsigned i = coord_components; i < 3; i++)
3509 bld.MOV(offset(msg_end, bld, i), fs_reg(0.0f));
3510
3511 msg_end = offset(msg_end, bld, 3 - coord_components);
3512 }
3513
3514 if (op == SHADER_OPCODE_TXD) {
3515 /* TXD unsupported in SIMD16 mode. */
3516 assert(bld.dispatch_width() == 8);
3517
3518 /* the slots for u and v are always present, but r is optional */
3519 if (coord_components < 2)
3520 msg_end = offset(msg_end, bld, 2 - coord_components);
3521
3522 /* P = u, v, r
3523 * dPdx = dudx, dvdx, drdx
3524 * dPdy = dudy, dvdy, drdy
3525 *
3526 * 1-arg: Does not exist.
3527 *
3528 * 2-arg: dudx dvdx dudy dvdy
3529 * dPdx.x dPdx.y dPdy.x dPdy.y
3530 * m4 m5 m6 m7
3531 *
3532 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3533 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3534 * m5 m6 m7 m8 m9 m10
3535 */
3536 for (unsigned i = 0; i < grad_components; i++)
3537 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3538
3539 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3540
3541 for (unsigned i = 0; i < grad_components; i++)
3542 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3543
3544 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3545 }
3546
3547 if (has_lod) {
3548 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *Without*
3549 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
3550 */
3551 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3552 bld.dispatch_width() == 16);
3553
3554 const brw_reg_type type =
3555 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3556 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3557 bld.MOV(retype(msg_end, type), lod);
3558 msg_end = offset(msg_end, bld, 1);
3559 }
3560
3561 if (shadow_c.file != BAD_FILE) {
3562 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3563 /* There's no plain shadow compare message, so we use shadow
3564 * compare with a bias of 0.0.
3565 */
3566 bld.MOV(msg_end, fs_reg(0.0f));
3567 msg_end = offset(msg_end, bld, 1);
3568 }
3569
3570 bld.MOV(msg_end, shadow_c);
3571 msg_end = offset(msg_end, bld, 1);
3572 }
3573
3574 inst->opcode = op;
3575 inst->src[0] = reg_undef;
3576 inst->src[1] = sampler;
3577 inst->resize_sources(2);
3578 inst->base_mrf = msg_begin.reg;
3579 inst->mlen = msg_end.reg - msg_begin.reg;
3580 inst->header_size = 1;
3581 }
3582
3583 static void
3584 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
3585 fs_reg coordinate,
3586 const fs_reg &shadow_c,
3587 fs_reg lod, fs_reg lod2,
3588 const fs_reg &sample_index,
3589 const fs_reg &sampler,
3590 const fs_reg &offset_value,
3591 unsigned coord_components,
3592 unsigned grad_components)
3593 {
3594 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
3595 fs_reg msg_coords = message;
3596 unsigned header_size = 0;
3597
3598 if (offset_value.file != BAD_FILE) {
3599 /* The offsets set up by the visitor are in the m1 header, so we can't
3600 * go headerless.
3601 */
3602 header_size = 1;
3603 message.reg--;
3604 }
3605
3606 for (unsigned i = 0; i < coord_components; i++) {
3607 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type), coordinate);
3608 coordinate = offset(coordinate, bld, 1);
3609 }
3610 fs_reg msg_end = offset(msg_coords, bld, coord_components);
3611 fs_reg msg_lod = offset(msg_coords, bld, 4);
3612
3613 if (shadow_c.file != BAD_FILE) {
3614 fs_reg msg_shadow = msg_lod;
3615 bld.MOV(msg_shadow, shadow_c);
3616 msg_lod = offset(msg_shadow, bld, 1);
3617 msg_end = msg_lod;
3618 }
3619
3620 switch (op) {
3621 case SHADER_OPCODE_TXL:
3622 case FS_OPCODE_TXB:
3623 bld.MOV(msg_lod, lod);
3624 msg_end = offset(msg_lod, bld, 1);
3625 break;
3626 case SHADER_OPCODE_TXD:
3627 /**
3628 * P = u, v, r
3629 * dPdx = dudx, dvdx, drdx
3630 * dPdy = dudy, dvdy, drdy
3631 *
3632 * Load up these values:
3633 * - dudx dudy dvdx dvdy drdx drdy
3634 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
3635 */
3636 msg_end = msg_lod;
3637 for (unsigned i = 0; i < grad_components; i++) {
3638 bld.MOV(msg_end, lod);
3639 lod = offset(lod, bld, 1);
3640 msg_end = offset(msg_end, bld, 1);
3641
3642 bld.MOV(msg_end, lod2);
3643 lod2 = offset(lod2, bld, 1);
3644 msg_end = offset(msg_end, bld, 1);
3645 }
3646 break;
3647 case SHADER_OPCODE_TXS:
3648 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
3649 bld.MOV(msg_lod, lod);
3650 msg_end = offset(msg_lod, bld, 1);
3651 break;
3652 case SHADER_OPCODE_TXF:
3653 msg_lod = offset(msg_coords, bld, 3);
3654 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
3655 msg_end = offset(msg_lod, bld, 1);
3656 break;
3657 case SHADER_OPCODE_TXF_CMS:
3658 msg_lod = offset(msg_coords, bld, 3);
3659 /* lod */
3660 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u));
3661 /* sample index */
3662 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
3663 msg_end = offset(msg_lod, bld, 2);
3664 break;
3665 default:
3666 break;
3667 }
3668
3669 inst->opcode = op;
3670 inst->src[0] = reg_undef;
3671 inst->src[1] = sampler;
3672 inst->resize_sources(2);
3673 inst->base_mrf = message.reg;
3674 inst->mlen = msg_end.reg - message.reg;
3675 inst->header_size = header_size;
3676
3677 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
3678 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
3679 }
3680
3681 static bool
3682 is_high_sampler(const struct brw_device_info *devinfo, const fs_reg &sampler)
3683 {
3684 if (devinfo->gen < 8 && !devinfo->is_haswell)
3685 return false;
3686
3687 return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16;
3688 }
3689
3690 static void
3691 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
3692 fs_reg coordinate,
3693 const fs_reg &shadow_c,
3694 fs_reg lod, fs_reg lod2,
3695 const fs_reg &sample_index,
3696 const fs_reg &mcs, const fs_reg &sampler,
3697 fs_reg offset_value,
3698 unsigned coord_components,
3699 unsigned grad_components)
3700 {
3701 const brw_device_info *devinfo = bld.shader->devinfo;
3702 int reg_width = bld.dispatch_width() / 8;
3703 unsigned header_size = 0, length = 0;
3704 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
3705 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
3706 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
3707
3708 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
3709 offset_value.file != BAD_FILE ||
3710 is_high_sampler(devinfo, sampler)) {
3711 /* For general texture offsets (no txf workaround), we need a header to
3712 * put them in. Note that we're only reserving space for it in the
3713 * message payload as it will be initialized implicitly by the
3714 * generator.
3715 *
3716 * TG4 needs to place its channel select in the header, for interaction
3717 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
3718 * larger sampler numbers we need to offset the Sampler State Pointer in
3719 * the header.
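*
* Rough sketch of the payload built below (a reading of this code, not a
* PRM quote): [header, if any] [shadow comparator, if any] [per-opcode
* LOD parameters] [coordinate components], with each slot taking one GRF
* in SIMD8 and two in SIMD16; TXD, TXF and gather-with-offset interleave
* the coordinates with their parameters instead, as the cases below show.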
3720 */
3721 header_size = 1;
3722 sources[0] = fs_reg();
3723 length++;
3724 }
3725
3726 if (shadow_c.file != BAD_FILE) {
3727 bld.MOV(sources[length], shadow_c);
3728 length++;
3729 }
3730
3731 bool coordinate_done = false;
3732
3733 /* The sampler can only meaningfully compute LOD for fragment shader
3734 * messages. For all other stages, we change the opcode to TXL and
3735 * hardcode the LOD to 0.
3736 */
3737 if (bld.shader->stage != MESA_SHADER_FRAGMENT &&
3738 op == SHADER_OPCODE_TEX) {
3739 op = SHADER_OPCODE_TXL;
3740 lod = fs_reg(0.0f);
3741 }
3742
3743 /* Set up the LOD info */
3744 switch (op) {
3745 case FS_OPCODE_TXB:
3746 case SHADER_OPCODE_TXL:
3747 bld.MOV(sources[length], lod);
3748 length++;
3749 break;
3750 case SHADER_OPCODE_TXD:
3751 /* TXD should have been lowered in SIMD16 mode. */
3752 assert(bld.dispatch_width() == 8);
3753
3754 /* Load dPdx and the coordinate together:
3755 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
3756 */
3757 for (unsigned i = 0; i < coord_components; i++) {
3758 bld.MOV(sources[length], coordinate);
3759 coordinate = offset(coordinate, bld, 1);
3760 length++;
3761
3762 /* For cube map array, the coordinate is (u,v,r,ai) but there are
3763 * only derivatives for (u, v, r).
3764 */
3765 if (i < grad_components) {
3766 bld.MOV(sources[length], lod);
3767 lod = offset(lod, bld, 1);
3768 length++;
3769
3770 bld.MOV(sources[length], lod2);
3771 lod2 = offset(lod2, bld, 1);
3772 length++;
3773 }
3774 }
3775
3776 coordinate_done = true;
3777 break;
3778 case SHADER_OPCODE_TXS:
3779 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
3780 length++;
3781 break;
3782 case SHADER_OPCODE_TXF:
3783 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
3784 * On Gen9 they are u, v, lod, r.
3785 */
3786 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3787 coordinate = offset(coordinate, bld, 1);
3788 length++;
3789
3790 if (devinfo->gen >= 9) {
3791 if (coord_components >= 2) {
3792 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3793 coordinate = offset(coordinate, bld, 1);
3794 }
3795 length++;
3796 }
3797
3798 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
3799 length++;
3800
3801 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++) {
3802 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3803 coordinate = offset(coordinate, bld, 1);
3804 length++;
3805 }
3806
3807 coordinate_done = true;
3808 break;
3809 case SHADER_OPCODE_TXF_CMS:
3810 case SHADER_OPCODE_TXF_UMS:
3811 case SHADER_OPCODE_TXF_MCS:
3812 if (op == SHADER_OPCODE_TXF_UMS || op == SHADER_OPCODE_TXF_CMS) {
3813 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
3814 length++;
3815 }
3816
3817 if (op == SHADER_OPCODE_TXF_CMS) {
3818 /* Data from the multisample control surface. */
3819 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
3820 length++;
3821 }
3822
3823 /* There is no offsetting for this message; just copy in the integer
3824 * texture coordinates.
3825 */
3826 for (unsigned i = 0; i < coord_components; i++) {
3827 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3828 coordinate = offset(coordinate, bld, 1);
3829 length++;
3830 }
3831
3832 coordinate_done = true;
3833 break;
3834 case SHADER_OPCODE_TG4_OFFSET:
3835 /* gather4_po_c should have been lowered in SIMD16 mode. */
3836 assert(bld.dispatch_width() == 8 || shadow_c.file == BAD_FILE);
3837
3838 /* More crazy intermixing */
3839 for (unsigned i = 0; i < 2; i++) { /* u, v */
3840 bld.MOV(sources[length], coordinate);
3841 coordinate = offset(coordinate, bld, 1);
3842 length++;
3843 }
3844
3845 for (unsigned i = 0; i < 2; i++) { /* offu, offv */
3846 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), offset_value);
3847 offset_value = offset(offset_value, bld, 1);
3848 length++;
3849 }
3850
3851 if (coord_components == 3) { /* r if present */
3852 bld.MOV(sources[length], coordinate);
3853 coordinate = offset(coordinate, bld, 1);
3854 length++;
3855 }
3856
3857 coordinate_done = true;
3858 break;
3859 default:
3860 break;
3861 }
3862
3863 /* Set up the coordinate (except for cases where it was done above) */
3864 if (!coordinate_done) {
3865 for (unsigned i = 0; i < coord_components; i++) {
3866 bld.MOV(sources[length], coordinate);
3867 coordinate = offset(coordinate, bld, 1);
3868 length++;
3869 }
3870 }
3871
3872 int mlen;
3873 if (reg_width == 2)
3874 mlen = length * reg_width - header_size;
3875 else
3876 mlen = length * reg_width;
3877
3878 const fs_reg src_payload = fs_reg(GRF, bld.shader->alloc.allocate(mlen),
3879 BRW_REGISTER_TYPE_F);
3880 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
3881
3882 /* Generate the SEND. */
3883 inst->opcode = op;
3884 inst->src[0] = src_payload;
3885 inst->src[1] = sampler;
3886 inst->resize_sources(2);
3887 inst->base_mrf = -1;
3888 inst->mlen = mlen;
3889 inst->header_size = header_size;
3890
3891 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
3892 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
3893 }
3894
3895 static void
3896 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
3897 {
3898 const brw_device_info *devinfo = bld.shader->devinfo;
3899 const fs_reg &coordinate = inst->src[0];
3900 const fs_reg &shadow_c = inst->src[1];
3901 const fs_reg &lod = inst->src[2];
3902 const fs_reg &lod2 = inst->src[3];
3903 const fs_reg &sample_index = inst->src[4];
3904 const fs_reg &mcs = inst->src[5];
3905 const fs_reg &sampler = inst->src[6];
3906 const fs_reg &offset_value = inst->src[7];
3907 assert(inst->src[8].file == IMM && inst->src[9].file == IMM);
3908 const unsigned coord_components = inst->src[8].fixed_hw_reg.dw1.ud;
3909 const unsigned grad_components = inst->src[9].fixed_hw_reg.dw1.ud;
3910
3911 if (devinfo->gen >= 7) {
3912 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
3913 shadow_c, lod, lod2, sample_index,
3914 mcs, sampler, offset_value,
3915 coord_components, grad_components);
3916 } else if (devinfo->gen >= 5) {
3917 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
3918 shadow_c, lod, lod2, sample_index,
3919 sampler, offset_value,
3920 coord_components, grad_components);
3921 } else {
3922 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
3923 shadow_c, lod, lod2, sampler,
3924 coord_components, grad_components);
3925 }
3926 }
3927
3928 /**
3929 * Initialize the header present in some typed and untyped surface
3930 * messages.
3931 */
3932 static fs_reg
3933 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
3934 {
3935 fs_builder ubld = bld.exec_all().group(8, 0);
3936 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
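/* A single SIMD8 register: cleared to zero, with the sample mask placed
* in its last dword (component 7) by the MOVs below.
*/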
3937 ubld.MOV(dst, fs_reg(0));
3938 ubld.MOV(component(dst, 7), sample_mask);
3939 return dst;
3940 }
3941
3942 static void
3943 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
3944 const fs_reg &sample_mask)
3945 {
3946 /* Get the logical send arguments. */
3947 const fs_reg &addr = inst->src[0];
3948 const fs_reg &src = inst->src[1];
3949 const fs_reg &surface = inst->src[2];
3950 const UNUSED fs_reg &dims = inst->src[3];
3951 const fs_reg &arg = inst->src[4];
3952
3953 /* Calculate the total number of components of the payload. */
3954 const unsigned addr_sz = inst->components_read(0);
3955 const unsigned src_sz = inst->components_read(1);
3956 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
3957 const unsigned sz = header_sz + addr_sz + src_sz;
3958
3959 /* Allocate space for the payload. */
3960 fs_reg *const components = new fs_reg[sz];
3961 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
3962 unsigned n = 0;
3963
3964 /* Construct the payload. */
3965 if (header_sz)
3966 components[n++] = emit_surface_header(bld, sample_mask);
3967
3968 for (unsigned i = 0; i < addr_sz; i++)
3969 components[n++] = offset(addr, bld, i);
3970
3971 for (unsigned i = 0; i < src_sz; i++)
3972 components[n++] = offset(src, bld, i);
3973
3974 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
3975
3976 /* Update the original instruction. */
3977 inst->opcode = op;
3978 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
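/* Worked example of the formula above: a SIMD16 untyped write with one
* address component and four data components gives
* mlen = 1 + (1 + 4) * 16 / 8 = 11.
*/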
3979 inst->header_size = header_sz;
3980
3981 inst->src[0] = payload;
3982 inst->src[1] = surface;
3983 inst->src[2] = arg;
3984 inst->resize_sources(3);
3985
3986 delete[] components;
3987 }
3988
3989 bool
3990 fs_visitor::lower_logical_sends()
3991 {
3992 bool progress = false;
3993
3994 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3995 const fs_builder ibld(this, block, inst);
3996
3997 switch (inst->opcode) {
3998 case FS_OPCODE_FB_WRITE_LOGICAL:
3999 assert(stage == MESA_SHADER_FRAGMENT);
4000 lower_fb_write_logical_send(ibld, inst,
4001 (const brw_wm_prog_data *)prog_data,
4002 (const brw_wm_prog_key *)key,
4003 payload);
4004 break;
4005
4006 case SHADER_OPCODE_TEX_LOGICAL:
4007 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4008 break;
4009
4010 case SHADER_OPCODE_TXD_LOGICAL:
4011 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4012 break;
4013
4014 case SHADER_OPCODE_TXF_LOGICAL:
4015 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4016 break;
4017
4018 case SHADER_OPCODE_TXL_LOGICAL:
4019 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4020 break;
4021
4022 case SHADER_OPCODE_TXS_LOGICAL:
4023 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4024 break;
4025
4026 case FS_OPCODE_TXB_LOGICAL:
4027 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4028 break;
4029
4030 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4031 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4032 break;
4033
4034 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4035 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4036 break;
4037
4038 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4039 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4040 break;
4041
4042 case SHADER_OPCODE_LOD_LOGICAL:
4043 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4044 break;
4045
4046 case SHADER_OPCODE_TG4_LOGICAL:
4047 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4048 break;
4049
4050 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4051 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4052 break;
4053
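/* A note on the sample masks passed below (a reading of the code, not a
* PRM citation): surface reads use a constant 0xffff mask because reading
* in inactive channels is harmless, while writes and atomics use the
* builder's sample mask so inactive channels don't perform the operation.
*/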
4054 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4055 lower_surface_logical_send(ibld, inst,
4056 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4057 fs_reg(0xffff));
4058 break;
4059
4060 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4061 lower_surface_logical_send(ibld, inst,
4062 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4063 ibld.sample_mask_reg());
4064 break;
4065
4066 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4067 lower_surface_logical_send(ibld, inst,
4068 SHADER_OPCODE_UNTYPED_ATOMIC,
4069 ibld.sample_mask_reg());
4070 break;
4071
4072 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4073 lower_surface_logical_send(ibld, inst,
4074 SHADER_OPCODE_TYPED_SURFACE_READ,
4075 fs_reg(0xffff));
4076 break;
4077
4078 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4079 lower_surface_logical_send(ibld, inst,
4080 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4081 ibld.sample_mask_reg());
4082 break;
4083
4084 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4085 lower_surface_logical_send(ibld, inst,
4086 SHADER_OPCODE_TYPED_ATOMIC,
4087 ibld.sample_mask_reg());
4088 break;
4089
4090 default:
4091 continue;
4092 }
4093
4094 progress = true;
4095 }
4096
4097 if (progress)
4098 invalidate_live_intervals();
4099
4100 return progress;
4101 }
4102
4103 /**
4104 * Get the closest native SIMD width supported by the hardware for instruction
4105 * \p inst. The instruction will be left untouched by
4106 * fs_visitor::lower_simd_width() if the returned value is equal to the
4107 * original execution size.
4108 */
4109 static unsigned
4110 get_lowered_simd_width(const struct brw_device_info *devinfo,
4111 const fs_inst *inst)
4112 {
4113 switch (inst->opcode) {
4114 case BRW_OPCODE_MOV:
4115 case BRW_OPCODE_SEL:
4116 case BRW_OPCODE_NOT:
4117 case BRW_OPCODE_AND:
4118 case BRW_OPCODE_OR:
4119 case BRW_OPCODE_XOR:
4120 case BRW_OPCODE_SHR:
4121 case BRW_OPCODE_SHL:
4122 case BRW_OPCODE_ASR:
4123 case BRW_OPCODE_CMP:
4124 case BRW_OPCODE_CMPN:
4125 case BRW_OPCODE_CSEL:
4126 case BRW_OPCODE_F32TO16:
4127 case BRW_OPCODE_F16TO32:
4128 case BRW_OPCODE_BFREV:
4129 case BRW_OPCODE_BFE:
4130 case BRW_OPCODE_BFI1:
4131 case BRW_OPCODE_BFI2:
4132 case BRW_OPCODE_ADD:
4133 case BRW_OPCODE_MUL:
4134 case BRW_OPCODE_AVG:
4135 case BRW_OPCODE_FRC:
4136 case BRW_OPCODE_RNDU:
4137 case BRW_OPCODE_RNDD:
4138 case BRW_OPCODE_RNDE:
4139 case BRW_OPCODE_RNDZ:
4140 case BRW_OPCODE_LZD:
4141 case BRW_OPCODE_FBH:
4142 case BRW_OPCODE_FBL:
4143 case BRW_OPCODE_CBIT:
4144 case BRW_OPCODE_SAD2:
4145 case BRW_OPCODE_MAD:
4146 case BRW_OPCODE_LRP:
4147 case SHADER_OPCODE_RCP:
4148 case SHADER_OPCODE_RSQ:
4149 case SHADER_OPCODE_SQRT:
4150 case SHADER_OPCODE_EXP2:
4151 case SHADER_OPCODE_LOG2:
4152 case SHADER_OPCODE_POW:
4153 case SHADER_OPCODE_INT_QUOTIENT:
4154 case SHADER_OPCODE_INT_REMAINDER:
4155 case SHADER_OPCODE_SIN:
4156 case SHADER_OPCODE_COS: {
4157 /* According to the PRMs:
4158 * "A. In Direct Addressing mode, a source cannot span more than 2
4159 * adjacent GRF registers.
4160 * B. A destination cannot span more than 2 adjacent GRF registers."
4161 *
4162 * Look for the source or destination with the largest register region,
4163 * since that is the operand that will limit the overall execution size of
4164 * the instruction due to this rule.
4165 */
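/* Worked example with hypothetical numbers: a SIMD16 instruction whose
* widest operand spans 4 GRFs is limited to 16 / DIV_ROUND_UP(4, 2) = 8
* channels by the computation below.
*/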
4166 unsigned reg_count = inst->regs_written;
4167
4168 for (unsigned i = 0; i < inst->sources; i++)
4169 reg_count = MAX2(reg_count, (unsigned)inst->regs_read(i));
4170
4171 /* Calculate the maximum execution size of the instruction based on the
4172 * factor by which it goes over the hardware limit of 2 GRFs.
4173 */
4174 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
4175 }
4176 case SHADER_OPCODE_MULH:
4177 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4178 * is 8-wide on Gen7+.
4179 */
4180 return (devinfo->gen >= 7 ? 8 : inst->exec_size);
4181
4182 case FS_OPCODE_FB_WRITE_LOGICAL:
4183 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4184 * here.
4185 */
4186 assert(devinfo->gen != 6 || inst->src[3].file == BAD_FILE ||
4187 inst->exec_size == 8);
4188 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4189 return (inst->src[1].file != BAD_FILE ? 8 : inst->exec_size);
4190
4191 case SHADER_OPCODE_TXD_LOGICAL:
4192 /* TXD is unsupported in SIMD16 mode. */
4193 return 8;
4194
4195 case SHADER_OPCODE_TG4_OFFSET_LOGICAL: {
4196 /* gather4_po_c is unsupported in SIMD16 mode. */
4197 const fs_reg &shadow_c = inst->src[1];
4198 return (shadow_c.file != BAD_FILE ? 8 : inst->exec_size);
4199 }
4200 case SHADER_OPCODE_TXL_LOGICAL:
4201 case FS_OPCODE_TXB_LOGICAL: {
4202 /* Gen4 doesn't have SIMD8 non-shadow-compare bias/LOD instructions, and
4203 * Gen4-6 can't support TXL and TXB with shadow comparison in SIMD16
4204 * mode because the message exceeds the maximum length of 11.
4205 */
4206 const fs_reg &shadow_c = inst->src[1];
4207 if (devinfo->gen == 4 && shadow_c.file == BAD_FILE)
4208 return 16;
4209 else if (devinfo->gen < 7 && shadow_c.file != BAD_FILE)
4210 return 8;
4211 else
4212 return inst->exec_size;
4213 }
4214 case SHADER_OPCODE_TXF_LOGICAL:
4215 case SHADER_OPCODE_TXS_LOGICAL:
4216 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
4217 * messages. Use SIMD16 instead.
4218 */
4219 if (devinfo->gen == 4)
4220 return 16;
4221 else
4222 return inst->exec_size;
4223
4224 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4225 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4226 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4227 return 8;
4228
4229 default:
4230 return inst->exec_size;
4231 }
4232 }
4233
4234 /**
4235 * The \p rows array of registers represents a \p num_rows by \p num_columns
4236 * matrix in row-major order; write it in column-major order into the register
4237 * passed as destination. \p stride gives the separation between matrix
4238 * elements in the input in fs_builder::dispatch_width() units.
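*
* For example (an illustrative case, not taken from any caller): with
* num_rows = 2, num_columns = 3 and stride = 1 the payload written to dst
* is rows[0], rows[1], offset(rows[0], 1), offset(rows[1], 1),
* offset(rows[0], 2), offset(rows[1], 2), i.e. the three columns of the
* 2x3 matrix laid out back to back.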
4239 */
4240 static void
4241 emit_transpose(const fs_builder &bld,
4242 const fs_reg &dst, const fs_reg *rows,
4243 unsigned num_rows, unsigned num_columns, unsigned stride)
4244 {
4245 fs_reg *const components = new fs_reg[num_rows * num_columns];
4246
4247 for (unsigned i = 0; i < num_columns; ++i) {
4248 for (unsigned j = 0; j < num_rows; ++j)
4249 components[num_rows * i + j] = offset(rows[j], bld, stride * i);
4250 }
4251
4252 bld.LOAD_PAYLOAD(dst, components, num_rows * num_columns, 0);
4253
4254 delete[] components;
4255 }
4256
4257 bool
4258 fs_visitor::lower_simd_width()
4259 {
4260 bool progress = false;
4261
4262 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4263 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
4264
4265 if (lower_width != inst->exec_size) {
4266 /* Builder matching the original instruction. We may also need to
4267 * emit an instruction of width larger than the original, so set the
4268 * execution size of the builder to the larger of the two for now to
4269 * make sure that both cases can be handled.
4270 */
4271 const fs_builder ibld = bld.at(block, inst)
4272 .exec_all(inst->force_writemask_all)
4273 .group(MAX2(inst->exec_size, lower_width),
4274 inst->force_sechalf);
4275
4276 /* Split the copies in chunks of the execution width of either the
4277 * original or the lowered instruction, whichever is lower.
4278 */
4279 const unsigned copy_width = MIN2(lower_width, inst->exec_size);
4280 const unsigned n = inst->exec_size / copy_width;
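/* E.g. lowering a SIMD16 instruction to SIMD8 gives copy_width = 8 and
* n = 2 half-width copies of the instruction.
*/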
4281 const unsigned dst_size = inst->regs_written * REG_SIZE /
4282 inst->dst.component_size(inst->exec_size);
4283 fs_reg dsts[4];
4284
4285 assert(n > 0 && n <= ARRAY_SIZE(dsts) &&
4286 !inst->writes_accumulator && !inst->mlen);
4287
4288 for (unsigned i = 0; i < n; i++) {
4289 /* Emit a copy of the original instruction with the lowered width.
4290 * If the EOT flag was set, throw it away on all but the last
4291 * instruction to avoid killing the thread prematurely.
4292 */
4293 fs_inst split_inst = *inst;
4294 split_inst.exec_size = lower_width;
4295 split_inst.eot = inst->eot && i == n - 1;
4296
4297 /* Select the correct channel enables for the i-th group, then
4298 * transform the sources and destination and emit the lowered
4299 * instruction.
4300 */
4301 const fs_builder lbld = ibld.group(lower_width, i);
4302
4303 for (unsigned j = 0; j < inst->sources; j++) {
4304 if (inst->src[j].file != BAD_FILE &&
4305 !is_uniform(inst->src[j])) {
4306 /* Get the i-th copy_width-wide chunk of the source. */
4307 const fs_reg src = horiz_offset(inst->src[j], copy_width * i);
4308 const unsigned src_size = inst->components_read(j);
4309
4310 /* Use a trivial transposition to copy one out of every n
4311 * copy_width-wide components of the register into a
4312 * temporary passed as source to the lowered instruction.
4313 */
4314 split_inst.src[j] = lbld.vgrf(inst->src[j].type, src_size);
4315 emit_transpose(lbld.group(copy_width, 0),
4316 split_inst.src[j], &src, 1, src_size, n);
4317 }
4318 }
4319
4320 if (inst->regs_written) {
4321 /* Allocate enough space to hold the result of the lowered
4322 * instruction and fix up the number of registers written.
4323 */
4324 split_inst.dst = dsts[i] =
4325 lbld.vgrf(inst->dst.type, dst_size);
4326 split_inst.regs_written =
4327 DIV_ROUND_UP(inst->regs_written * lower_width,
4328 inst->exec_size);
4329 }
4330
4331 lbld.emit(split_inst);
4332 }
4333
4334 if (inst->regs_written) {
4335 /* Distance between useful channels in the temporaries, skipping
4336 * garbage if the lowered instruction is wider than the original.
4337 */
4338 const unsigned m = lower_width / copy_width;
4339
4340 /* Interleave the components of the result from the lowered
4341 * instructions. We need to set exec_all() when copying more than
4342 * one half per component, because LOAD_PAYLOAD (in terms of which
4343 * emit_transpose is implemented) can only use the same channel
4344 * enable signals for all of its non-header sources.
4345 */
4346 emit_transpose(ibld.exec_all(inst->exec_size > copy_width)
4347 .group(copy_width, 0),
4348 inst->dst, dsts, n, dst_size, m);
4349 }
4350
4351 inst->remove(block);
4352 progress = true;
4353 }
4354 }
4355
4356 if (progress)
4357 invalidate_live_intervals();
4358
4359 return progress;
4360 }
4361
4362 void
4363 fs_visitor::dump_instructions()
4364 {
4365 dump_instructions(NULL);
4366 }
4367
4368 void
4369 fs_visitor::dump_instructions(const char *name)
4370 {
4371 FILE *file = stderr;
4372 if (name && geteuid() != 0) {
4373 file = fopen(name, "w");
4374 if (!file)
4375 file = stderr;
4376 }
4377
4378 if (cfg) {
4379 calculate_register_pressure();
4380 int ip = 0, max_pressure = 0;
4381 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
4382 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
4383 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
4384 dump_instruction(inst, file);
4385 ip++;
4386 }
4387 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
4388 } else {
4389 int ip = 0;
4390 foreach_in_list(backend_instruction, inst, &instructions) {
4391 fprintf(file, "%4d: ", ip++);
4392 dump_instruction(inst, file);
4393 }
4394 }
4395
4396 if (file != stderr) {
4397 fclose(file);
4398 }
4399 }
4400
4401 void
4402 fs_visitor::dump_instruction(backend_instruction *be_inst)
4403 {
4404 dump_instruction(be_inst, stderr);
4405 }
4406
4407 void
4408 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
4409 {
4410 fs_inst *inst = (fs_inst *)be_inst;
4411
4412 if (inst->predicate) {
4413 fprintf(file, "(%cf0.%d) ",
4414 inst->predicate_inverse ? '-' : '+',
4415 inst->flag_subreg);
4416 }
4417
4418 fprintf(file, "%s", brw_instruction_name(inst->opcode));
4419 if (inst->saturate)
4420 fprintf(file, ".sat");
4421 if (inst->conditional_mod) {
4422 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
4423 if (!inst->predicate &&
4424 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
4425 inst->opcode != BRW_OPCODE_IF &&
4426 inst->opcode != BRW_OPCODE_WHILE))) {
4427 fprintf(file, ".f0.%d", inst->flag_subreg);
4428 }
4429 }
4430 fprintf(file, "(%d) ", inst->exec_size);
4431
4432 if (inst->mlen) {
4433 fprintf(file, "(mlen: %d) ", inst->mlen);
4434 }
4435
4436 switch (inst->dst.file) {
4437 case GRF:
4438 fprintf(file, "vgrf%d", inst->dst.reg);
4439 if (alloc.sizes[inst->dst.reg] != inst->regs_written ||
4440 inst->dst.subreg_offset)
4441 fprintf(file, "+%d.%d",
4442 inst->dst.reg_offset, inst->dst.subreg_offset);
4443 break;
4444 case MRF:
4445 fprintf(file, "m%d", inst->dst.reg);
4446 break;
4447 case BAD_FILE:
4448 fprintf(file, "(null)");
4449 break;
4450 case UNIFORM:
4451 fprintf(file, "***u%d***", inst->dst.reg + inst->dst.reg_offset);
4452 break;
4453 case ATTR:
4454 fprintf(file, "***attr%d***", inst->dst.reg + inst->dst.reg_offset);
4455 break;
4456 case HW_REG:
4457 if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
4458 switch (inst->dst.fixed_hw_reg.nr) {
4459 case BRW_ARF_NULL:
4460 fprintf(file, "null");
4461 break;
4462 case BRW_ARF_ADDRESS:
4463 fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
4464 break;
4465 case BRW_ARF_ACCUMULATOR:
4466 fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
4467 break;
4468 case BRW_ARF_FLAG:
4469 fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
4470 inst->dst.fixed_hw_reg.subnr);
4471 break;
4472 default:
4473 fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
4474 inst->dst.fixed_hw_reg.subnr);
4475 break;
4476 }
4477 } else {
4478 fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
4479 }
4480 if (inst->dst.fixed_hw_reg.subnr)
4481 fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
4482 break;
4483 default:
4484 fprintf(file, "???");
4485 break;
4486 }
4487 fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
4488
4489 for (int i = 0; i < inst->sources; i++) {
4490 if (inst->src[i].negate)
4491 fprintf(file, "-");
4492 if (inst->src[i].abs)
4493 fprintf(file, "|");
4494 switch (inst->src[i].file) {
4495 case GRF:
4496 fprintf(file, "vgrf%d", inst->src[i].reg);
4497 if (alloc.sizes[inst->src[i].reg] != (unsigned)inst->regs_read(i) ||
4498 inst->src[i].subreg_offset)
4499 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
4500 inst->src[i].subreg_offset);
4501 break;
4502 case MRF:
4503 fprintf(file, "***m%d***", inst->src[i].reg);
4504 break;
4505 case ATTR:
4506 fprintf(file, "attr%d+%d", inst->src[i].reg, inst->src[i].reg_offset);
4507 break;
4508 case UNIFORM:
4509 fprintf(file, "u%d", inst->src[i].reg + inst->src[i].reg_offset);
4510 if (inst->src[i].reladdr) {
4511 fprintf(file, "+reladdr");
4512 } else if (inst->src[i].subreg_offset) {
4513 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
4514 inst->src[i].subreg_offset);
4515 }
4516 break;
4517 case BAD_FILE:
4518 fprintf(file, "(null)");
4519 break;
4520 case IMM:
4521 switch (inst->src[i].type) {
4522 case BRW_REGISTER_TYPE_F:
4523 fprintf(file, "%ff", inst->src[i].fixed_hw_reg.dw1.f);
4524 break;
4525 case BRW_REGISTER_TYPE_W:
4526 case BRW_REGISTER_TYPE_D:
4527 fprintf(file, "%dd", inst->src[i].fixed_hw_reg.dw1.d);
4528 break;
4529 case BRW_REGISTER_TYPE_UW:
4530 case BRW_REGISTER_TYPE_UD:
4531 fprintf(file, "%uu", inst->src[i].fixed_hw_reg.dw1.ud);
4532 break;
4533 case BRW_REGISTER_TYPE_VF:
4534 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
4535 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 0) & 0xff),
4536 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 8) & 0xff),
4537 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff),
4538 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff));
4539 break;
4540 default:
4541 fprintf(file, "???");
4542 break;
4543 }
4544 break;
4545 case HW_REG:
4546 if (inst->src[i].fixed_hw_reg.negate)
4547 fprintf(file, "-");
4548 if (inst->src[i].fixed_hw_reg.abs)
4549 fprintf(file, "|");
4550 if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
4551 switch (inst->src[i].fixed_hw_reg.nr) {
4552 case BRW_ARF_NULL:
4553 fprintf(file, "null");
4554 break;
4555 case BRW_ARF_ADDRESS:
4556 fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
4557 break;
4558 case BRW_ARF_ACCUMULATOR:
4559 fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
4560 break;
4561 case BRW_ARF_FLAG:
4562 fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
4563 inst->src[i].fixed_hw_reg.subnr);
4564 break;
4565 default:
4566 fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
4567 inst->src[i].fixed_hw_reg.subnr);
4568 break;
4569 }
4570 } else {
4571 fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
4572 }
4573 if (inst->src[i].fixed_hw_reg.subnr)
4574 fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
4575 if (inst->src[i].fixed_hw_reg.abs)
4576 fprintf(file, "|");
4577 break;
4578 default:
4579 fprintf(file, "???");
4580 break;
4581 }
4582 if (inst->src[i].abs)
4583 fprintf(file, "|");
4584
4585 if (inst->src[i].file != IMM) {
4586 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
4587 }
4588
4589 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
4590 fprintf(file, ", ");
4591 }
4592
4593 fprintf(file, " ");
4594
4595 if (dispatch_width == 16 && inst->exec_size == 8) {
4596 if (inst->force_sechalf)
4597 fprintf(file, "2ndhalf ");
4598 else
4599 fprintf(file, "1sthalf ");
4600 }
4601
4602 fprintf(file, "\n");
4603 }
4604
4605 /**
4606 * Possibly returns an instruction that set up @param reg.
4607 *
4608 * Sometimes we want to take the result of some expression/variable
4609 * dereference tree and rewrite the instruction generating the result
4610 * of the tree. When processing the tree, we know that the
4611 * instructions generated are all writing temporaries that are dead
4612 * outside of this tree. So, if we have some instructions that write
4613 * a temporary, we're free to point that temp write somewhere else.
4614 *
4615 * Note that this doesn't guarantee that the instruction wrote only
4616 * reg -- it might be the size=4 destination of a texture instruction.
4617 */
4618 fs_inst *
4619 fs_visitor::get_instruction_generating_reg(fs_inst *start,
4620 fs_inst *end,
4621 const fs_reg &reg)
4622 {
4623 if (end == start ||
4624 end->is_partial_write() ||
4625 reg.reladdr ||
4626 !reg.equals(end->dst)) {
4627 return NULL;
4628 } else {
4629 return end;
4630 }
4631 }
4632
4633 void
4634 fs_visitor::setup_payload_gen6()
4635 {
4636 bool uses_depth =
4637 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
4638 unsigned barycentric_interp_modes =
4639 (stage == MESA_SHADER_FRAGMENT) ?
4640 ((brw_wm_prog_data*) this->prog_data)->barycentric_interp_modes : 0;
4641
4642 assert(devinfo->gen >= 6);
4643
4644 /* R0-1: masks, pixel X/Y coordinates. */
4645 payload.num_regs = 2;
4646 /* R2: only for 32-pixel dispatch. */
4647
4648 /* R3-26: barycentric interpolation coordinates. These appear in the
4649 * same order that they appear in the brw_wm_barycentric_interp_mode
4650 * enum. Each set of coordinates occupies 2 registers if dispatch width
4651 * == 8 and 4 registers if dispatch width == 16. Coordinates only
4652 * appear if they were enabled using the "Barycentric Interpolation
4653 * Mode" bits in WM_STATE.
4654 */
4655 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
4656 if (barycentric_interp_modes & (1 << i)) {
4657 payload.barycentric_coord_reg[i] = payload.num_regs;
4658 payload.num_regs += 2;
4659 if (dispatch_width == 16) {
4660 payload.num_regs += 2;
4661 }
4662 }
4663 }
4664
4665 /* R27: interpolated depth if uses source depth */
4666 if (uses_depth) {
4667 payload.source_depth_reg = payload.num_regs;
4668 payload.num_regs++;
4669 if (dispatch_width == 16) {
4670 /* R28: interpolated depth if not SIMD8. */
4671 payload.num_regs++;
4672 }
4673 }
4674 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
4675 if (uses_depth) {
4676 payload.source_w_reg = payload.num_regs;
4677 payload.num_regs++;
4678 if (dispatch_width == 16) {
4679 /* R30: interpolated W if not SIMD8. */
4680 payload.num_regs++;
4681 }
4682 }
4683
4684 if (stage == MESA_SHADER_FRAGMENT) {
4685 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
4686 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
4687 prog_data->uses_pos_offset = key->compute_pos_offset;
4688 /* R31: MSAA position offsets. */
4689 if (prog_data->uses_pos_offset) {
4690 payload.sample_pos_reg = payload.num_regs;
4691 payload.num_regs++;
4692 }
4693 }
4694
4695 /* R32: MSAA input coverage mask */
4696 if (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) {
4697 assert(devinfo->gen >= 7);
4698 payload.sample_mask_in_reg = payload.num_regs;
4699 payload.num_regs++;
4700 if (dispatch_width == 16) {
4701 /* R33: input coverage mask if not SIMD8. */
4702 payload.num_regs++;
4703 }
4704 }
4705
4706 /* R34-: bary for 32-pixel. */
4707 /* R58-59: interp W for 32-pixel. */
4708
4709 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
4710 source_depth_to_render_target = true;
4711 }
4712 }
4713
4714 void
4715 fs_visitor::setup_vs_payload()
4716 {
4717 /* R0: thread header, R1: urb handles */
4718 payload.num_regs = 2;
4719 }
4720
4721 /**
4722 * We are building the local ID push constant data using the simplest possible
4723 * method. We simply push the local IDs directly as they should appear in the
4724 * registers for the uvec3 gl_LocalInvocationID variable.
4725 *
4726 * Therefore, for SIMD8, we use 3 full registers, and for SIMD16 we use 6
4727 * registers worth of push constant space.
4728 *
4729 * Note: Any updates to brw_cs_prog_local_id_payload_dwords,
4730 * fill_local_id_payload or fs_visitor::emit_cs_local_invocation_id_setup need
4731 * to be coordinated.
4732 *
4733 * FINISHME: There are a few easy optimizations to consider.
4734 *
4735 * 1. If gl_WorkGroupSize x, y or z is 1, we can just use zero, and there is
4736 * no need for using push constant space for that dimension.
4737 *
4738 * 2. Since GL_MAX_COMPUTE_WORK_GROUP_SIZE is currently 1024 or less, we can
4739 * easily use 16-bit words rather than 32-bit dwords in the push constant
4740 * data.
4741 *
4742 * 3. If gl_WorkGroupSize x, y or z is small, then we can use bytes for
4743 * conveying the data, and thereby reduce push constant usage.
4744 *
4745 */
4746 void
4747 fs_visitor::setup_cs_payload()
4748 {
4749 assert(devinfo->gen >= 7);
4750 brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
4751
4752 payload.num_regs = 1;
4753
4754 if (nir->info.system_values_read & SYSTEM_BIT_LOCAL_INVOCATION_ID) {
4755 prog_data->local_invocation_id_regs = dispatch_width * 3 / 8;
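/* I.e. 3 registers for SIMD8 (8 * 3 / 8) and 6 for SIMD16, as described
* in the comment above this function.
*/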
4756 payload.local_invocation_id_reg = payload.num_regs;
4757 payload.num_regs += prog_data->local_invocation_id_regs;
4758 }
4759 }
4760
4761 void
4762 fs_visitor::calculate_register_pressure()
4763 {
4764 invalidate_live_intervals();
4765 calculate_live_intervals();
4766
4767 unsigned num_instructions = 0;
4768 foreach_block(block, cfg)
4769 num_instructions += block->instructions.length();
4770
4771 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
4772
4773 for (unsigned reg = 0; reg < alloc.count; reg++) {
4774 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
4775 regs_live_at_ip[ip] += alloc.sizes[reg];
4776 }
4777 }
4778
4779 void
4780 fs_visitor::optimize()
4781 {
4782 /* Start by validating the shader we currently have. */
4783 validate();
4784
4785 /* bld is the common builder object we used to translate the program into
4786 * i965 IR; it points at the program's end. For the optimization and lowering
4787 * passes coming next, any code added after the end of the program without
4788 * having explicitly called fs_builder::at() clearly points at a mistake.
4789 * Ideally optimization passes wouldn't be part of the visitor so they
4790 * wouldn't have access to bld at all, but they do, so just in case some
4791 * pass forgets to ask for a location explicitly, set it to NULL here to
4792 * make it trip. The dispatch width is initialized to a bogus value to
4793 * make sure that optimizations set the execution controls explicitly to
4794 * match the code they are manipulating instead of relying on the defaults.
4795 */
4796 bld = fs_builder(this, 64);
4797
4798 assign_constant_locations();
4799 demote_pull_constants();
4800
4801 validate();
4802
4803 split_virtual_grfs();
4804 validate();
4805
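/* OPT() runs a single pass: it bumps the pass counter, dumps the IR to a
* file when the DEBUG_OPTIMIZER flag is set and the pass made progress,
* re-validates the IR, and folds the result into 'progress' while
* evaluating to whether this particular pass changed anything.
*/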
4806 #define OPT(pass, args...) ({ \
4807 pass_num++; \
4808 bool this_progress = pass(args); \
4809 \
4810 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
4811 char filename[64]; \
4812 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
4813 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
4814 \
4815 backend_shader::dump_instructions(filename); \
4816 } \
4817 \
4818 validate(); \
4819 \
4820 progress = progress || this_progress; \
4821 this_progress; \
4822 })
4823
4824 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
4825 char filename[64];
4826 snprintf(filename, 64, "%s%d-%s-00-start",
4827 stage_abbrev, dispatch_width, nir->info.name);
4828
4829 backend_shader::dump_instructions(filename);
4830 }
4831
4832 bool progress = false;
4833 int iteration = 0;
4834 int pass_num = 0;
4835
4836 OPT(lower_simd_width);
4837 OPT(lower_logical_sends);
4838
4839 do {
4840 progress = false;
4841 pass_num = 0;
4842 iteration++;
4843
4844 OPT(remove_duplicate_mrf_writes);
4845
4846 OPT(opt_algebraic);
4847 OPT(opt_cse);
4848 OPT(opt_copy_propagate);
4849 OPT(opt_predicated_break, this);
4850 OPT(opt_cmod_propagation);
4851 OPT(dead_code_eliminate);
4852 OPT(opt_peephole_sel);
4853 OPT(dead_control_flow_eliminate, this);
4854 OPT(opt_register_renaming);
4855 OPT(opt_redundant_discard_jumps);
4856 OPT(opt_saturate_propagation);
4857 OPT(opt_zero_samples);
4858 OPT(register_coalesce);
4859 OPT(compute_to_mrf);
4860 OPT(eliminate_find_live_channel);
4861
4862 OPT(compact_virtual_grfs);
4863 } while (progress);
4864
4865 pass_num = 0;
4866
4867 OPT(opt_sampler_eot);
4868
4869 if (OPT(lower_load_payload)) {
4870 split_virtual_grfs();
4871 OPT(register_coalesce);
4872 OPT(compute_to_mrf);
4873 OPT(dead_code_eliminate);
4874 }
4875
4876 OPT(opt_combine_constants);
4877 OPT(lower_integer_multiplication);
4878
4879 lower_uniform_pull_constant_loads();
4880
4881 validate();
4882 }
4883
4884 /**
4885 * Three-source instructions must have a GRF/MRF destination register.
4886 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
4887 */
4888 void
4889 fs_visitor::fixup_3src_null_dest()
4890 {
4891 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
4892 if (inst->is_3src() && inst->dst.is_null()) {
4893 inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
4894 inst->dst.type);
4895 }
4896 }
4897 }
4898
4899 void
4900 fs_visitor::allocate_registers()
4901 {
4902 bool allocated_without_spills;
4903
4904 static const enum instruction_scheduler_mode pre_modes[] = {
4905 SCHEDULE_PRE,
4906 SCHEDULE_PRE_NON_LIFO,
4907 SCHEDULE_PRE_LIFO,
4908 };
4909
4910 /* Try each scheduling heuristic to see if it can successfully register
4911 * allocate without spilling. They should be ordered by decreasing
4912 * performance but increasing likelihood of allocating.
4913 */
4914 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
4915 schedule_instructions(pre_modes[i]);
4916
4917 if (0) {
4918 assign_regs_trivial();
4919 allocated_without_spills = true;
4920 } else {
4921 allocated_without_spills = assign_regs(false);
4922 }
4923 if (allocated_without_spills)
4924 break;
4925 }
4926
4927 if (!allocated_without_spills) {
4928 /* We assume that any spilling is worse than just dropping back to
4929 * SIMD8. There's probably actually some intermediate point where
4930 * SIMD16 with a couple of spills is still better.
4931 */
4932 if (dispatch_width == 16) {
4933 fail("Failure to register allocate. Reduce number of "
4934 "live scalar values to avoid this.");
4935 } else {
4936 compiler->shader_perf_log(log_data,
4937 "%s shader triggered register spilling. "
4938 "Try reducing the number of live scalar "
4939 "values to improve performance.\n",
4940 stage_name);
4941 }
4942
4943 /* Since we're out of heuristics, just go spill registers until we
4944 * get an allocation.
4945 */
4946 while (!assign_regs(true)) {
4947 if (failed)
4948 break;
4949 }
4950 }
4951
4952 /* This must come after all optimization and register allocation, since
4953 * it inserts dead code that happens to have side effects, and it does
4954 * so based on the actual physical registers in use.
4955 */
4956 insert_gen4_send_dependency_workarounds();
4957
4958 if (failed)
4959 return;
4960
4961 if (!allocated_without_spills)
4962 schedule_instructions(SCHEDULE_POST);
4963
4964 if (last_scratch > 0)
4965 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
4966 }
4967
4968 bool
4969 fs_visitor::run_vs(gl_clip_plane *clip_planes)
4970 {
4971 assert(stage == MESA_SHADER_VERTEX);
4972
4973 setup_vs_payload();
4974
4975 if (shader_time_index >= 0)
4976 emit_shader_time_begin();
4977
4978 emit_nir_code();
4979
4980 if (failed)
4981 return false;
4982
4983 compute_clip_distance(clip_planes);
4984
4985 emit_urb_writes();
4986
4987 if (shader_time_index >= 0)
4988 emit_shader_time_end();
4989
4990 calculate_cfg();
4991
4992 optimize();
4993
4994 assign_curb_setup();
4995 assign_vs_urb_setup();
4996
4997 fixup_3src_null_dest();
4998 allocate_registers();
4999
5000 return !failed;
5001 }
5002
5003 bool
5004 fs_visitor::run_fs(bool do_rep_send)
5005 {
5006 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
5007 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
5008
5009 assert(stage == MESA_SHADER_FRAGMENT);
5010
5011 if (devinfo->gen >= 6)
5012 setup_payload_gen6();
5013 else
5014 setup_payload_gen4();
5015
5016 if (0) {
5017 emit_dummy_fs();
5018 } else if (do_rep_send) {
5019 assert(dispatch_width == 16);
5020 emit_repclear_shader();
5021 } else {
5022 if (shader_time_index >= 0)
5023 emit_shader_time_begin();
5024
5025 calculate_urb_setup();
5026 if (nir->info.inputs_read > 0) {
5027 if (devinfo->gen < 6)
5028 emit_interpolation_setup_gen4();
5029 else
5030 emit_interpolation_setup_gen6();
5031 }
5032
5033 /* We handle discards by keeping track of the still-live pixels in f0.1.
5034 * Initialize it with the dispatched pixels.
5035 */
5036 if (wm_prog_data->uses_kill) {
5037 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
5038 discard_init->flag_subreg = 1;
5039 }
5040
5041 /* Generate FS IR for main(). (The visitor only descends into
5042 * functions called "main".)
5043 */
5044 emit_nir_code();
5045
5046 if (failed)
5047 return false;
5048
5049 if (wm_prog_data->uses_kill)
5050 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
5051
5052 if (wm_key->alpha_test_func)
5053 emit_alpha_test();
5054
5055 emit_fb_writes();
5056
5057 if (shader_time_index >= 0)
5058 emit_shader_time_end();
5059
5060 calculate_cfg();
5061
5062 optimize();
5063
5064 assign_curb_setup();
5065 assign_urb_setup();
5066
5067 fixup_3src_null_dest();
5068 allocate_registers();
5069
5070 if (failed)
5071 return false;
5072 }
5073
5074 if (dispatch_width == 8)
5075 wm_prog_data->reg_blocks = brw_register_blocks(grf_used);
5076 else
5077 wm_prog_data->reg_blocks_16 = brw_register_blocks(grf_used);
5078
5079 return !failed;
5080 }
5081
5082 bool
5083 fs_visitor::run_cs()
5084 {
5085 assert(stage == MESA_SHADER_COMPUTE);
5086
5087 setup_cs_payload();
5088
5089 if (shader_time_index >= 0)
5090 emit_shader_time_begin();
5091
5092 emit_nir_code();
5093
5094 if (failed)
5095 return false;
5096
5097 emit_cs_terminate();
5098
5099 if (shader_time_index >= 0)
5100 emit_shader_time_end();
5101
5102 calculate_cfg();
5103
5104 optimize();
5105
5106 assign_curb_setup();
5107
5108 fixup_3src_null_dest();
5109 allocate_registers();
5110
5111 if (failed)
5112 return false;
5113
5114 return !failed;
5115 }
5116
5117 const unsigned *
5118 brw_wm_fs_emit(struct brw_context *brw,
5119 void *mem_ctx,
5120 const struct brw_wm_prog_key *key,
5121 struct brw_wm_prog_data *prog_data,
5122 struct gl_fragment_program *fp,
5123 struct gl_shader_program *prog,
5124 int shader_time_index8, int shader_time_index16,
5125 unsigned *final_assembly_size)
5126 {
5127 /* Now the main event: Visit the shader IR and generate our FS IR for it.
5128 */
5129 fs_visitor v(brw->intelScreen->compiler, brw, mem_ctx, key,
5130 &prog_data->base, &fp->Base, fp->Base.nir, 8, shader_time_index8);
5131 if (!v.run_fs(false /* do_rep_send */)) {
5132 if (prog) {
5133 prog->LinkStatus = false;
5134 ralloc_strcat(&prog->InfoLog, v.fail_msg);
5135 }
5136
5137 _mesa_problem(NULL, "Failed to compile fragment shader: %s\n",
5138 v.fail_msg);
5139
5140 return NULL;
5141 }
5142
5143 cfg_t *simd16_cfg = NULL;
5144 fs_visitor v2(brw->intelScreen->compiler, brw, mem_ctx, key,
5145 &prog_data->base, &fp->Base, fp->Base.nir, 16, shader_time_index16);
5146 if (likely(!(INTEL_DEBUG & DEBUG_NO16) || brw->use_rep_send)) {
5147 if (!v.simd16_unsupported) {
5148 /* Try a SIMD16 compile */
5149 v2.import_uniforms(&v);
5150 if (!v2.run_fs(brw->use_rep_send)) {
5151 perf_debug("SIMD16 shader failed to compile: %s", v2.fail_msg);
5152 } else {
5153 simd16_cfg = v2.cfg;
5154 }
5155 }
5156 }
5157
5158 cfg_t *simd8_cfg;
5159 int no_simd8 = (INTEL_DEBUG & DEBUG_NO8) || brw->no_simd8;
5160 if ((no_simd8 || brw->gen < 5) && simd16_cfg) {
5161 simd8_cfg = NULL;
5162 prog_data->no_8 = true;
5163 } else {
5164 simd8_cfg = v.cfg;
5165 prog_data->no_8 = false;
5166 }
5167
5168 fs_generator g(brw->intelScreen->compiler, brw,
5169 mem_ctx, (void *) key, &prog_data->base,
5170 &fp->Base, v.promoted_constants, v.runtime_check_aads_emit, "FS");
5171
5172 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
5173 char *name;
5174 if (prog)
5175 name = ralloc_asprintf(mem_ctx, "%s fragment shader %d",
5176 prog->Label ? prog->Label : "unnamed",
5177 prog->Name);
5178 else
5179 name = ralloc_asprintf(mem_ctx, "fragment program %d", fp->Base.Id);
5180
5181 g.enable_debug(name);
5182 }
5183
5184 if (simd8_cfg)
5185 g.generate_code(simd8_cfg, 8);
5186 if (simd16_cfg)
5187 prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
5188
5189 return g.get_assembly(final_assembly_size);
5190 }
5191
5192 void
5193 brw_cs_fill_local_id_payload(const struct brw_cs_prog_data *prog_data,
5194 void *buffer, uint32_t threads, uint32_t stride)
5195 {
5196 if (prog_data->local_invocation_id_regs == 0)
5197 return;
5198
5199 /* 'stride' should be an integer number of registers, that is, a multiple
5200 * of 32 bytes.
5201 */
5202 assert(stride % 32 == 0);
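/* Worked example with hypothetical sizes: for a SIMD8 program with
* local_size = (4, 2, 1), each thread's payload holds
*   x: 0 1 2 3 0 1 2 3
*   y: 0 0 0 0 1 1 1 1
*   z: 0 0 0 0 0 0 0 0
* with the x/y/z counters carrying over into the next thread.
*/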
5203
5204 unsigned x = 0, y = 0, z = 0;
5205 for (unsigned t = 0; t < threads; t++) {
5206 uint32_t *param = (uint32_t *) buffer + stride * t / 4;
5207
5208 for (unsigned i = 0; i < prog_data->simd_size; i++) {
5209 param[0 * prog_data->simd_size + i] = x;
5210 param[1 * prog_data->simd_size + i] = y;
5211 param[2 * prog_data->simd_size + i] = z;
5212
5213 x++;
5214 if (x == prog_data->local_size[0]) {
5215 x = 0;
5216 y++;
5217 if (y == prog_data->local_size[1]) {
5218 y = 0;
5219 z++;
5220 if (z == prog_data->local_size[2])
5221 z = 0;
5222 }
5223 }
5224 }
5225 }
5226 }
5227
5228 fs_reg *
5229 fs_visitor::emit_cs_local_invocation_id_setup()
5230 {
5231 assert(stage == MESA_SHADER_COMPUTE);
5232
5233 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
5234
5235 struct brw_reg src =
5236 brw_vec8_grf(payload.local_invocation_id_reg, 0);
5237 src = retype(src, BRW_REGISTER_TYPE_UD);
5238 bld.MOV(*reg, src);
5239 src.nr += dispatch_width / 8;
5240 bld.MOV(offset(*reg, bld, 1), src);
5241 src.nr += dispatch_width / 8;
5242 bld.MOV(offset(*reg, bld, 2), src);
5243
5244 return reg;
5245 }
5246
5247 fs_reg *
5248 fs_visitor::emit_cs_work_group_id_setup()
5249 {
5250 assert(stage == MESA_SHADER_COMPUTE);
5251
5252 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
5253
5254 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
5255 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
5256 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
5257
5258 bld.MOV(*reg, r0_1);
5259 bld.MOV(offset(*reg, bld, 1), r0_6);
5260 bld.MOV(offset(*reg, bld, 2), r0_7);
5261
5262 return reg;
5263 }
5264
5265 const unsigned *
5266 brw_cs_emit(struct brw_context *brw,
5267 void *mem_ctx,
5268 const struct brw_cs_prog_key *key,
5269 struct brw_cs_prog_data *prog_data,
5270 struct gl_compute_program *cp,
5271 struct gl_shader_program *prog,
5272 int shader_time_index,
5273 unsigned *final_assembly_size)
5274 {
5275 prog_data->local_size[0] = cp->LocalSize[0];
5276 prog_data->local_size[1] = cp->LocalSize[1];
5277 prog_data->local_size[2] = cp->LocalSize[2];
5278 unsigned local_workgroup_size =
5279 cp->LocalSize[0] * cp->LocalSize[1] * cp->LocalSize[2];
5280
5281 cfg_t *cfg = NULL;
5282 const char *fail_msg = NULL;
5283
5284 /* Now the main event: Visit the shader IR and generate our CS IR for it.
5285 */
5286 fs_visitor v8(brw->intelScreen->compiler, brw, mem_ctx, key,
5287 &prog_data->base, &cp->Base, cp->Base.nir, 8, shader_time_index);
5288 if (!v8.run_cs()) {
5289 fail_msg = v8.fail_msg;
5290 } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
5291 cfg = v8.cfg;
5292 prog_data->simd_size = 8;
5293 }
5294
5295 fs_visitor v16(brw->intelScreen->compiler, brw, mem_ctx, key,
5296 &prog_data->base, &cp->Base, cp->Base.nir, 16, shader_time_index);
5297 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
5298 !fail_msg && !v8.simd16_unsupported &&
5299 local_workgroup_size <= 16 * brw->max_cs_threads) {
5300 /* Try a SIMD16 compile */
5301 v16.import_uniforms(&v8);
5302 if (!v16.run_cs()) {
5303 perf_debug("SIMD16 shader failed to compile: %s", v16.fail_msg);
5304 if (!cfg) {
5305 fail_msg =
5306 "Couldn't generate SIMD16 program and not "
5307 "enough threads for SIMD8";
5308 }
5309 } else {
5310 cfg = v16.cfg;
5311 prog_data->simd_size = 16;
5312 }
5313 }
5314
5315 if (unlikely(cfg == NULL)) {
5316 assert(fail_msg);
5317 prog->LinkStatus = false;
5318 ralloc_strcat(&prog->InfoLog, fail_msg);
5319 _mesa_problem(NULL, "Failed to compile compute shader: %s\n",
5320 fail_msg);
5321 return NULL;
5322 }
5323
5324 fs_generator g(brw->intelScreen->compiler, brw,
5325 mem_ctx, (void*) key, &prog_data->base, &cp->Base,
5326 v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
5327 if (INTEL_DEBUG & DEBUG_CS) {
5328 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
5329 prog->Label ? prog->Label : "unnamed",
5330 prog->Name);
5331 g.enable_debug(name);
5332 }
5333
5334 g.generate_code(cfg, prog_data->simd_size);
5335
5336 return g.get_assembly(final_assembly_size);
5337 }