/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs.cpp
 *
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */

#include "main/macros.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cs.h"
#include "brw_nir.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_program.h"
#include "brw_dead_control_flow.h"
#include "compiler/glsl_types.h"
#include "program/prog_parameter.h"

using namespace brw;

void
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
              const fs_reg *src, unsigned sources)
{
   memset(this, 0, sizeof(*this));

   this->src = new fs_reg[MAX2(sources, 3)];
   for (unsigned i = 0; i < sources; i++)
      this->src[i] = src[i];

   this->opcode = opcode;
   this->dst = dst;
   this->sources = sources;
   this->exec_size = exec_size;

   assert(dst.file != IMM && dst.file != UNIFORM);

   assert(this->exec_size != 0);

   this->conditional_mod = BRW_CONDITIONAL_NONE;

   /* This will be the case for almost all instructions. */
   switch (dst.file) {
   case VGRF:
   case ARF:
   case FIXED_GRF:
   case MRF:
   case ATTR:
      this->regs_written = DIV_ROUND_UP(dst.component_size(exec_size),
                                        REG_SIZE);
      break;
   case BAD_FILE:
      this->regs_written = 0;
      break;
   case IMM:
   case UNIFORM:
      unreachable("Invalid destination register file");
   }

   this->writes_accumulator = false;
}
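
/* For illustration (example added by the editor, derived from the code
 * above): regs_written counts whole 32-byte GRFs.  A SIMD16 instruction
 * writing a float destination with stride 1 covers 16 * 4 = 64 bytes, so
 * regs_written = DIV_ROUND_UP(64, REG_SIZE) = 2; the same destination at
 * SIMD8 covers 32 bytes and takes a single register.
 */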

fs_inst::fs_inst()
{
   init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
{
   init(opcode, exec_size, reg_undef, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
{
   init(opcode, exec_size, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0)
{
   const fs_reg src[1] = { src0 };
   init(opcode, exec_size, dst, src, 1);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1)
{
   const fs_reg src[2] = { src0, src1 };
   init(opcode, exec_size, dst, src, 2);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
{
   const fs_reg src[3] = { src0, src1, src2 };
   init(opcode, exec_size, dst, src, 3);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
                 const fs_reg src[], unsigned sources)
{
   init(opcode, exec_width, dst, src, sources);
}

fs_inst::fs_inst(const fs_inst &that)
{
   memcpy(this, &that, sizeof(that));

   this->src = new fs_reg[MAX2(that.sources, 3)];

   for (unsigned i = 0; i < that.sources; i++)
      this->src[i] = that.src[i];
}

fs_inst::~fs_inst()
{
   delete[] this->src;
}

void
fs_inst::resize_sources(uint8_t num_sources)
{
   if (this->sources != num_sources) {
      fs_reg *src = new fs_reg[MAX2(num_sources, 3)];

      for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
         src[i] = this->src[i];

      delete[] this->src;
      this->src = src;
      this->sources = num_sources;
   }
}

void
fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
                                       const fs_reg &dst,
                                       const fs_reg &surf_index,
                                       const fs_reg &varying_offset,
                                       uint32_t const_offset)
{
   /* We have our constant surface use a pitch of 4 bytes, so our index can
    * be any component of a vector, and then we load 4 contiguous
    * components starting from that.
    *
    * We break down the const_offset to a portion added to the variable
    * offset and a portion done using reg_offset, which means that if you
    * have GLSL using something like "uniform vec4 a[20]; gl_FragColor =
    * a[i]", we'll temporarily generate 4 vec4 loads from offset i * 4, and
    * CSE can later notice that those loads are all the same and eliminate
    * the redundant ones.
    */
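   /* For illustration (example added by the editor): with const_offset =
    * 20 bytes, the vec4-aligned part (20 & ~0xf = 16) is added to the
    * variable offset below, and the remainder (20 & 0xf = 4) selects
    * component 4 / type_sz = 1 of the loaded vec4 in the final MOV.
    */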
   fs_reg vec4_offset = vgrf(glsl_type::uint_type);
   bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));

   /* The pull load message will load a vec4 (16 bytes). If we are loading
    * a double this means we are only loading 2 elements worth of data.
    * We also want to use a 32-bit data type for the dst of the load operation
    * so other parts of the driver don't get confused about the size of the
    * result.
    */
   fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
   fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
                            vec4_result, surf_index, vec4_offset);
   inst->regs_written = 4 * bld.dispatch_width() / 8;

   if (type_sz(dst.type) == 8) {
      shuffle_32bit_load_result_to_64bit_data(
         bld, retype(vec4_result, dst.type), vec4_result, 2);
   }

   vec4_result.type = dst.type;
   bld.MOV(dst, offset(vec4_result, bld,
                       (const_offset & 0xf) / type_sz(vec4_result.type)));
}

/**
 * A helper for MOV generation for fixing up broken hardware SEND dependency
 * handling.
 */
void
fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
{
   /* The caller always wants uncompressed to emit the minimal extra
    * dependencies, and to avoid having to deal with aligning its regs to 2.
    */
   const fs_builder ubld = bld.annotate("send dependency resolve")
                              .half(0);

   ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
}

bool
fs_inst::equals(fs_inst *inst) const
{
   return (opcode == inst->opcode &&
           dst.equals(inst->dst) &&
           src[0].equals(inst->src[0]) &&
           src[1].equals(inst->src[1]) &&
           src[2].equals(inst->src[2]) &&
           saturate == inst->saturate &&
           predicate == inst->predicate &&
           conditional_mod == inst->conditional_mod &&
           mlen == inst->mlen &&
           base_mrf == inst->base_mrf &&
           target == inst->target &&
           eot == inst->eot &&
           header_size == inst->header_size &&
           shadow_compare == inst->shadow_compare &&
           exec_size == inst->exec_size &&
           offset == inst->offset);
}

bool
fs_inst::overwrites_reg(const fs_reg &reg) const
{
   return reg.in_range(dst, regs_written);
}

bool
fs_inst::is_send_from_grf() const
{
   switch (opcode) {
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
      return true;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return src[1].file == VGRF;
   case FS_OPCODE_FB_WRITE:
      return src[0].file == VGRF;
   default:
      if (is_tex())
         return src[0].file == VGRF;

      return false;
   }
}

/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * - SIMD16 compressed instructions with certain regioning (see below).
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
fs_inst::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      /* Multiple partial writes to the destination */
      return true;
   default:
      /* The SIMD16 compressed instruction
       *
       *    add(16)   g4<1>F   g4<8,8,1>F   g6<8,8,1>F
       *
       * is actually decoded in hardware as:
       *
       *    add(8)    g4<1>F   g4<8,8,1>F   g6<8,8,1>F
       *    add(8)    g5<1>F   g5<8,8,1>F   g7<8,8,1>F
       *
       * Which is safe.  However, if we have uniform accesses
       * happening, we get into trouble:
       *
       *    add(8)    g4<1>F   g4<0,1,0>F   g6<8,8,1>F
       *    add(8)    g5<1>F   g4<0,1,0>F   g7<8,8,1>F
       *
       * Now our destination for the first instruction overwrote the
       * second instruction's src0, and we get garbage for those 8
       * pixels.  There's a similar issue for the pre-gen6
       * pixel_x/pixel_y, which are registers of 16-bit values and thus
       * would get stomped by the first decode as well.
       */
      if (exec_size == 16) {
         for (int i = 0; i < sources; i++) {
            if (src[i].file == VGRF && (src[i].stride == 0 ||
                                        src[i].type == BRW_REGISTER_TYPE_UW ||
                                        src[i].type == BRW_REGISTER_TYPE_W ||
                                        src[i].type == BRW_REGISTER_TYPE_UB ||
                                        src[i].type == BRW_REGISTER_TYPE_B)) {
               return true;
            }
         }
      }
      return false;
   }
}

bool
fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
{
   if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   fs_reg reg = this->src[0];
   if (reg.file != VGRF || reg.reg_offset != 0 || reg.stride == 0)
      return false;

   if (grf_alloc.sizes[reg.nr] != this->regs_written)
      return false;

   for (int i = 0; i < this->sources; i++) {
      reg.type = this->src[i].type;
      if (!this->src[i].equals(reg))
         return false;

      if (i < this->header_size) {
         reg.reg_offset += 1;
      } else {
         reg = horiz_offset(reg, this->exec_size);
      }
   }

   return true;
}

bool
fs_inst::can_do_source_mods(const struct brw_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

bool
fs_inst::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}

bool
fs_inst::has_side_effects() const
{
   return this->eot || backend_instruction::has_side_effects();
}

void
fs_reg::init()
{
   memset(this, 0, sizeof(*this));
   stride = 1;
}

/** Generic unset register constructor. */
fs_reg::fs_reg()
{
   init();
   this->file = BAD_FILE;
}

fs_reg::fs_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->reg_offset = 0;
   this->subreg_offset = 0;
   this->stride = 1;
   if (this->file == IMM &&
       (this->type != BRW_REGISTER_TYPE_V &&
        this->type != BRW_REGISTER_TYPE_UV &&
        this->type != BRW_REGISTER_TYPE_VF)) {
      this->stride = 0;
   }
}

bool
fs_reg::equals(const fs_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           subreg_offset == r.subreg_offset &&
           stride == r.stride);
}

fs_reg &
fs_reg::set_smear(unsigned subreg)
{
   assert(file != ARF && file != FIXED_GRF && file != IMM);
   subreg_offset = subreg * type_sz(type);
   stride = 0;
   return *this;
}

bool
fs_reg::is_contiguous() const
{
   return stride == 1;
}

unsigned
fs_reg::component_size(unsigned width) const
{
   const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
                            hstride == 0 ? 0 :
                            1 << (hstride - 1));
   return MAX2(width * stride, 1) * type_sz(type);
}
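
/* For illustration (example added by the editor): component_size() is the
 * byte footprint of one logical component across "width" channels.  A
 * float VGRF with stride 2 read at width 8 spans MAX2(8 * 2, 1) * 4 = 64
 * bytes, while a stride-0 (scalar) source spans only type_sz bytes
 * regardless of width.
 */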

extern "C" int
type_size_scalar(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_DOUBLE:
      return type->components() * 2;
   case GLSL_TYPE_ARRAY:
      return type_size_scalar(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size_scalar(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_IMAGE:
      return BRW_IMAGE_PARAM_SIZE;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }

   return 0;
}
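
/* For illustration (example added by the editor): with this scalar packing
 * a float takes 1 slot, a vec3 takes 3 (no vec4 padding), a mat3 takes 9,
 * a dvec2 takes 4 (doubles count double), and "vec2 a[4]" takes 8.
 */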

/**
 * Returns the number of scalar components needed to store type, assuming
 * that vectors are padded out to vec4.
 *
 * This has the packing rules of type_size_vec4(), but counts components
 * similar to type_size_scalar().
 */
extern "C" int
type_size_vec4_times_4(const struct glsl_type *type)
{
   return 4 * type_size_vec4(type);
}

/* Attribute arrays are loaded as one vec4 per element (or matrix column),
 * except for double-precision types, which are loaded as one dvec4.
 */
extern "C" int
type_size_vs_input(const struct glsl_type *type)
{
   if (type->is_double()) {
      return type_size_dvec4(type);
   } else {
      return type_size_vec4(type);
   }
}

/**
 * Create a MOV to read the timestamp register.
 *
 * The caller is responsible for emitting the MOV.  The return value is
 * the destination of the MOV, with extra parameters set.
 */
fs_reg
fs_visitor::get_timestamp(const fs_builder &bld)
{
   assert(devinfo->gen >= 7);

   fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                          BRW_ARF_TIMESTAMP,
                                          0),
                             BRW_REGISTER_TYPE_UD));

   fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* We want to read the 3 fields we care about even if those channels
    * aren't enabled in the dispatch.
    */
   bld.group(4, 0).exec_all().MOV(dst, ts);

   return dst;
}

void
fs_visitor::emit_shader_time_begin()
{
   shader_start_time = get_timestamp(bld.annotate("shader time start"));

   /* We want only the low 32 bits of the timestamp.  Since it's running
    * at the GPU clock rate of ~1.2 GHz, it will roll over every ~3 seconds,
    * which is plenty of time for our purposes.  It is identical across the
    * EUs, but since it's tracking GPU core speed it will increment at a
    * varying rate as render P-states change.
    */
   shader_start_time.set_smear(0);
}

void
fs_visitor::emit_shader_time_end()
{
   /* Insert our code just before the final SEND with EOT. */
   exec_node *end = this->instructions.get_tail();
   assert(end && ((fs_inst *) end)->eot);
   const fs_builder ibld = bld.annotate("shader time end")
                              .exec_all().at(NULL, end);

   fs_reg shader_end_time = get_timestamp(ibld);

   /* We only use the low 32 bits of the timestamp - see
    * emit_shader_time_begin().
    *
    * We could also check if render P-states have changed (or anything
    * else that might disrupt timing) by setting smear to 2 and checking if
    * that field is != 0.
    */
   shader_end_time.set_smear(0);

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   fs_reg reset = shader_end_time;
   reset.set_smear(2);
   set_condmod(BRW_CONDITIONAL_Z,
               ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
   ibld.IF(BRW_PREDICATE_NORMAL);

   fs_reg start = shader_start_time;
   start.negate = true;
   fs_reg diff = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   diff.set_smear(0);

   const fs_builder cbld = ibld.group(1, 0);
   cbld.ADD(diff, start, shader_end_time);

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   cbld.ADD(diff, diff, brw_imm_ud(-2u));
   SHADER_TIME_ADD(cbld, 0, diff);
   SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ELSE);
   SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
                            int shader_time_subindex,
                            fs_reg value)
{
   int index = shader_time_index * 3 + shader_time_subindex;
   struct brw_reg offset = brw_imm_d(index * SHADER_TIME_STRIDE);

   fs_reg payload;
   if (dispatch_width == 8)
      payload = vgrf(glsl_type::uvec2_type);
   else
      payload = vgrf(glsl_type::uint_type);

   bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
}

void
fs_visitor::vfail(const char *format, va_list va)
{
   char *msg;

   if (failed)
      return;

   failed = true;

   msg = ralloc_vasprintf(mem_ctx, format, va);
   msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);

   this->fail_msg = msg;

   if (debug_enabled) {
      fprintf(stderr, "%s", msg);
   }
}

void
fs_visitor::fail(const char *format, ...)
{
   va_list va;

   va_start(va, format);
   vfail(format, va);
   va_end(va);
}

/**
 * Mark this program as impossible to compile with dispatch width greater
 * than n.
 *
 * During the SIMD8 compile (which happens first), we can detect and flag
 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
 * SIMD16+ compile altogether.
 *
 * During a compile of dispatch width greater than n (if one happens anyway),
 * this just calls fail().
 */
void
fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
{
   if (dispatch_width > n) {
      fail("%s", msg);
   } else {
      max_dispatch_width = n;
      compiler->shader_perf_log(log_data,
                                "Shader dispatch width limited to SIMD%d: %s",
                                n, msg);
   }
}

/**
 * Returns true if the instruction has a flag that means it won't
 * update an entire destination register.
 *
 * For example, dead code elimination and live variable analysis want to know
 * when a write to a variable screens off any preceding values that were in
 * it.
 */
bool
fs_inst::is_partial_write() const
{
   return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
           (this->exec_size * type_sz(this->dst.type)) < 32 ||
           !this->dst.is_contiguous() ||
           this->dst.subreg_offset > 0);
}
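
/* For illustration (example added by the editor): a predicated SIMD8 MOV
 * is a partial write (the disabled channels keep their old values), and so
 * is an unpredicated SIMD8 MOV to a W-typed destination, since 8 * 2 = 16
 * bytes covers only half of a 32-byte GRF.
 */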

unsigned
fs_inst::components_read(unsigned i) const
{
   switch (opcode) {
   case FS_OPCODE_LINTERP:
      if (i == 0)
         return 2;
      else
         return 1;

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:
      assert(i == 0);
      return 2;

   case FS_OPCODE_FB_WRITE_LOGICAL:
      assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
      /* First/second FB write color. */
      if (i < 2)
         return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
      else
         return 1;

   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
             src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
      /* Texture coordinates. */
      if (i == TEX_LOGICAL_SRC_COORDINATE)
         return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
      /* Texture derivatives. */
      else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
               opcode == SHADER_OPCODE_TXD_LOGICAL)
         return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
      /* Texture offset. */
      else if (i == TEX_LOGICAL_SRC_OFFSET_VALUE)
         return 2;
      /* MCS */
      else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
         return 2;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      assert(src[3].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source (ignored for reads). */
      else if (i == 1)
         return 0;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source. */
      else if (i == 1)
         return src[4].ud;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      const unsigned op = src[4].ud;
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source. */
      else if (i == 1 && op == BRW_AOP_CMPWR)
         return 2;
      else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
                          op == BRW_AOP_PREDEC))
         return 0;
      else
         return 1;
   }

   default:
      return 1;
   }
}

int
fs_inst::regs_read(int arg) const
{
   switch (opcode) {
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      if (arg == 0)
         return mlen;
      break;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      /* The payload is actually stored in src1 */
      if (arg == 1)
         return mlen;
      break;

   case FS_OPCODE_LINTERP:
      if (arg == 1)
         return 1;
      break;

   case SHADER_OPCODE_LOAD_PAYLOAD:
      if (arg < this->header_size)
         return 1;
      break;

   case CS_OPCODE_CS_TERMINATE:
   case SHADER_OPCODE_BARRIER:
      return 1;

   case SHADER_OPCODE_MOV_INDIRECT:
      if (arg == 0) {
         assert(src[2].file == IMM);
         unsigned region_length = src[2].ud;

         if (src[0].file == UNIFORM) {
            assert(region_length % 4 == 0);
            return region_length / 4;
         } else if (src[0].file == FIXED_GRF) {
            /* If the start of the region is not register aligned, then
             * there's some portion of the register that's technically
             * unread at the beginning.
             *
             * However, the register allocator works in terms of whole
             * registers, and does not use subnr.  It assumes that the
             * read starts at the beginning of the register, and extends
             * regs_read() whole registers beyond that.
             *
             * To compensate, we extend the region length to include this
             * unread portion at the beginning.
             */
            if (src[0].subnr)
               region_length += src[0].subnr;

            return DIV_ROUND_UP(region_length, REG_SIZE);
         } else {
            assert(!"Invalid register file");
         }
      }
      break;

   default:
      if (is_tex() && arg == 0 && src[0].file == VGRF)
         return mlen;
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case UNIFORM:
   case IMM:
      return 1;
   case ARF:
   case FIXED_GRF:
   case VGRF:
   case ATTR:
      return DIV_ROUND_UP(components_read(arg) *
                          src[arg].component_size(exec_size),
                          REG_SIZE);
   case MRF:
      unreachable("MRF registers are not allowed as sources");
   }
   return 0;
}

namespace {
   /* Return the subset of flag registers that an instruction could
    * potentially read or write based on the execution controls and flag
    * subregister number of the instruction.
    */
   unsigned
   flag_mask(const fs_inst *inst)
   {
      const unsigned start = inst->flag_subreg * 16 + inst->group;
      const unsigned end = start + inst->exec_size;
      return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
   }
}
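
/* For illustration (example added by the editor): each mask bit stands for
 * one byte (8 channels) of flag state.  A SIMD16 instruction with
 * flag_subreg = 0 and group = 0 has start = 0, end = 16, giving mask 0x3
 * (both bytes of f0.0); a SIMD8 instruction in the second half (group = 8)
 * yields 0x2 (just the high byte).
 */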

unsigned
fs_inst::flags_read(const brw_device_info *devinfo) const
{
   /* XXX - This doesn't consider explicit uses of the flag register as
    *       source region.
    */
   if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
       predicate == BRW_PREDICATE_ALIGN1_ALLV) {
      /* The vertical predication modes combine corresponding bits from
       * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
       */
      const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
      return flag_mask(this) << shift | flag_mask(this);
   } else if (predicate) {
      return flag_mask(this);
   } else {
      return 0;
   }
}

unsigned
fs_inst::flags_written() const
{
   /* XXX - This doesn't consider explicit uses of the flag register as
    *       destination region.
    */
   if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
                            opcode != BRW_OPCODE_IF &&
                            opcode != BRW_OPCODE_WHILE)) ||
       opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
      return flag_mask(this);
   } else {
      return 0;
   }
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   if (inst->base_mrf == -1)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      return 2 * dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_SAMPLEINFO:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_GET_BUFFER_SIZE:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 1;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
      return inst->mlen;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->mlen;
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return 0;
   default:
      unreachable("not reached");
   }
}
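
/* For illustration (example added by the editor): a unary math opcode such
 * as SHADER_OPCODE_RCP occupies one MRF per 8 channels (so 2 MRFs at
 * SIMD16), while binary math like SHADER_OPCODE_POW needs twice that to
 * hold its second operand.
 */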

fs_reg
fs_visitor::vgrf(const glsl_type *const type)
{
   int reg_width = dispatch_width / 8;
   return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
                 brw_type_for_base_type(type));
}

fs_reg::fs_reg(enum brw_reg_file file, int nr)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = BRW_REGISTER_TYPE_F;
   this->stride = (file == UNIFORM ? 0 : 1);
}

fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = type;
   this->stride = (file == UNIFORM ? 0 : 1);
}

/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
 * This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   this->push_constant_loc = v->push_constant_loc;
   this->pull_constant_loc = v->pull_constant_loc;
   this->uniforms = v->uniforms;
}

fs_reg *
fs_visitor::emit_fragcoord_interpolation()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec4_type));
   fs_reg wpos = *reg;

   /* gl_FragCoord.x */
   bld.MOV(wpos, this->pixel_x);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.y */
   bld.MOV(wpos, this->pixel_y);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.z */
   if (devinfo->gen >= 6) {
      bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
   } else {
      bld.emit(FS_OPCODE_LINTERP, wpos,
               this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
               interp_reg(VARYING_SLOT_POS, 2));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   bld.MOV(wpos, this->wpos_w);

   return reg;
}

fs_inst *
fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         glsl_interp_qualifier interpolation_mode,
                         bool is_centroid, bool is_sample)
{
   brw_wm_barycentric_interp_mode barycoord_mode;
   if (devinfo->gen >= 6) {
      if (is_centroid) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
      } else if (is_sample) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
      } else {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
         else
            barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
      }
   } else {
      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
   }
   return bld.emit(FS_OPCODE_LINTERP, attr,
                   this->delta_xy[barycoord_mode], interp);
}

void
fs_visitor::emit_general_interpolation(fs_reg *attr, const char *name,
                                       const glsl_type *type,
                                       glsl_interp_qualifier interpolation_mode,
                                       int *location, bool mod_centroid,
                                       bool mod_sample)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   if (interpolation_mode == INTERP_QUALIFIER_NONE) {
      bool is_gl_Color =
         *location == VARYING_SLOT_COL0 || *location == VARYING_SLOT_COL1;
      if (key->flat_shade && is_gl_Color) {
         interpolation_mode = INTERP_QUALIFIER_FLAT;
      } else {
         interpolation_mode = INTERP_QUALIFIER_SMOOTH;
      }
   }

   if (type->is_array() || type->is_matrix()) {
      const glsl_type *elem_type = glsl_get_array_element(type);
      const unsigned length = glsl_get_length(type);

      for (unsigned i = 0; i < length; i++) {
         emit_general_interpolation(attr, name, elem_type, interpolation_mode,
                                    location, mod_centroid, mod_sample);
      }
   } else if (type->is_record()) {
      for (unsigned i = 0; i < type->length; i++) {
         const glsl_type *field_type = type->fields.structure[i].type;
         emit_general_interpolation(attr, name, field_type, interpolation_mode,
                                    location, mod_centroid, mod_sample);
      }
   } else {
      assert(type->is_scalar() || type->is_vector());

      if (prog_data->urb_setup[*location] == -1) {
         /* If there's no incoming setup data for this slot, don't
          * emit interpolation for it.
          */
         *attr = offset(*attr, bld, type->vector_elements);
         (*location)++;
         return;
      }

      attr->type = brw_type_for_base_type(type->get_scalar_type());

      if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
         /* Constant interpolation (flat shading) case. The SF has
          * handed us defined values in only the constant offset
          * field of the setup reg.
          */
         for (unsigned int i = 0; i < type->vector_elements; i++) {
            struct brw_reg interp = interp_reg(*location, i);
            interp = suboffset(interp, 3);
            interp.type = attr->type;
            bld.emit(FS_OPCODE_CINTERP, *attr, fs_reg(interp));
            *attr = offset(*attr, bld, 1);
         }
      } else {
         /* Smooth/noperspective interpolation case. */
         for (unsigned int i = 0; i < type->vector_elements; i++) {
            struct brw_reg interp = interp_reg(*location, i);
            if (devinfo->needs_unlit_centroid_workaround && mod_centroid) {
               /* Get the pixel/sample mask into f0 so that we know
                * which pixels are lit.  Then, for each channel that is
                * unlit, replace the centroid data with non-centroid
                * data.
                */
               bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

               fs_inst *inst;
               inst = emit_linterp(*attr, fs_reg(interp), interpolation_mode,
                                   false, false);
               inst->predicate = BRW_PREDICATE_NORMAL;
               inst->predicate_inverse = true;
               if (devinfo->has_pln)
                  inst->no_dd_clear = true;

               inst = emit_linterp(*attr, fs_reg(interp), interpolation_mode,
                                   mod_centroid && !key->persample_interp,
                                   mod_sample || key->persample_interp);
               inst->predicate = BRW_PREDICATE_NORMAL;
               inst->predicate_inverse = false;
               if (devinfo->has_pln)
                  inst->no_dd_check = true;

            } else {
               emit_linterp(*attr, fs_reg(interp), interpolation_mode,
                            mod_centroid && !key->persample_interp,
                            mod_sample || key->persample_interp);
            }
            if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
               bld.MUL(*attr, *attr, this->pixel_w);
            }
            *attr = offset(*attr, bld, 1);
         }
      }
      (*location)++;
   }
}

fs_reg *
fs_visitor::emit_frontfacing_interpolation()
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (~0/true or 0/false).
       *
       * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
       * this task in only one instruction:
       *    - a negation source modifier will flip the bit; and
       *    - a W -> D type conversion will sign extend the bit into the high
       *      word of the destination.
       *
       * An ASR 15 fills the low word of the destination.
       */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
      g0.negate = true;

      bld.ASR(*reg, g0, brw_imm_d(15));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (1/true or 0/false).
       *
       * Like in the above case, since the bit is the MSB of g1.6:UD we can use
       * the negation source modifier to flip it. Unfortunately the SHR
       * instruction only operates on UD (or D with an abs source modifier)
       * sources without negation.
       *
       * Instead, use ASR (which will give ~0/true or 0/false).
       */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
      g1_6.negate = true;

      bld.ASR(*reg, g1_6, brw_imm_d(31));
   }

   return reg;
}

void
fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
   assert(dst.type == BRW_REGISTER_TYPE_F);

   if (wm_prog_data->persample_dispatch) {
      /* Convert int_sample_pos to floating point */
      bld.MOV(dst, int_sample_pos);
      /* Scale to the range [0, 1] */
      bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
   }
   else {
      /* From the ARB_sample_shading specification:
       *    "When rendering to a non-multisample buffer, or if multisample
       *     rasterization is disabled, gl_SamplePosition will always be
       *     (0.5, 0.5)."
       */
      bld.MOV(dst, brw_imm_f(0.5f));
   }
}

fs_reg *
fs_visitor::emit_samplepos_setup()
{
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample position");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
   fs_reg pos = *reg;
   fs_reg int_sample_x = vgrf(glsl_type::int_type);
   fs_reg int_sample_y = vgrf(glsl_type::int_type);

   /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
    * mode will be enabled.
    *
    * From the Ivy Bridge PRM, volume 2 part 1, page 344:
    *    R31.1:0   Position Offset X/Y for Slot[3:0]
    *    R31.3:2   Position Offset X/Y for Slot[7:4]
    *    .....
    *
    * The X, Y sample positions come in as bytes in thread payload. So, read
    * the positions using vstride=16, width=8, hstride=2.
    */
   struct brw_reg sample_pos_reg =
      stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
                    BRW_REGISTER_TYPE_B), 16, 8, 2);

   if (dispatch_width == 8) {
      abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
   } else {
      abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
      abld.half(1).MOV(half(int_sample_x, 1),
                       fs_reg(suboffset(sample_pos_reg, 16)));
   }
   /* Compute gl_SamplePosition.x */
   compute_sample_position(pos, int_sample_x);
   pos = offset(pos, abld, 1);
   if (dispatch_width == 8) {
      abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
   } else {
      abld.half(0).MOV(half(int_sample_y, 0),
                       fs_reg(suboffset(sample_pos_reg, 1)));
      abld.half(1).MOV(half(int_sample_y, 1),
                       fs_reg(suboffset(sample_pos_reg, 17)));
   }
   /* Compute gl_SamplePosition.y */
   compute_sample_position(pos, int_sample_y);
   return reg;
}

fs_reg *
fs_visitor::emit_sampleid_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample id");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   if (!key->multisample_fbo) {
      /* As per the GL_ARB_sample_shading specification:
       *    "When rendering to a non-multisample buffer, or if multisample
       *     rasterization is disabled, gl_SampleID will always be zero."
       */
      abld.MOV(*reg, brw_imm_d(0));
   } else if (devinfo->gen >= 8) {
      /* Sample ID comes in as 4-bit numbers in g1.0:
       *
       *    15:12 Slot 3 SampleID (only used in SIMD16)
       *     11:8 Slot 2 SampleID (only used in SIMD16)
       *      7:4 Slot 1 SampleID
       *      3:0 Slot 0 SampleID
       *
       * Each slot corresponds to four channels, so we want to replicate each
       * half-byte value to 4 channels in a row:
       *
       *    dst+0:    .7    .6    .5    .4    .3    .2    .1    .0
       *             7:4   7:4   7:4   7:4   3:0   3:0   3:0   3:0
       *
       *    dst+1:    .7    .6    .5    .4    .3    .2    .1    .0  (if SIMD16)
       *           15:12 15:12 15:12 15:12  11:8  11:8  11:8  11:8
       *
       * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
       * channels to read the first byte (7:0), and the second group of 8
       * channels to read the second byte (15:8).  Then, we shift right by
       * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
       * values into place.  Finally, we AND with 0xf to keep the low nibble.
       *
       *    shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
       *    and(16) dst<1>D tmp<8,8,1>W  0xf:W
       *
       * TODO: These payload bits exist on Gen7 too, but they appear to always
       *       be zero, so this code fails to work.  We should find out why.
       */
      fs_reg tmp(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);

      abld.SHR(tmp, fs_reg(stride(retype(brw_vec1_grf(1, 0),
                                         BRW_REGISTER_TYPE_B), 1, 8, 0)),
               brw_imm_v(0x44440000));
      abld.AND(*reg, tmp, brw_imm_w(0xf));
   } else {
      fs_reg t1(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_D);
      t1.set_smear(0);
      fs_reg t2(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);

      /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
       * 8x multisampling, subspan 0 will represent sample N (where N
       * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
       * 7. We can find the value of N by looking at R0.0 bits 7:6
       * ("Starting Sample Pair Index (SSPI)") and multiplying by two
       * (since samples are always delivered in pairs). That is, we
       * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
       * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
       * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
       * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
       * populating a temporary variable with the sequence (0, 1, 2, 3),
       * and then reading from it using vstride=1, width=4, hstride=0.
       * These computations hold good for 4x multisampling as well.
       *
       * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
       * the first four slots are sample 0 of subspan 0; the next four
       * are sample 1 of subspan 0; the third group is sample 0 of
       * subspan 1, and finally sample 1 of subspan 1.
       */

      /* SKL+ has an extra bit for the Starting Sample Pair Index to
       * accommodate 16x MSAA.
       */
      abld.exec_all().group(1, 0)
          .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
               brw_imm_ud(0xc0));
      abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));

      /* This works for both SIMD8 and SIMD16 */
      abld.exec_all().group(4, 0).MOV(t2, brw_imm_v(0x3210));

      /* This special instruction takes care of setting vstride=1,
       * width=4, hstride=0 of t2 during an ADD instruction.
       */
      abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
   }

   return reg;
}

fs_reg *
fs_visitor::emit_samplemaskin_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
   assert(devinfo->gen >= 6);

   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                               BRW_REGISTER_TYPE_D));

   if (wm_prog_data->persample_dispatch) {
      /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
       * and a mask representing which sample is being processed by the
       * current shader invocation.
       *
       * From the OES_sample_variables specification:
       *    "When per-sample shading is active due to the use of a fragment
       *     input qualified by "sample" or due to the use of the gl_SampleID
       *     or gl_SamplePosition variables, only the bit for the current
       *     sample is set in gl_SampleMaskIn."
       */
      const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");

      if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
         nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();

      fs_reg one = vgrf(glsl_type::int_type);
      fs_reg enabled_mask = vgrf(glsl_type::int_type);
      abld.MOV(one, brw_imm_d(1));
      abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
      abld.AND(*reg, enabled_mask, coverage_mask);
   } else {
      /* In per-pixel mode, the coverage mask is sufficient. */
      *reg = coverage_mask;
   }
   return reg;
}

fs_reg
fs_visitor::resolve_source_modifiers(const fs_reg &src)
{
   if (!src.abs && !src.negate)
      return src;

   fs_reg temp = bld.vgrf(src.type);
   bld.MOV(temp, src);

   return temp;
}

void
fs_visitor::emit_discard_jump()
{
   assert(((brw_wm_prog_data*) this->prog_data)->uses_kill);

   /* For performance, after a discard, jump to the end of the
    * shader if all relevant channels have been discarded.
    */
   fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
   discard_jump->flag_subreg = 1;

   discard_jump->predicate = (dispatch_width == 8)
                             ? BRW_PREDICATE_ALIGN1_ANY8H
                             : BRW_PREDICATE_ALIGN1_ANY16H;
   discard_jump->predicate_inverse = true;
}

void
fs_visitor::emit_gs_thread_end()
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data =
      (struct brw_gs_prog_data *) prog_data;

   if (gs_compile->control_data_header_size_bits > 0) {
      emit_gs_control_data_bits(this->final_gs_vertex_count);
   }

   const fs_builder abld = bld.annotate("thread end");
   fs_inst *inst;

   if (gs_prog_data->static_vertex_count != -1) {
      foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
         if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
            prev->eot = true;

            /* Delete now dead instructions. */
            foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
               if (dead == prev)
                  break;
               dead->remove();
            }
            return;
         } else if (prev->is_control_flow() || prev->has_side_effects()) {
            break;
         }
      }
      fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
      inst->mlen = 1;
   } else {
      fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
      sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
      sources[1] = this->final_gs_vertex_count;
      abld.LOAD_PAYLOAD(payload, sources, 2, 2);
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->mlen = 2;
   }
   inst->eot = true;
   inst->offset = 0;
}

void
fs_visitor::assign_curb_setup()
{
   prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == UNIFORM) {
            int uniform_nr = inst->src[i].nr + inst->src[i].reg_offset;
            int constant_nr;
            if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
               constant_nr = push_constant_loc[uniform_nr];
            } else {
               /* Section 5.11 of the OpenGL 4.1 spec says:
                *    "Out-of-bounds reads return undefined values, which
                *     include values from other variables of the active
                *     program or zero."
                * Just return the first push constant.
                */
               constant_nr = 0;
            }

            struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);
            brw_reg.abs = inst->src[i].abs;
            brw_reg.negate = inst->src[i].negate;

            assert(inst->src[i].stride == 0);
            inst->src[i] = byte_offset(
               retype(brw_reg, inst->src[i].type),
               inst->src[i].subreg_offset);
         }
      }
   }

   /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
   this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
}

void
fs_visitor::calculate_urb_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   memset(prog_data->urb_setup, -1,
          sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (devinfo->gen >= 6) {
      if (_mesa_bitcount_64(nir->info.inputs_read &
                            BRW_FS_VARYING_INPUT_MASK) <= 16) {
         /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
          * first 16 varying inputs, so we can put them wherever we want.
          * Just put them in order.
          *
          * This is useful because it means that (a) inputs not used by the
          * fragment shader won't take up valuable register space, and (b) we
          * won't have to recompile the fragment shader if it gets paired with
          * a different vertex (or geometry) shader.
          */
         for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
            if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                BITFIELD64_BIT(i)) {
               prog_data->urb_setup[i] = urb_next++;
            }
         }
      } else {
         bool include_vue_header =
            nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

         /* We have enough input varyings that the SF/SBE pipeline stage can't
          * arbitrarily rearrange them to suit our whim; we have to put them
          * in an order that matches the output of the previous pipeline stage
          * (geometry or vertex shader).
          */
         struct brw_vue_map prev_stage_vue_map;
         brw_compute_vue_map(devinfo, &prev_stage_vue_map,
                             key->input_slots_valid,
                             nir->info.separate_shader);
         int first_slot =
            include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;

         assert(prev_stage_vue_map.num_slots <= first_slot + 32);
         for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
              slot++) {
            int varying = prev_stage_vue_map.slot_to_varying[slot];
            if (varying != BRW_VARYING_SLOT_PAD &&
                (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                 BITFIELD64_BIT(varying))) {
               prog_data->urb_setup[varying] = slot - first_slot;
            }
         }
         urb_next = prev_stage_vue_map.num_slots - first_slot;
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VARYING_SLOT_PSIZ)
            continue;

         if (key->input_slots_valid & BITFIELD64_BIT(i)) {
            /* The back color slot is skipped when the front color is
             * also written to.  In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader.  So the register number must always be
             * incremented, mapped or not.
             */
            if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
               prog_data->urb_setup[i] = urb_next;
            urb_next++;
         }
      }

      /*
       * It's an FS-only attribute, and we did the interpolation for this
       * attribute in the SF thread.  So, count it here, too.
       *
       * See compile_sf_prog() for more info.
       */
      if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
         prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
   }

   prog_data->num_varying_inputs = urb_next;
}

void
fs_visitor::assign_urb_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;

   int urb_start = payload.num_regs + prog_data->base.curb_read_length;

   /* Offset all the urb_setup[] index by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[1].file == FIXED_GRF);
         inst->src[1].nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_GRF);
         inst->src[0].nr += urb_start;
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
}

void
fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
{
   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == ATTR) {
         int grf = payload.num_regs +
                   prog_data->curb_read_length +
                   inst->src[i].nr +
                   inst->src[i].reg_offset;

         /* As explained in brw_reg_from_fs_reg(), from the Haswell PRM:
          *
          *    "VertStride must be used to cross GRF register boundaries. This
          *     rule implies that elements within a 'Width' cannot cross GRF
          *     boundaries."
          *
          * So, for registers that are large enough, we have to split the exec
          * size in two and trust the compression state to sort it out.
          */
         unsigned total_size = inst->exec_size *
                               inst->src[i].stride *
                               type_sz(inst->src[i].type);

         assert(total_size <= 2 * REG_SIZE);
         const unsigned exec_size =
            (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;

         unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
         struct brw_reg reg =
            stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
                               inst->src[i].subreg_offset),
                   exec_size * inst->src[i].stride,
                   width, inst->src[i].stride);
         reg.abs = inst->src[i].abs;
         reg.negate = inst->src[i].negate;

         inst->src[i] = reg;
      }
   }
}

void
fs_visitor::assign_vs_urb_setup()
{
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;

   assert(stage == MESA_SHADER_VERTEX);

   /* Each attribute is 4 regs. */
   this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;

   assert(vs_prog_data->base.urb_read_length <= 15);

   /* Rewrite all ATTR file references to the hw grf that they land in. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_tcs_single_patch_urb_setup()
{
   assert(stage == MESA_SHADER_TESS_CTRL);

   /* Rewrite all ATTR file references to HW_REGs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_tes_urb_setup()
{
   assert(stage == MESA_SHADER_TESS_EVAL);

   brw_vue_prog_data *vue_prog_data = (brw_vue_prog_data *) prog_data;

   first_non_payload_grf += 8 * vue_prog_data->urb_read_length;

   /* Rewrite all ATTR file references to HW_REGs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_gs_urb_setup()
{
   assert(stage == MESA_SHADER_GEOMETRY);

   brw_vue_prog_data *vue_prog_data = (brw_vue_prog_data *) prog_data;

   first_non_payload_grf +=
      8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      /* Rewrite all ATTR file references to GRFs. */
      convert_attr_sources_to_hw_regs(inst);
   }
}


/**
 * Split large virtual GRFs into separate components if we can.
 *
1834 * This mostly duplicates what brw_fs_vector_splitting does,
1835 * but that's really conservative because it's afraid of doing
1836 * splitting that doesn't result in real progress after the rest of
1837 * the optimization phases, which would cause infinite looping in
1838 * optimization. We can do it once here, safely. This also has the
1839 * opportunity to split interpolated values, or maybe even uniforms,
1840 * which we don't have at the IR level.
1841 *
1842 * We want to split, because virtual GRFs are what we register
1843 * allocate and spill (due to contiguousness requirements for some
1844 * instructions), and they're what we naturally generate in the
1845 * codegen process, but most virtual GRFs don't actually need to be
1846 * contiguous sets of GRFs. If we split, we'll end up with reduced
1847 * live intervals and better dead code elimination and coalescing.
1848 */
1849 void
1850 fs_visitor::split_virtual_grfs()
1851 {
1852 int num_vars = this->alloc.count;
1853
1854 /* Count the total number of registers */
1855 int reg_count = 0;
1856 int vgrf_to_reg[num_vars];
1857 for (int i = 0; i < num_vars; i++) {
1858 vgrf_to_reg[i] = reg_count;
1859 reg_count += alloc.sizes[i];
1860 }
1861
1862 /* An array of "split points". For each register slot, this indicates
1863 * if this slot can be separated from the previous slot. Every time an
1864 * instruction uses multiple elements of a register (as a source or
1865 * destination), we mark the used slots as inseparable. Then we go
1866 * through and split the registers into the smallest pieces we can.
1867 */
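/* A hypothetical sketch of the outcome: for a 4-slot VGRF whose middle two
 * slots are read together by a single instruction, the split points end up
 * as { false, true, false, true }, so the register is split into three new
 * VGRFs of sizes 1, 2 and 1.
 */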
1868 bool split_points[reg_count];
1869 memset(split_points, 0, sizeof(split_points));
1870
1871 /* Mark all used registers as fully splittable */
1872 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1873 if (inst->dst.file == VGRF) {
1874 int reg = vgrf_to_reg[inst->dst.nr];
1875 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1876 split_points[reg + j] = true;
1877 }
1878
1879 for (int i = 0; i < inst->sources; i++) {
1880 if (inst->src[i].file == VGRF) {
1881 int reg = vgrf_to_reg[inst->src[i].nr];
1882 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1883 split_points[reg + j] = true;
1884 }
1885 }
1886 }
1887
1888 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1889 if (inst->dst.file == VGRF) {
1890 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.reg_offset;
1891 for (int j = 1; j < inst->regs_written; j++)
1892 split_points[reg + j] = false;
1893 }
1894 for (int i = 0; i < inst->sources; i++) {
1895 if (inst->src[i].file == VGRF) {
1896 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].reg_offset;
1897 for (int j = 1; j < inst->regs_read(i); j++)
1898 split_points[reg + j] = false;
1899 }
1900 }
1901 }
1902
1903 int new_virtual_grf[reg_count];
1904 int new_reg_offset[reg_count];
1905
1906 int reg = 0;
1907 for (int i = 0; i < num_vars; i++) {
1908 /* The first one should always be 0 as a quick sanity check. */
1909 assert(split_points[reg] == false);
1910
1911 /* j = 0 case */
1912 new_reg_offset[reg] = 0;
1913 reg++;
1914 int offset = 1;
1915
1916 /* j > 0 case */
1917 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1918 /* If this is a split point, reset the offset to 0 and allocate a
1919 * new virtual GRF for the preceding "offset" registers.
1920 */
1921 if (split_points[reg]) {
1922 assert(offset <= MAX_VGRF_SIZE);
1923 int grf = alloc.allocate(offset);
1924 for (int k = reg - offset; k < reg; k++)
1925 new_virtual_grf[k] = grf;
1926 offset = 0;
1927 }
1928 new_reg_offset[reg] = offset;
1929 offset++;
1930 reg++;
1931 }
1932
1933 /* The last one gets the original register number */
1934 assert(offset <= MAX_VGRF_SIZE);
1935 alloc.sizes[i] = offset;
1936 for (int k = reg - offset; k < reg; k++)
1937 new_virtual_grf[k] = i;
1938 }
1939 assert(reg == reg_count);
1940
1941 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1942 if (inst->dst.file == VGRF) {
1943 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.reg_offset;
1944 inst->dst.nr = new_virtual_grf[reg];
1945 inst->dst.reg_offset = new_reg_offset[reg];
1946 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1947 }
1948 for (int i = 0; i < inst->sources; i++) {
1949 if (inst->src[i].file == VGRF) {
1950 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].reg_offset;
1951 inst->src[i].nr = new_virtual_grf[reg];
1952 inst->src[i].reg_offset = new_reg_offset[reg];
1953 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1954 }
1955 }
1956 }
1957 invalidate_live_intervals();
1958 }
1959
1960 /**
1961 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1962 *
1963 * During code generation, we create tons of temporary variables, many of
1964 * which get immediately killed and are never used again. Yet, in later
1965 * optimization and analysis passes, such as compute_live_intervals, we need
1966 * to loop over all the virtual GRFs. Compacting them can save a lot of
1967 * overhead.
1968 */
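/* A hypothetical sketch (VGRF numbers invented for illustration): if only
 * VGRFs 0 and 2 out of three are referenced by any instruction, remap_table
 * becomes { 0, -1, 1 }, VGRF 2 is renumbered to 1, and alloc.count drops
 * from 3 to 2.
 */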
1969 bool
1970 fs_visitor::compact_virtual_grfs()
1971 {
1972 bool progress = false;
1973 int remap_table[this->alloc.count];
1974 memset(remap_table, -1, sizeof(remap_table));
1975
1976 /* Mark which virtual GRFs are used. */
1977 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1978 if (inst->dst.file == VGRF)
1979 remap_table[inst->dst.nr] = 0;
1980
1981 for (int i = 0; i < inst->sources; i++) {
1982 if (inst->src[i].file == VGRF)
1983 remap_table[inst->src[i].nr] = 0;
1984 }
1985 }
1986
1987 /* Compact the GRF arrays. */
1988 int new_index = 0;
1989 for (unsigned i = 0; i < this->alloc.count; i++) {
1990 if (remap_table[i] == -1) {
1991 /* We just found an unused register. This means that we are
1992 * actually going to compact something.
1993 */
1994 progress = true;
1995 } else {
1996 remap_table[i] = new_index;
1997 alloc.sizes[new_index] = alloc.sizes[i];
1998 invalidate_live_intervals();
1999 ++new_index;
2000 }
2001 }
2002
2003 this->alloc.count = new_index;
2004
2005 /* Patch all the instructions to use the newly renumbered registers */
2006 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2007 if (inst->dst.file == VGRF)
2008 inst->dst.nr = remap_table[inst->dst.nr];
2009
2010 for (int i = 0; i < inst->sources; i++) {
2011 if (inst->src[i].file == VGRF)
2012 inst->src[i].nr = remap_table[inst->src[i].nr];
2013 }
2014 }
2015
2016 /* Patch all the references to delta_xy, since they're used in register
2017 * allocation. If they're unused, switch them to BAD_FILE so we don't
2018 * think some random VGRF is delta_xy.
2019 */
2020 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2021 if (delta_xy[i].file == VGRF) {
2022 if (remap_table[delta_xy[i].nr] != -1) {
2023 delta_xy[i].nr = remap_table[delta_xy[i].nr];
2024 } else {
2025 delta_xy[i].file = BAD_FILE;
2026 }
2027 }
2028 }
2029
2030 return progress;
2031 }
2032
2033 static void
2034 set_push_pull_constant_loc(unsigned uniform, int *chunk_start, bool contiguous,
2035 int *push_constant_loc, int *pull_constant_loc,
2036 unsigned *num_push_constants,
2037 unsigned *num_pull_constants,
2038 const unsigned max_push_components,
2039 const unsigned max_chunk_size,
2040 struct brw_stage_prog_data *stage_prog_data)
2041 {
2042 /* This is the first live uniform in the chunk */
2043 if (*chunk_start < 0)
2044 *chunk_start = uniform;
2045
2046 /* If this element does not need to be contiguous with the next, we
2047 * split at this point and everything between chunk_start and uniform
2048 * forms a single chunk.
2049 */
2050 if (!contiguous) {
2051 unsigned chunk_size = uniform - *chunk_start + 1;
2052
2053 /* Decide whether we should push or pull this parameter. In the
2054 * Vulkan driver, push constants are explicitly exposed via the API
2055 * so we push everything. In GL, we only push small arrays.
2056 */
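/* For example, with the GL limits chosen below (max_push_components = 128,
 * max_chunk_size = 16), a chunk holding a single vec4 is pushed (space
 * permitting), while a 17-element float array chunk is demoted to the pull
 * constant buffer.
 */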
2057 if (stage_prog_data->pull_param == NULL ||
2058 (*num_push_constants + chunk_size <= max_push_components &&
2059 chunk_size <= max_chunk_size)) {
2060 assert(*num_push_constants + chunk_size <= max_push_components);
2061 for (unsigned j = *chunk_start; j <= uniform; j++)
2062 push_constant_loc[j] = (*num_push_constants)++;
2063 } else {
2064 for (unsigned j = *chunk_start; j <= uniform; j++)
2065 pull_constant_loc[j] = (*num_pull_constants)++;
2066 }
2067
2068 *chunk_start = -1;
2069 }
2070 }
2071
2072 /**
2073 * Assign UNIFORM file registers to either push constants or pull constants.
2074 *
2075 * We allow a fragment shader to have more than the GL-specified minimum
2076 * for the maximum number of fragment shader uniform components (64). If
2077 * there are too many of these, they'd fill up all of the register space.
2078 * So, this will push some of them out to the pull constant buffer and
2079 * update the program to load them.
2080 */
2081 void
2082 fs_visitor::assign_constant_locations()
2083 {
2084 /* Only the first compile gets to decide on locations. */
2085 if (dispatch_width != min_dispatch_width)
2086 return;
2087
2088 bool is_live[uniforms];
2089 memset(is_live, 0, sizeof(is_live));
2090 bool is_live_64bit[uniforms];
2091 memset(is_live_64bit, 0, sizeof(is_live_64bit));
2092
2093 /* For each uniform slot, a value of true indicates that the given slot and
2094 * the next slot must remain contiguous. This is used to keep us from
2095 * splitting arrays apart.
2096 */
2097 bool contiguous[uniforms];
2098 memset(contiguous, 0, sizeof(contiguous));
2099
2100 /* First, we walk through the instructions and do two things:
2101 *
2102 * 1) Figure out which uniforms are live.
2103 *
2104 * 2) Mark any indirectly used ranges of registers as contiguous.
2105 *
2106 * Note that we don't move constant-indexed accesses to arrays. No
2107 * testing has been done of the performance impact of this choice.
2108 */
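/* As an illustrative case: a MOV_INDIRECT whose src[2] indirection range is
 * 32 bytes covers 8 uniform slots, so all 8 are marked live and the first 7
 * are marked contiguous with their successors.
 */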
2109 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2110 for (int i = 0 ; i < inst->sources; i++) {
2111 if (inst->src[i].file != UNIFORM)
2112 continue;
2113
2114 int constant_nr = inst->src[i].nr + inst->src[i].reg_offset;
2115
2116 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
2117 assert(inst->src[2].ud % 4 == 0);
2118 unsigned last = constant_nr + (inst->src[2].ud / 4) - 1;
2119 assert(last < uniforms);
2120
2121 for (unsigned j = constant_nr; j < last; j++) {
2122 is_live[j] = true;
2123 contiguous[j] = true;
2124 if (type_sz(inst->src[i].type) == 8) {
2125 is_live_64bit[j] = true;
2126 }
2127 }
2128 is_live[last] = true;
2129 } else {
2130 if (constant_nr >= 0 && constant_nr < (int) uniforms) {
2131 int regs_read = inst->components_read(i) *
2132 type_sz(inst->src[i].type) / 4;
2133 for (int j = 0; j < regs_read; j++) {
2134 is_live[constant_nr + j] = true;
2135 if (type_sz(inst->src[i].type) == 8) {
2136 is_live_64bit[constant_nr + j] = true;
2137 }
2138 }
2139 }
2140 }
2141 }
2142 }
2143
2144 /* Only allow 16 registers (128 uniform components) as push constants.
2145 *
2146 * Just demote the end of the list. We could probably do better
2147 * here, demoting things that are rarely used in the program first.
2148 *
2149 * If changing this value, note the limitation about total_regs in
2150 * brw_curbe.c.
2151 */
2152 const unsigned int max_push_components = 16 * 8;
2153
2154 /* We push small arrays, but no bigger than 16 floats. This is big enough
2155 * for a vec4 but hopefully not large enough to push out other stuff. We
2156 * should probably use a better heuristic at some point.
2157 */
2158 const unsigned int max_chunk_size = 16;
2159
2160 unsigned int num_push_constants = 0;
2161 unsigned int num_pull_constants = 0;
2162
2163 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2164 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2165
2166 /* Default to -1 meaning no location */
2167 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2168 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2169
2170 int chunk_start = -1;
2171
2172 /* First push 64-bit uniforms to ensure they are properly aligned */
2173 for (unsigned u = 0; u < uniforms; u++) {
2174 if (!is_live[u] || !is_live_64bit[u])
2175 continue;
2176
2177 set_push_pull_constant_loc(u, &chunk_start, contiguous[u],
2178 push_constant_loc, pull_constant_loc,
2179 &num_push_constants, &num_pull_constants,
2180 max_push_components, max_chunk_size,
2181 stage_prog_data);
2182
2183 }
2184
2185 /* Then push the rest of uniforms */
2186 for (unsigned u = 0; u < uniforms; u++) {
2187 if (!is_live[u] || is_live_64bit[u])
2188 continue;
2189
2190 set_push_pull_constant_loc(u, &chunk_start, contiguous[u],
2191 push_constant_loc, pull_constant_loc,
2192 &num_push_constants, &num_pull_constants,
2193 max_push_components, max_chunk_size,
2194 stage_prog_data);
2195 }
2196
2197 /* As the uniforms are going to be reordered, take the data from a temporary
2198 * copy of the original param[].
2199 */
2200 gl_constant_value **param = ralloc_array(NULL, gl_constant_value*,
2201 stage_prog_data->nr_params);
2202 memcpy(param, stage_prog_data->param,
2203 sizeof(gl_constant_value*) * stage_prog_data->nr_params);
2204 stage_prog_data->nr_params = num_push_constants;
2205 stage_prog_data->nr_pull_params = num_pull_constants;
2206
2207 /* Up until now, the param[] array has been indexed by reg + reg_offset
2208 * of UNIFORM registers. Move pull constants into pull_param[] and
2209 * condense param[] to only contain the uniforms we chose to push.
2210 *
2211 * NOTE: Because we are condensing the param[] array, we know that
2212 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2213 * having to make a copy.
2214 */
2215 for (unsigned int i = 0; i < uniforms; i++) {
2216 const gl_constant_value *value = param[i];
2217
2218 if (pull_constant_loc[i] != -1) {
2219 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2220 } else if (push_constant_loc[i] != -1) {
2221 stage_prog_data->param[push_constant_loc[i]] = value;
2222 }
2223 }
2224 ralloc_free(param);
2225 }
2226
2227 /**
2228 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2229 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2230 */
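/* A hypothetical sketch of the rewrite (register numbers invented for
 * illustration): an instruction reading uniform u5, where u5 was assigned
 * pull constant index 5, becomes
 *
 *    uniform_pull_constant_load(8) vgrf7, surface, 16
 *    add(8) vgrf3, vgrf7.<component 1 smeared>, vgrf2
 *
 * i.e. the load fetches the vec4-aligned block at byte offset 16 and the
 * consumer smears component 1 of the result.
 */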
2231 void
2232 fs_visitor::lower_constant_loads()
2233 {
2234 const unsigned index = stage_prog_data->binding_table.pull_constants_start;
2235
2236 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2237 /* Set up the annotation tracking for new generated instructions. */
2238 const fs_builder ibld(this, block, inst);
2239
2240 for (int i = 0; i < inst->sources; i++) {
2241 if (inst->src[i].file != UNIFORM)
2242 continue;
2243
2244 /* We'll handle this case later */
2245 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2246 continue;
2247
2248 unsigned location = inst->src[i].nr + inst->src[i].reg_offset;
2249 if (location >= uniforms)
2250 continue; /* Out of bounds access */
2251
2252 int pull_index = pull_constant_loc[location];
2253
2254 if (pull_index == -1)
2255 continue;
2256
2257 const unsigned index = stage_prog_data->binding_table.pull_constants_start;
2258 fs_reg dst;
2259
2260 if (type_sz(inst->src[i].type) <= 4)
2261 dst = vgrf(glsl_type::float_type);
2262 else
2263 dst = vgrf(glsl_type::double_type);
2264
2265 assert(inst->src[i].stride == 0);
2266
2267 const fs_builder ubld = ibld.exec_all().group(8, 0);
2268 struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15);
2269 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2270 dst, brw_imm_ud(index), offset);
2271
2272 /* Rewrite the instruction to use the temporary VGRF. */
2273 inst->src[i].file = VGRF;
2274 inst->src[i].nr = dst.nr;
2275 inst->src[i].reg_offset = 0;
2276 inst->src[i].set_smear((pull_index & 3) * 4 /
2277 type_sz(inst->src[i].type));
2278
2279 brw_mark_surface_used(prog_data, index);
2280 }
2281
2282 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2283 inst->src[0].file == UNIFORM) {
2284
2285 unsigned location = inst->src[0].nr + inst->src[0].reg_offset;
2286 if (location >= uniforms)
2287 continue; /* Out of bounds access */
2288
2289 int pull_index = pull_constant_loc[location];
2290
2291 if (pull_index == -1)
2292 continue;
2293
2294 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2295 brw_imm_ud(index),
2296 inst->src[1],
2297 pull_index * 4);
2298 inst->remove(block);
2299
2300 brw_mark_surface_used(prog_data, index);
2301 }
2302 }
2303 invalidate_live_intervals();
2304 }
2305
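/**
 * Apply small algebraic simplifications to individual instructions, e.g.
 * turning "mul dst, src, 1.0" into "mov dst, src" or folding an ADD of two
 * immediates into a single MOV. Each case below notes the identity it
 * exploits.
 */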
2306 bool
2307 fs_visitor::opt_algebraic()
2308 {
2309 bool progress = false;
2310
2311 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2312 switch (inst->opcode) {
2313 case BRW_OPCODE_MOV:
2314 if (inst->src[0].file != IMM)
2315 break;
2316
2317 if (inst->saturate) {
2318 if (inst->dst.type != inst->src[0].type)
2319 assert(!"unimplemented: saturate mixed types");
2320
2321 if (brw_saturate_immediate(inst->dst.type,
2322 &inst->src[0].as_brw_reg())) {
2323 inst->saturate = false;
2324 progress = true;
2325 }
2326 }
2327 break;
2328
2329 case BRW_OPCODE_MUL:
2330 if (inst->src[1].file != IMM)
2331 continue;
2332
2333 /* a * 1.0 = a */
2334 if (inst->src[1].is_one()) {
2335 inst->opcode = BRW_OPCODE_MOV;
2336 inst->src[1] = reg_undef;
2337 progress = true;
2338 break;
2339 }
2340
2341 /* a * -1.0 = -a */
2342 if (inst->src[1].is_negative_one()) {
2343 inst->opcode = BRW_OPCODE_MOV;
2344 inst->src[0].negate = !inst->src[0].negate;
2345 inst->src[1] = reg_undef;
2346 progress = true;
2347 break;
2348 }
2349
2350 /* a * 0.0 = 0.0 */
2351 if (inst->src[1].is_zero()) {
2352 inst->opcode = BRW_OPCODE_MOV;
2353 inst->src[0] = inst->src[1];
2354 inst->src[1] = reg_undef;
2355 progress = true;
2356 break;
2357 }
2358
2359 if (inst->src[0].file == IMM) {
2360 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2361 inst->opcode = BRW_OPCODE_MOV;
2362 inst->src[0].f *= inst->src[1].f;
2363 inst->src[1] = reg_undef;
2364 progress = true;
2365 break;
2366 }
2367 break;
2368 case BRW_OPCODE_ADD:
2369 if (inst->src[1].file != IMM)
2370 continue;
2371
2372 /* a + 0.0 = a */
2373 if (inst->src[1].is_zero()) {
2374 inst->opcode = BRW_OPCODE_MOV;
2375 inst->src[1] = reg_undef;
2376 progress = true;
2377 break;
2378 }
2379
2380 if (inst->src[0].file == IMM) {
2381 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2382 inst->opcode = BRW_OPCODE_MOV;
2383 inst->src[0].f += inst->src[1].f;
2384 inst->src[1] = reg_undef;
2385 progress = true;
2386 break;
2387 }
2388 break;
2389 case BRW_OPCODE_OR:
2390 if (inst->src[0].equals(inst->src[1])) {
2391 inst->opcode = BRW_OPCODE_MOV;
2392 inst->src[1] = reg_undef;
2393 progress = true;
2394 break;
2395 }
2396 break;
2397 case BRW_OPCODE_LRP:
2398 if (inst->src[1].equals(inst->src[2])) {
2399 inst->opcode = BRW_OPCODE_MOV;
2400 inst->src[0] = inst->src[1];
2401 inst->src[1] = reg_undef;
2402 inst->src[2] = reg_undef;
2403 progress = true;
2404 break;
2405 }
2406 break;
2407 case BRW_OPCODE_CMP:
2408 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2409 inst->src[0].abs &&
2410 inst->src[0].negate &&
2411 inst->src[1].is_zero()) {
2412 inst->src[0].abs = false;
2413 inst->src[0].negate = false;
2414 inst->conditional_mod = BRW_CONDITIONAL_Z;
2415 progress = true;
2416 break;
2417 }
2418 break;
2419 case BRW_OPCODE_SEL:
2420 if (inst->src[0].equals(inst->src[1])) {
2421 inst->opcode = BRW_OPCODE_MOV;
2422 inst->src[1] = reg_undef;
2423 inst->predicate = BRW_PREDICATE_NONE;
2424 inst->predicate_inverse = false;
2425 progress = true;
2426 } else if (inst->saturate && inst->src[1].file == IMM) {
2427 switch (inst->conditional_mod) {
2428 case BRW_CONDITIONAL_LE:
2429 case BRW_CONDITIONAL_L:
2430 switch (inst->src[1].type) {
2431 case BRW_REGISTER_TYPE_F:
2432 if (inst->src[1].f >= 1.0f) {
2433 inst->opcode = BRW_OPCODE_MOV;
2434 inst->src[1] = reg_undef;
2435 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2436 progress = true;
2437 }
2438 break;
2439 default:
2440 break;
2441 }
2442 break;
2443 case BRW_CONDITIONAL_GE:
2444 case BRW_CONDITIONAL_G:
2445 switch (inst->src[1].type) {
2446 case BRW_REGISTER_TYPE_F:
2447 if (inst->src[1].f <= 0.0f) {
2448 inst->opcode = BRW_OPCODE_MOV;
2449 inst->src[1] = reg_undef;
2450 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2451 progress = true;
2452 }
2453 break;
2454 default:
2455 break;
2456 }
2457 default:
2458 break;
2459 }
2460 }
2461 break;
2462 case BRW_OPCODE_MAD:
2463 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2464 inst->opcode = BRW_OPCODE_MOV;
2465 inst->src[1] = reg_undef;
2466 inst->src[2] = reg_undef;
2467 progress = true;
2468 } else if (inst->src[0].is_zero()) {
2469 inst->opcode = BRW_OPCODE_MUL;
2470 inst->src[0] = inst->src[2];
2471 inst->src[2] = reg_undef;
2472 progress = true;
2473 } else if (inst->src[1].is_one()) {
2474 inst->opcode = BRW_OPCODE_ADD;
2475 inst->src[1] = inst->src[2];
2476 inst->src[2] = reg_undef;
2477 progress = true;
2478 } else if (inst->src[2].is_one()) {
2479 inst->opcode = BRW_OPCODE_ADD;
2480 inst->src[2] = reg_undef;
2481 progress = true;
2482 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2483 inst->opcode = BRW_OPCODE_ADD;
2484 inst->src[1].f *= inst->src[2].f;
2485 inst->src[2] = reg_undef;
2486 progress = true;
2487 }
2488 break;
2489 case SHADER_OPCODE_BROADCAST:
2490 if (is_uniform(inst->src[0])) {
2491 inst->opcode = BRW_OPCODE_MOV;
2492 inst->sources = 1;
2493 inst->force_writemask_all = true;
2494 progress = true;
2495 } else if (inst->src[1].file == IMM) {
2496 inst->opcode = BRW_OPCODE_MOV;
2497 inst->src[0] = component(inst->src[0],
2498 inst->src[1].ud);
2499 inst->sources = 1;
2500 inst->force_writemask_all = true;
2501 progress = true;
2502 }
2503 break;
2504
2505 default:
2506 break;
2507 }
2508
2509 /* Swap if src[0] is immediate. */
2510 if (progress && inst->is_commutative()) {
2511 if (inst->src[0].file == IMM) {
2512 fs_reg tmp = inst->src[1];
2513 inst->src[1] = inst->src[0];
2514 inst->src[0] = tmp;
2515 }
2516 }
2517 }
2518 return progress;
2519 }
2520
2521 /**
2522 * Optimize sample messages that have constant zero values for the trailing
2523 * texture coordinates. We can just reduce the message length for these
2524 * instructions instead of reserving registers for them. Trailing parameters
2525 * that aren't sent default to zero anyway. This will cause the dead code
2526 * eliminator to remove the MOV instruction that would otherwise be emitted to
2527 * set up the zero value.
2528 */
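/* A hypothetical sketch: a SIMD8 texel fetch whose LOAD_PAYLOAD supplies
 * (u, v, 0) can drop the trailing zero coordinate and shrink mlen by one
 * register, since the hardware supplies zero for unsent parameters anyway.
 */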
2529 bool
2530 fs_visitor::opt_zero_samples()
2531 {
2532 /* Gen4 infers the texturing opcode based on the message length so we can't
2533 * change it.
2534 */
2535 if (devinfo->gen < 5)
2536 return false;
2537
2538 bool progress = false;
2539
2540 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2541 if (!inst->is_tex())
2542 continue;
2543
2544 fs_inst *load_payload = (fs_inst *) inst->prev;
2545
2546 if (load_payload->is_head_sentinel() ||
2547 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2548 continue;
2549
2550 /* We don't want to remove the message header or the first parameter.
2551 * Removing the first parameter is not allowed; see the Haswell PRM
2552 * volume 7, page 149:
2553 *
2554 * "Parameter 0 is required except for the sampleinfo message, which
2555 * has no parameter 0"
2556 */
2557 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2558 load_payload->src[(inst->mlen - inst->header_size) /
2559 (inst->exec_size / 8) +
2560 inst->header_size - 1].is_zero()) {
2561 inst->mlen -= inst->exec_size / 8;
2562 progress = true;
2563 }
2564 }
2565
2566 if (progress)
2567 invalidate_live_intervals();
2568
2569 return progress;
2570 }
2571
2572 /**
2573 * Optimize sample messages which are followed by the final RT write.
2574 *
2575 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2576 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2577 * final texturing results copied to the framebuffer write payload and modify
2578 * them to write to the framebuffer directly.
2579 */
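/* A hypothetical sketch of the rewrite: a final sequence of
 *
 *    txl(16) vgrf4, ...
 *    fb_write(16) null, vgrf4 {EOT}
 *
 * collapses into a single txl(16) with a null destination, the render
 * target index folded into its message offset, and the EOT bit set.
 */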
2580 bool
2581 fs_visitor::opt_sampler_eot()
2582 {
2583 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2584
2585 if (stage != MESA_SHADER_FRAGMENT)
2586 return false;
2587
2588 if (devinfo->gen < 9 && !devinfo->is_cherryview)
2589 return false;
2590
2591 /* FINISHME: It should be possible to implement this optimization when there
2592 * are multiple drawbuffers.
2593 */
2594 if (key->nr_color_regions != 1)
2595 return false;
2596
2597 /* Requires emitting a bunch of saturating MOV instructions during logical
2598 * send lowering to clamp the color payload, which the sampler unit isn't
2599 * going to do for us.
2600 */
2601 if (key->clamp_fragment_color)
2602 return false;
2603
2604 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2605 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2606 fs_inst *fb_write = (fs_inst *)block->end();
2607 assert(fb_write->eot);
2608 assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
2609
2610 /* There wasn't one; nothing to do. */
2611 if (unlikely(fb_write->prev->is_head_sentinel()))
2612 return false;
2613
2614 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2615
2616 /* 3D Sampler » Messages » Message Format
2617 *
2618 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2619 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2620 */
2621 if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
2622 tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
2623 tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
2624 tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
2625 tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
2626 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
2627 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
2628 tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
2629 return false;
2630
2631 /* XXX - This shouldn't be necessary. */
2632 if (tex_inst->prev->is_head_sentinel())
2633 return false;
2634
2635 /* Check that the FB write sources are fully initialized by the single
2636 * texturing instruction.
2637 */
2638 for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
2639 if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
2640 if (!fb_write->src[i].equals(tex_inst->dst) ||
2641 fb_write->regs_read(i) != tex_inst->regs_written)
2642 return false;
2643 } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
2644 if (fb_write->src[i].file != BAD_FILE)
2645 return false;
2646 }
2647 }
2648
2649 assert(!tex_inst->eot); /* We can't get here twice */
2650 assert((tex_inst->offset & (0xff << 24)) == 0);
2651
2652 const fs_builder ibld(this, block, tex_inst);
2653
2654 tex_inst->offset |= fb_write->target << 24;
2655 tex_inst->eot = true;
2656 tex_inst->dst = ibld.null_reg_ud();
2657 tex_inst->regs_written = 0;
2658 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2659
2660 /* Marking EOT is sufficient, lower_logical_sends() will notice the EOT
2661 * flag and submit a header together with the sampler message as required
2662 * by the hardware.
2663 */
2664 invalidate_live_intervals();
2665 return true;
2666 }
2667
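/**
 * Give a fresh VGRF number to registers that are completely rewritten
 * after an earlier definition. Breaking such false dependencies shortens
 * live ranges and gives later passes more freedom; renaming only happens
 * at control-flow depth 0, where the new write cannot be skipped by a
 * branch.
 */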
2668 bool
2669 fs_visitor::opt_register_renaming()
2670 {
2671 bool progress = false;
2672 int depth = 0;
2673
2674 int remap[alloc.count];
2675 memset(remap, -1, sizeof(int) * alloc.count);
2676
2677 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2678 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2679 depth++;
2680 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2681 inst->opcode == BRW_OPCODE_WHILE) {
2682 depth--;
2683 }
2684
2685 /* Rewrite instruction sources. */
2686 for (int i = 0; i < inst->sources; i++) {
2687 if (inst->src[i].file == VGRF &&
2688 remap[inst->src[i].nr] != -1 &&
2689 remap[inst->src[i].nr] != inst->src[i].nr) {
2690 inst->src[i].nr = remap[inst->src[i].nr];
2691 progress = true;
2692 }
2693 }
2694
2695 const int dst = inst->dst.nr;
2696
2697 if (depth == 0 &&
2698 inst->dst.file == VGRF &&
2699 alloc.sizes[inst->dst.nr] == inst->regs_written &&
2700 !inst->is_partial_write()) {
2701 if (remap[dst] == -1) {
2702 remap[dst] = dst;
2703 } else {
2704 remap[dst] = alloc.allocate(inst->regs_written);
2705 inst->dst.nr = remap[dst];
2706 progress = true;
2707 }
2708 } else if (inst->dst.file == VGRF &&
2709 remap[dst] != -1 &&
2710 remap[dst] != dst) {
2711 inst->dst.nr = remap[dst];
2712 progress = true;
2713 }
2714 }
2715
2716 if (progress) {
2717 invalidate_live_intervals();
2718
2719 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2720 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
2721 delta_xy[i].nr = remap[delta_xy[i].nr];
2722 }
2723 }
2724 }
2725
2726 return progress;
2727 }
2728
2729 /**
2730 * Remove redundant or useless discard jumps.
2731 *
2732 * For example, we can eliminate jumps in the following sequence:
2733 *
2734 * discard-jump (redundant with the next jump)
2735 * discard-jump (useless; jumps to the next instruction)
2736 * placeholder-halt
2737 */
2738 bool
2739 fs_visitor::opt_redundant_discard_jumps()
2740 {
2741 bool progress = false;
2742
2743 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2744
2745 fs_inst *placeholder_halt = NULL;
2746 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2747 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2748 placeholder_halt = inst;
2749 break;
2750 }
2751 }
2752
2753 if (!placeholder_halt)
2754 return false;
2755
2756 /* Delete any HALTs immediately before the placeholder halt. */
2757 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2758 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2759 prev = (fs_inst *) placeholder_halt->prev) {
2760 prev->remove(last_bblock);
2761 progress = true;
2762 }
2763
2764 if (progress)
2765 invalidate_live_intervals();
2766
2767 return progress;
2768 }
2769
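/**
 * Replace a MOV from a GRF to an MRF with having the instruction that
 * produced the GRF write into the MRF directly. A hypothetical sketch
 * (register numbers invented for illustration):
 *
 *    add(8) vgrf4, vgrf2, vgrf3
 *    mov(8) m3, vgrf4
 *
 * becomes
 *
 *    add(8) m3, vgrf2, vgrf3
 */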
2770 bool
2771 fs_visitor::compute_to_mrf()
2772 {
2773 bool progress = false;
2774 int next_ip = 0;
2775
2776 /* No MRFs on Gen >= 7. */
2777 if (devinfo->gen >= 7)
2778 return false;
2779
2780 calculate_live_intervals();
2781
2782 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2783 int ip = next_ip;
2784 next_ip++;
2785
2786 if (inst->opcode != BRW_OPCODE_MOV ||
2787 inst->is_partial_write() ||
2788 inst->dst.file != MRF || inst->src[0].file != VGRF ||
2789 inst->dst.type != inst->src[0].type ||
2790 inst->src[0].abs || inst->src[0].negate ||
2791 !inst->src[0].is_contiguous() ||
2792 inst->src[0].subreg_offset)
2793 continue;
2794
2795 /* Can't compute-to-MRF this GRF if someone else was going to
2796 * read it later.
2797 */
2798 if (this->virtual_grf_end[inst->src[0].nr] > ip)
2799 continue;
2800
2801 /* Found a move of a GRF to an MRF. Let's see if we can rewrite the
2802 * instruction that produced this GRF to write into the MRF instead.
2803 */
2804 bool found = false;
2805
2806 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2807 if (regions_overlap(scan_inst->dst, scan_inst->regs_written * REG_SIZE,
2808 inst->src[0], inst->regs_read(0) * REG_SIZE)) {
2809 /* Found the last instruction to write the register we want
2810 * to turn into a compute-to-MRF.
2811 */
2812
2813 /* If this one instruction didn't populate all the
2814 * channels, bail. We might be able to rewrite everything
2815 * that writes that reg, but it would require smarter
2816 * tracking.
2817 */
2818 if (scan_inst->is_partial_write())
2819 break;
2820
2821 /* Handling things not fully contained in the source of the copy
2822 * would need us to understand coalescing out more than one MOV at
2823 * a time.
2824 */
2825 if (scan_inst->dst.reg_offset < inst->src[0].reg_offset ||
2826 scan_inst->dst.reg_offset + scan_inst->regs_written >
2827 inst->src[0].reg_offset + inst->regs_read(0))
2828 break;
2829
2830 /* SEND instructions can't have MRF as a destination. */
2831 if (scan_inst->mlen)
2832 break;
2833
2834 if (devinfo->gen == 6) {
2835 /* gen6 math instructions must have the destination be
2836 * GRF, so no compute-to-MRF for them.
2837 */
2838 if (scan_inst->is_math()) {
2839 break;
2840 }
2841 }
2842
2843 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset)
2844 found = true;
2845
2846 break;
2847 }
2848
2849 /* We don't handle control flow here. Most computation of
2850 * values that end up in MRFs happens shortly before the MRF
2851 * write anyway.
2852 */
2853 if (block->start() == scan_inst)
2854 break;
2855
2856 /* You can't read from an MRF, so if someone else reads our
2857 * MRF's source GRF that we wanted to rewrite, that stops us.
2858 */
2859 bool interfered = false;
2860 for (int i = 0; i < scan_inst->sources; i++) {
2861 if (regions_overlap(scan_inst->src[i], scan_inst->regs_read(i) * REG_SIZE,
2862 inst->src[0], inst->regs_read(0) * REG_SIZE)) {
2863 interfered = true;
2864 }
2865 }
2866 if (interfered)
2867 break;
2868
2869 if (regions_overlap(scan_inst->dst, scan_inst->regs_written * REG_SIZE,
2870 inst->dst, inst->regs_written * REG_SIZE)) {
2871 /* If somebody else writes our MRF here, we can't
2872 * compute-to-MRF before that.
2873 */
2874 break;
2875 }
2876
2877 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1 &&
2878 regions_overlap(fs_reg(MRF, scan_inst->base_mrf), scan_inst->mlen * REG_SIZE,
2879 inst->dst, inst->regs_written * REG_SIZE)) {
2880 /* Found a SEND instruction, which means that there are
2881 * live values in MRFs from base_mrf to base_mrf +
2882 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2883 * above it.
2884 */
2885 break;
2886 }
2887 }
2888
2889 if (!found)
2890 continue;
2891
2892 /* Found all the instructions that generate our MRF's source value.
2893 */
2894 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2895 if (regions_overlap(scan_inst->dst, scan_inst->regs_written * REG_SIZE,
2896 inst->src[0], inst->regs_read(0) * REG_SIZE)) {
2897 scan_inst->dst.file = MRF;
2898 scan_inst->dst.nr = inst->dst.nr;
2899 scan_inst->dst.reg_offset = 0;
2900 scan_inst->saturate |= inst->saturate;
2901 break;
2902 }
2903 }
2904
2905 inst->remove(block);
2906 progress = true;
2907 }
2908
2909 if (progress)
2910 invalidate_live_intervals();
2911
2912 return progress;
2913 }
2914
2915 /**
2916 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2917 * flow. We could probably do better here with some form of divergence
2918 * analysis.
2919 */
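/* Outside any control flow the pass assumes channel 0 is live, so
 * FIND_LIVE_CHANNEL collapses to an immediate "mov dst, 0" with
 * force_writemask_all set, as the case below shows.
 */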
2920 bool
2921 fs_visitor::eliminate_find_live_channel()
2922 {
2923 bool progress = false;
2924 unsigned depth = 0;
2925
2926 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2927 switch (inst->opcode) {
2928 case BRW_OPCODE_IF:
2929 case BRW_OPCODE_DO:
2930 depth++;
2931 break;
2932
2933 case BRW_OPCODE_ENDIF:
2934 case BRW_OPCODE_WHILE:
2935 depth--;
2936 break;
2937
2938 case FS_OPCODE_DISCARD_JUMP:
2939 /* This can potentially make control flow non-uniform until the end
2940 * of the program.
2941 */
2942 return progress;
2943
2944 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2945 if (depth == 0) {
2946 inst->opcode = BRW_OPCODE_MOV;
2947 inst->src[0] = brw_imm_ud(0u);
2948 inst->sources = 1;
2949 inst->force_writemask_all = true;
2950 progress = true;
2951 }
2952 break;
2953
2954 default:
2955 break;
2956 }
2957 }
2958
2959 return progress;
2960 }
2961
2962 /**
2963 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2964 * instructions to FS_OPCODE_REP_FB_WRITE.
2965 */
2966 void
2967 fs_visitor::emit_repclear_shader()
2968 {
2969 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2970 int base_mrf = 0;
2971 int color_mrf = base_mrf + 2;
2972 fs_inst *mov;
2973
2974 if (uniforms > 0) {
2975 mov = bld.exec_all().group(4, 0)
2976 .MOV(brw_message_reg(color_mrf),
2977 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
2978 } else {
2979 struct brw_reg reg =
2980 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
2981 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
2982 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2983
2984 mov = bld.exec_all().group(4, 0)
2985 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
2986 }
2987
2988 fs_inst *write;
2989 if (key->nr_color_regions == 1) {
2990 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2991 write->saturate = key->clamp_fragment_color;
2992 write->base_mrf = color_mrf;
2993 write->target = 0;
2994 write->header_size = 0;
2995 write->mlen = 1;
2996 } else {
2997 assume(key->nr_color_regions > 0);
2998 for (int i = 0; i < key->nr_color_regions; ++i) {
2999 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3000 write->saturate = key->clamp_fragment_color;
3001 write->base_mrf = base_mrf;
3002 write->target = i;
3003 write->header_size = 2;
3004 write->mlen = 3;
3005 }
3006 }
3007 write->eot = true;
3008
3009 calculate_cfg();
3010
3011 assign_constant_locations();
3012 assign_curb_setup();
3013
3014 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
3015 if (uniforms > 0) {
3016 assert(mov->src[0].file == FIXED_GRF);
3017 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
3018 }
3019 }
3020
3021 /**
3022 * Walks through basic blocks, looking for repeated MRF writes and
3023 * removing the later ones.
3024 */
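/* A hypothetical sketch: two back-to-back "mov m2, vgrf5" instructions
 * with no intervening write to m2 or vgrf5 are redundant, and the second
 * one is removed.
 */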
3025 bool
3026 fs_visitor::remove_duplicate_mrf_writes()
3027 {
3028 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
3029 bool progress = false;
3030
3031 /* Need to update the MRF tracking for compressed instructions. */
3032 if (dispatch_width >= 16)
3033 return false;
3034
3035 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3036
3037 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3038 if (inst->is_control_flow()) {
3039 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3040 }
3041
3042 if (inst->opcode == BRW_OPCODE_MOV &&
3043 inst->dst.file == MRF) {
3044 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
3045 if (prev_inst && inst->equals(prev_inst)) {
3046 inst->remove(block);
3047 progress = true;
3048 continue;
3049 }
3050 }
3051
3052 /* Clear out the last-write records for MRFs that were overwritten. */
3053 if (inst->dst.file == MRF) {
3054 last_mrf_move[inst->dst.nr] = NULL;
3055 }
3056
3057 if (inst->mlen > 0 && inst->base_mrf != -1) {
3058 /* Found a SEND instruction, which will include two or fewer
3059 * implied MRF writes. We could do better here.
3060 */
3061 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3062 last_mrf_move[inst->base_mrf + i] = NULL;
3063 }
3064 }
3065
3066 /* Clear out any MRF move records whose sources got overwritten. */
3067 if (inst->dst.file == VGRF) {
3068 for (unsigned int i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
3069 if (last_mrf_move[i] &&
3070 last_mrf_move[i]->src[0].nr == inst->dst.nr) {
3071 last_mrf_move[i] = NULL;
3072 }
3073 }
3074 }
3075
3076 if (inst->opcode == BRW_OPCODE_MOV &&
3077 inst->dst.file == MRF &&
3078 inst->src[0].file == VGRF &&
3079 !inst->is_partial_write()) {
3080 last_mrf_move[inst->dst.nr] = inst;
3081 }
3082 }
3083
3084 if (progress)
3085 invalidate_live_intervals();
3086
3087 return progress;
3088 }
3089
3090 static void
3091 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3092 {
3093 /* Clear the flag for registers that actually got read (as expected). */
3094 for (int i = 0; i < inst->sources; i++) {
3095 int grf;
3096 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3097 grf = inst->src[i].nr;
3098 } else {
3099 continue;
3100 }
3101
3102 if (grf >= first_grf &&
3103 grf < first_grf + grf_len) {
3104 deps[grf - first_grf] = false;
3105 if (inst->exec_size == 16)
3106 deps[grf - first_grf + 1] = false;
3107 }
3108 }
3109 }
3110
3111 /**
3112 * Implements this workaround for the original 965:
3113 *
3114 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3115 * check for post destination dependencies on this instruction, software
3116 * must ensure that there is no destination hazard for the case of ‘write
3117 * followed by a posted write’ shown in the following example.
3118 *
3119 * 1. mov r3 0
3120 * 2. send r3.xy <rest of send instruction>
3121 * 3. mov r2 r3
3122 *
3123 * Due to no post-destination dependency check on the ‘send’, the above
3124 * code sequence could have two instructions (1 and 2) in flight at the
3125 * same time that both consider ‘r3’ as the target of their final writes."
3126 */
3127 void
3128 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3129 fs_inst *inst)
3130 {
3131 int write_len = inst->regs_written;
3132 int first_write_grf = inst->dst.nr;
3133 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3134 assert(write_len < (int)sizeof(needs_dep) - 1);
3135
3136 memset(needs_dep, false, sizeof(needs_dep));
3137 memset(needs_dep, true, write_len);
3138
3139 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3140
3141 /* Walk backwards looking for writes to registers we're writing which
3142 * aren't read since being written. If we hit the start of the program,
3143 * we assume that there are no outstanding dependencies on entry to the
3144 * program.
3145 */
3146 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3147 /* If we hit control flow, assume that there *are* outstanding
3148 * dependencies, and force their cleanup before our instruction.
3149 */
3150 if (block->start() == scan_inst && block->num != 0) {
3151 for (int i = 0; i < write_len; i++) {
3152 if (needs_dep[i])
3153 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3154 first_write_grf + i);
3155 }
3156 return;
3157 }
3158
3159 /* We insert our reads as late as possible on the assumption that any
3160 * instruction other than a MOV that might have left us an outstanding
3161 * dependency has more latency than a MOV.
3162 */
3163 if (scan_inst->dst.file == VGRF) {
3164 for (int i = 0; i < scan_inst->regs_written; i++) {
3165 int reg = scan_inst->dst.nr + i;
3166
3167 if (reg >= first_write_grf &&
3168 reg < first_write_grf + write_len &&
3169 needs_dep[reg - first_write_grf]) {
3170 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3171 needs_dep[reg - first_write_grf] = false;
3172 if (scan_inst->exec_size == 16)
3173 needs_dep[reg - first_write_grf + 1] = false;
3174 }
3175 }
3176 }
3177
3178 /* Clear the flag for registers that actually got read (as expected). */
3179 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3180
3181 /* Continue the loop only if we haven't resolved all the dependencies */
3182 int i;
3183 for (i = 0; i < write_len; i++) {
3184 if (needs_dep[i])
3185 break;
3186 }
3187 if (i == write_len)
3188 return;
3189 }
3190 }
3191
3192 /**
3193 * Implements this workaround for the original 965:
3194 *
3195 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3196 * used as a destination register until after it has been sourced by an
3197 * instruction with a different destination register."
3198 */
3199 void
3200 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3201 {
3202 int write_len = inst->regs_written;
3203 int first_write_grf = inst->dst.nr;
3204 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3205 assert(write_len < (int)sizeof(needs_dep) - 1);
3206
3207 memset(needs_dep, false, sizeof(needs_dep));
3208 memset(needs_dep, true, write_len);
3209 /* Walk forwards looking for writes to registers we're writing which aren't
3210 * read before being written.
3211 */
3212 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3213 /* If we hit control flow, force resolve all remaining dependencies. */
3214 if (block->end() == scan_inst && block->num != cfg->num_blocks - 1) {
3215 for (int i = 0; i < write_len; i++) {
3216 if (needs_dep[i])
3217 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3218 first_write_grf + i);
3219 }
3220 return;
3221 }
3222
3223 /* Clear the flag for registers that actually got read (as expected). */
3224 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3225
3226 /* We insert our reads as late as possible since they're reading the
3227 * result of a SEND, which has massive latency.
3228 */
3229 if (scan_inst->dst.file == VGRF &&
3230 scan_inst->dst.nr >= first_write_grf &&
3231 scan_inst->dst.nr < first_write_grf + write_len &&
3232 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3233 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3234 scan_inst->dst.nr);
3235 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3236 }
3237
3238 /* Continue the loop only if we haven't resolved all the dependencies */
3239 int i;
3240 for (i = 0; i < write_len; i++) {
3241 if (needs_dep[i])
3242 break;
3243 }
3244 if (i == write_len)
3245 return;
3246 }
3247 }
3248
3249 void
3250 fs_visitor::insert_gen4_send_dependency_workarounds()
3251 {
3252 if (devinfo->gen != 4 || devinfo->is_g4x)
3253 return;
3254
3255 bool progress = false;
3256
3257 /* Note that we're done with register allocation, so GRF fs_regs always
3258 * have a .reg_offset of 0.
3259 */
3260
3261 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3262 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3263 insert_gen4_pre_send_dependency_workarounds(block, inst);
3264 insert_gen4_post_send_dependency_workarounds(block, inst);
3265 progress = true;
3266 }
3267 }
3268
3269 if (progress)
3270 invalidate_live_intervals();
3271 }
3272
3273 /**
3274 * Turns the generic expression-style uniform pull constant load instruction
3275 * into a hardware-specific series of instructions for loading a pull
3276 * constant.
3277 *
3278 * The expression style allows the CSE pass before this to optimize out
3279 * repeated loads from the same offset, and gives the pre-register-allocation
3280 * scheduling full flexibility, while the conversion to native instructions
3281 * allows the post-register-allocation scheduler the best information
3282 * possible.
3283 *
3284 * Note that execution masking for setting up pull constant loads is special:
3285 * the channels that need to be written are unrelated to the current execution
3286 * mask, since a later instruction will use one of the result channels as a
3287 * source operand for all 8 or 16 of its channels.
3288 */
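/* A hypothetical sketch of the Gen7+ path below (register numbers invented
 * for illustration): the generic
 *
 *    uniform_pull_constant_load(8) vgrf6, surface, 32
 *
 * becomes
 *
 *    set_simd4x2_offset(8) vgrf9, 32
 *    uniform_pull_constant_load_gen7(8) vgrf6, surface, vgrf9
 */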
3289 void
3290 fs_visitor::lower_uniform_pull_constant_loads()
3291 {
3292 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3293 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3294 continue;
3295
3296 if (devinfo->gen >= 7) {
3297 /* The offset arg is a vec4-aligned immediate byte offset. */
3298 fs_reg const_offset_reg = inst->src[1];
3299 assert(const_offset_reg.file == IMM &&
3300 const_offset_reg.type == BRW_REGISTER_TYPE_UD);
3301 assert(const_offset_reg.ud % 16 == 0);
3302
3303 fs_reg payload, offset;
3304 if (devinfo->gen >= 9) {
3305 /* We have to use a message header on Skylake to get SIMD4x2
3306 * mode. Reserve space for the register.
3307 */
3308 offset = payload = fs_reg(VGRF, alloc.allocate(2));
3309 offset.reg_offset++;
3310 inst->mlen = 2;
3311 } else {
3312 offset = payload = fs_reg(VGRF, alloc.allocate(1));
3313 inst->mlen = 1;
3314 }
3315
3316 /* This is actually going to be a MOV, but since only the first dword
3317 * is accessed, we have a special opcode that sets just that dword. Note
3318 * that this needs to be an operation that will be considered a def
3319 * by live variable analysis, or register allocation will explode.
3320 */
3321 fs_inst *setup = new(mem_ctx) fs_inst(FS_OPCODE_SET_SIMD4X2_OFFSET,
3322 8, offset, const_offset_reg);
3323 setup->force_writemask_all = true;
3324
3325 setup->ir = inst->ir;
3326 setup->annotation = inst->annotation;
3327 inst->insert_before(block, setup);
3328
3329 /* Similarly, this will only populate the first 4 channels of the
3330 * result register (since we only use smear values from 0-3), but we
3331 * don't tell the optimizer.
3332 */
3333 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3334 inst->src[1] = payload;
3335 inst->base_mrf = -1;
3336
3337 invalidate_live_intervals();
3338 } else {
3339 /* Before register allocation, we didn't tell the scheduler about the
3340 * MRF we use. We know it's safe to use this MRF because nothing
3341 * else does except for register spill/unspill, which generates and
3342 * uses its MRF within a single IR instruction.
3343 */
3344 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3345 inst->mlen = 1;
3346 }
3347 }
3348 }
3349
3350 bool
3351 fs_visitor::lower_load_payload()
3352 {
3353 bool progress = false;
3354
3355 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3356 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3357 continue;
3358
3359 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3360 assert(inst->saturate == false);
3361 fs_reg dst = inst->dst;
3362
3363 /* Get rid of COMPR4. We'll add it back in if we need it. */
3364 if (dst.file == MRF)
3365 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3366
3367 const fs_builder ibld(this, block, inst);
3368 const fs_builder hbld = ibld.exec_all().group(8, 0);
3369
3370 for (uint8_t i = 0; i < inst->header_size; i++) {
3371 if (inst->src[i].file != BAD_FILE) {
3372 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3373 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3374 hbld.MOV(mov_dst, mov_src);
3375 }
3376 dst = offset(dst, hbld, 1);
3377 }
3378
3379 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3380 inst->exec_size > 8) {
3381 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3382 * a straightforward copy. Instead, the result of the
3383 * LOAD_PAYLOAD is treated as interleaved and the first four
3384 * non-header sources are unpacked as:
3385 *
3386 * m + 0: r0
3387 * m + 1: g0
3388 * m + 2: b0
3389 * m + 3: a0
3390 * m + 4: r1
3391 * m + 5: g1
3392 * m + 6: b1
3393 * m + 7: a1
3394 *
3395 * This is used for gen <= 5 fb writes.
3396 */
3397 assert(inst->exec_size == 16);
3398 assert(inst->header_size + 4 <= inst->sources);
3399 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3400 if (inst->src[i].file != BAD_FILE) {
3401 if (devinfo->has_compr4) {
3402 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3403 compr4_dst.nr |= BRW_MRF_COMPR4;
3404 ibld.MOV(compr4_dst, inst->src[i]);
3405 } else {
3406 /* The platform doesn't have COMPR4, so we have to fake it. */
3407 fs_reg mov_dst = retype(dst, inst->src[i].type);
3408 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3409 mov_dst.nr += 4;
3410 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3411 }
3412 }
3413
3414 dst.nr++;
3415 }
3416
3417 /* The loop above only ever incremented us through the first set
3418 * of 4 registers. However, thanks to the magic of COMPR4, we
3419 * actually wrote to the first 8 registers, so we need to take
3420 * that into account now.
3421 */
3422 dst.nr += 4;
3423
3424 /* The COMPR4 code took care of the first 4 sources. We'll let
3425 * the regular path handle any remaining sources. Yes, we are
3426 * modifying the instruction but we're about to delete it so
3427 * this really doesn't hurt anything.
3428 */
3429 inst->header_size += 4;
3430 }
3431
3432 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3433 if (inst->src[i].file != BAD_FILE)
3434 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3435 dst = offset(dst, ibld, 1);
3436 }
3437
3438 inst->remove(block);
3439 progress = true;
3440 }
3441
3442 if (progress)
3443 invalidate_live_intervals();
3444
3445 return progress;
3446 }
3447
3448 bool
3449 fs_visitor::lower_integer_multiplication()
3450 {
3451 bool progress = false;
3452
3453 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3454 const fs_builder ibld(this, block, inst);
3455
3456 if (inst->opcode == BRW_OPCODE_MUL) {
3457 if (inst->dst.is_accumulator() ||
3458 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3459 inst->dst.type != BRW_REGISTER_TYPE_UD))
3460 continue;
3461
3462 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3463 * operation directly, but CHV/BXT cannot.
3464 */
3465 if (devinfo->gen >= 8 &&
3466 !devinfo->is_cherryview && !devinfo->is_broxton)
3467 continue;
3468
3469 if (inst->src[1].file == IMM &&
3470 inst->src[1].ud < (1 << 16)) {
3471 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3472 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3473 * src1 are used.
3474 *
3475 * If multiplying by an immediate value that fits in 16-bits, do a
3476 * single MUL instruction with that value in the proper location.
3477 */
3478 if (devinfo->gen < 7) {
3479 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
3480 inst->dst.type);
3481 ibld.MOV(imm, inst->src[1]);
3482 ibld.MUL(inst->dst, imm, inst->src[0]);
3483 } else {
3484 ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
3485 }
3486 } else {
3487 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3488 * do 32-bit integer multiplication in one instruction, but instead
3489 * must do a sequence (which actually calculates a 64-bit result):
3490 *
3491 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3492 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3493 * mov(8) g2<1>D acc0<8,8,1>D
3494 *
3495 * But on Gen > 6, the ability to use the second accumulator register
3496 * (acc1) for non-float data types was removed, preventing a simple
3497 * implementation in SIMD16. A 16-channel result can be calculated by
3498 * executing the three instructions twice in SIMD8, once with quarter
3499 * control of 1Q for the first eight channels and again with 2Q for
3500 * the second eight channels.
3501 *
3502 * Which accumulator register is implicitly accessed (by AccWrEnable
3503 * for instance) is determined by the quarter control. Unfortunately
3504 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3505 * implicit accumulator access by an instruction with 2Q will access
3506 * acc1 regardless of whether the data type is usable in acc1.
3507 *
3508 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3509 * integer data types.
3510 *
3511 * Since we only want the low 32-bits of the result, we can do two
3512 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3513 * adjust the high result and add them (like the mach is doing):
3514 *
3515 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3516 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3517 * shl(8) g9<1>D g8<8,8,1>D 16D
3518 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3519 *
3520 * We avoid the shl instruction by realizing that we only want to add
3521 * the low 16-bits of the "high" result to the high 16-bits of the
3522 * "low" result and using proper regioning on the add:
3523 *
3524 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3525 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3526 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3527 *
3528 * Since it does not use the (single) accumulator register, we can
3529 * schedule multi-component multiplications much better.
3530 */
3531
3532 fs_reg orig_dst = inst->dst;
3533 if (orig_dst.is_null() || orig_dst.file == MRF) {
3534 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
3535 inst->dst.type);
3536 }
3537 fs_reg low = inst->dst;
3538 fs_reg high(VGRF, alloc.allocate(dispatch_width / 8),
3539 inst->dst.type);
3540
3541 if (devinfo->gen >= 7) {
3542 fs_reg src1_0_w = inst->src[1];
3543 fs_reg src1_1_w = inst->src[1];
3544
3545 if (inst->src[1].file == IMM) {
3546 src1_0_w.ud &= 0xffff;
3547 src1_1_w.ud >>= 16;
3548 } else {
3549 src1_0_w.type = BRW_REGISTER_TYPE_UW;
3550 if (src1_0_w.stride != 0) {
3551 assert(src1_0_w.stride == 1);
3552 src1_0_w.stride = 2;
3553 }
3554
3555 src1_1_w.type = BRW_REGISTER_TYPE_UW;
3556 if (src1_1_w.stride != 0) {
3557 assert(src1_1_w.stride == 1);
3558 src1_1_w.stride = 2;
3559 }
3560 src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3561 }
3562 ibld.MUL(low, inst->src[0], src1_0_w);
3563 ibld.MUL(high, inst->src[0], src1_1_w);
3564 } else {
3565 fs_reg src0_0_w = inst->src[0];
3566 fs_reg src0_1_w = inst->src[0];
3567
3568 src0_0_w.type = BRW_REGISTER_TYPE_UW;
3569 if (src0_0_w.stride != 0) {
3570 assert(src0_0_w.stride == 1);
3571 src0_0_w.stride = 2;
3572 }
3573
3574 src0_1_w.type = BRW_REGISTER_TYPE_UW;
3575 if (src0_1_w.stride != 0) {
3576 assert(src0_1_w.stride == 1);
3577 src0_1_w.stride = 2;
3578 }
3579 src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3580
3581 ibld.MUL(low, src0_0_w, inst->src[1]);
3582 ibld.MUL(high, src0_1_w, inst->src[1]);
3583 }
3584
3585 fs_reg dst = inst->dst;
3586 dst.type = BRW_REGISTER_TYPE_UW;
3587 dst.subreg_offset = 2;
3588 dst.stride = 2;
3589
3590 high.type = BRW_REGISTER_TYPE_UW;
3591 high.stride = 2;
3592
3593 low.type = BRW_REGISTER_TYPE_UW;
3594 low.subreg_offset = 2;
3595 low.stride = 2;
3596
3597 ibld.ADD(dst, low, high);
3598
3599 if (inst->conditional_mod || orig_dst.file == MRF) {
3600 set_condmod(inst->conditional_mod,
3601 ibld.MOV(orig_dst, inst->dst));
3602 }
3603 }
3604
3605 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3606 /* Should have been lowered to 8-wide. */
3607 assert(inst->exec_size <= 8);
3608 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3609 inst->dst.type);
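/* MULH wants the high 32 bits of a 32x32 multiply: the MUL below
 * writes the full-precision product to the accumulator and the MACH
 * then extracts the top half into the destination.
 */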
3610 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3611 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3612
3613 if (devinfo->gen >= 8) {
3614 /* Until Gen8, integer multiplies read 32 bits from one source and
3615 * 16 bits from the other, relying on the MACH instruction to
3616 * generate the high 32 bits of the result.
3617 *
3618 * On Gen8, the multiply instruction does a full 32x32-bit
3619 * multiply, but in order to compute the 64-bit result with MACH we
3620 * simulate the previous behavior by narrowing src1 to 16 bits.
3621 *
3622 * FINISHME: Don't use source modifiers on src1.
3623 */
3624 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3625 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3626 mul->src[1].type = BRW_REGISTER_TYPE_UW;
3627 mul->src[1].stride *= 2;
3628
3629 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3630 inst->group > 0) {
3631 /* Among other things the quarter control bits influence which
3632 * accumulator register is used by the hardware for instructions
3633 * that access the accumulator implicitly (e.g. MACH). A
3634 * second-half instruction would normally map to acc1, which
3635 * doesn't exist on Gen7 and up (the hardware does emulate it for
3636 * floating-point instructions *only* by taking advantage of the
3637 * extra precision of acc0 not normally used for floating point
3638 * arithmetic).
3639 *
3640 * HSW and up are careful enough not to try to access an
3641 * accumulator register that doesn't exist, but on earlier Gen7
3642 * hardware we need to make sure that the quarter control bits are
3643 * zero to avoid non-deterministic behaviour, and to emit an extra
3644 * MOV to get the result masked correctly according to the current
3645 * channel enables.
3646 */
3647 mach->group = 0;
3648 mach->force_writemask_all = true;
3649 mach->dst = ibld.vgrf(inst->dst.type);
3650 ibld.MOV(inst->dst, mach->dst);
3651 }
3652 } else {
3653 continue;
3654 }
3655
3656 inst->remove(block);
3657 progress = true;
3658 }
3659
3660 if (progress)
3661 invalidate_live_intervals();
3662
3663 return progress;
3664 }
3665
3666 bool
3667 fs_visitor::lower_minmax()
3668 {
3669 assert(devinfo->gen < 6);
3670
3671 bool progress = false;
3672
3673 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3674 const fs_builder ibld(this, block, inst);
3675
3676 if (inst->opcode == BRW_OPCODE_SEL &&
3677 inst->predicate == BRW_PREDICATE_NONE) {
3678 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
3679 * the original SEL.L/GE instruction
3680 */
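/* A sketch of the rewrite performed here, e.g. for a SEL.L (minimum):
 *
 *    sel.l       dst, src0, src1
 * becomes:
 *    cmp.l.f0.0  null, src0, src1
 *    (+f0.0) sel dst, src0, src1
 */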
3681 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
3682 inst->conditional_mod);
3683 inst->predicate = BRW_PREDICATE_NORMAL;
3684 inst->conditional_mod = BRW_CONDITIONAL_NONE;
3685
3686 progress = true;
3687 }
3688 }
3689
3690 if (progress)
3691 invalidate_live_intervals();
3692
3693 return progress;
3694 }
3695
3696 static void
3697 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3698 fs_reg *dst, fs_reg color, unsigned components)
3699 {
3700 if (key->clamp_fragment_color) {
3701 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3702 assert(color.type == BRW_REGISTER_TYPE_F);
3703
3704 for (unsigned i = 0; i < components; i++)
3705 set_saturate(true,
3706 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3707
3708 color = tmp;
3709 }
3710
3711 for (unsigned i = 0; i < components; i++)
3712 dst[i] = offset(color, bld, i);
3713 }
3714
3715 static void
3716 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3717 const brw_wm_prog_data *prog_data,
3718 const brw_wm_prog_key *key,
3719 const fs_visitor::thread_payload &payload)
3720 {
3721 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
3722 const brw_device_info *devinfo = bld.shader->devinfo;
3723 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
3724 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
3725 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
3726 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
3727 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
3728 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
3729 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
3730 const unsigned components =
3731 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
3732
3733 /* We can potentially have a message length of up to 15, so we have to set
3734 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3735 */
3736 fs_reg sources[15];
3737 int header_size = 2, payload_header_size;
3738 unsigned length = 0;
3739
3740 /* From the Sandy Bridge PRM, volume 4, page 198:
3741 *
3742 * "Dispatched Pixel Enables. One bit per pixel indicating
3743 * which pixels were originally enabled when the thread was
3744 * dispatched. This field is only required for the end-of-
3745 * thread message and on all dual-source messages."
3746 */
3747 if (devinfo->gen >= 6 &&
3748 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3749 color1.file == BAD_FILE &&
3750 key->nr_color_regions == 1) {
3751 header_size = 0;
3752 }
3753
3754 if (header_size != 0) {
3755 assert(header_size == 2);
3756 /* Allocate 2 registers for a header */
3757 length += 2;
3758 }
3759
3760 if (payload.aa_dest_stencil_reg) {
3761 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
3762 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3763 .MOV(sources[length],
3764 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3765 length++;
3766 }
3767
3768 if (sample_mask.file != BAD_FILE) {
3769 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
3770 BRW_REGISTER_TYPE_UD);
3771
3772 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3773 * relevant. Since it's stored as unsigned words, one VGRF always holds
3774 * 16 channels, but the hardware only uses the lower or upper 8 of them
3775 * for a SIMD8 write, depending on whether the subspans for the first
3776 * or second half were selected.
3777 */
3778 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3779 sample_mask.type = BRW_REGISTER_TYPE_UW;
3780 sample_mask.stride *= 2;
3781
3782 bld.exec_all().annotate("FB write oMask")
3783 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
3784 inst->group),
3785 sample_mask);
3786 length++;
3787 }
3788
3789 payload_header_size = length;
3790
3791 if (src0_alpha.file != BAD_FILE) {
3792 /* FIXME: This is being passed at the wrong location in the payload and
3793 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3794 * It's supposed to be immediately before oMask but there seems to be no
3795 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3796 * requires header sources to form a contiguous segment at the beginning
3797 * of the message and src0_alpha has per-channel semantics.
3798 */
3799 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3800 length++;
3801 }
3802
3803 setup_color_payload(bld, key, &sources[length], color0, components);
3804 length += 4;
3805
3806 if (color1.file != BAD_FILE) {
3807 setup_color_payload(bld, key, &sources[length], color1, components);
3808 length += 4;
3809 }
3810
3811 if (src_depth.file != BAD_FILE) {
3812 sources[length] = src_depth;
3813 length++;
3814 }
3815
3816 if (dst_depth.file != BAD_FILE) {
3817 sources[length] = dst_depth;
3818 length++;
3819 }
3820
3821 if (src_stencil.file != BAD_FILE) {
3822 assert(devinfo->gen >= 9);
3823 assert(bld.dispatch_width() != 16);
3824
3825 /* XXX: src_stencil is only available on gen9+. dst_depth is never
3826 * available on gen9+. As such it's impossible to have both enabled at the
3827 * same time and therefore length cannot overrun the array.
3828 */
3829 assert(length < 15);
3830
3831 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3832 bld.exec_all().annotate("FB write OS")
3833 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
3834 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
3835 length++;
3836 }
3837
3838 fs_inst *load;
3839 if (devinfo->gen >= 7) {
3840 /* Send from the GRF */
3841 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
3842 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3843 payload.nr = bld.shader->alloc.allocate(load->regs_written);
3844 load->dst = payload;
3845
3846 inst->src[0] = payload;
3847 inst->resize_sources(1);
3848 inst->base_mrf = -1;
3849 } else {
3850 /* Send from the MRF */
3851 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3852 sources, length, payload_header_size);
3853
3854 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3855 * will do this for us if we just give it a COMPR4 destination.
3856 */
3857 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3858 load->dst.nr |= BRW_MRF_COMPR4;
3859
3860 inst->resize_sources(0);
3861 inst->base_mrf = 1;
3862 }
3863
3864 inst->opcode = FS_OPCODE_FB_WRITE;
3865 inst->mlen = load->regs_written;
3866 inst->header_size = header_size;
3867 }
3868
3869 static void
3870 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3871 const fs_reg &coordinate,
3872 const fs_reg &shadow_c,
3873 const fs_reg &lod, const fs_reg &lod2,
3874 const fs_reg &surface,
3875 const fs_reg &sampler,
3876 unsigned coord_components,
3877 unsigned grad_components)
3878 {
3879 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3880 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3881 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3882 fs_reg msg_end = msg_begin;
3883
3884 /* g0 header. */
3885 msg_end = offset(msg_end, bld.group(8, 0), 1);
3886
3887 for (unsigned i = 0; i < coord_components; i++)
3888 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3889 offset(coordinate, bld, i));
3890
3891 msg_end = offset(msg_end, bld, coord_components);
3892
3893 /* Messages other than SAMPLE and RESINFO in SIMD16, and TXD in SIMD8,
3894 * require all three coordinate components; unused ones must be zero.
3895 */
3896 if (coord_components > 0 &&
3897 (has_lod || shadow_c.file != BAD_FILE ||
3898 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3899 for (unsigned i = coord_components; i < 3; i++)
3900 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
3901
3902 msg_end = offset(msg_end, bld, 3 - coord_components);
3903 }
3904
3905 if (op == SHADER_OPCODE_TXD) {
3906 /* TXD unsupported in SIMD16 mode. */
3907 assert(bld.dispatch_width() == 8);
3908
3909 /* the slots for u and v are always present, but r is optional */
3910 if (coord_components < 2)
3911 msg_end = offset(msg_end, bld, 2 - coord_components);
3912
3913 /* P = u, v, r
3914 * dPdx = dudx, dvdx, drdx
3915 * dPdy = dudy, dvdy, drdy
3916 *
3917 * 1-arg: Does not exist.
3918 *
3919 * 2-arg: dudx dvdx dudy dvdy
3920 * dPdx.x dPdx.y dPdy.x dPdy.y
3921 * m4 m5 m6 m7
3922 *
3923 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3924 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3925 * m5 m6 m7 m8 m9 m10
3926 */
3927 for (unsigned i = 0; i < grad_components; i++)
3928 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3929
3930 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3931
3932 for (unsigned i = 0; i < grad_components; i++)
3933 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3934
3935 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3936 }
3937
3938 if (has_lod) {
3939 /* Bias/LOD with a shadow comparator is unsupported in SIMD16 -- *without*
3940 * a shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
3941 */
3942 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3943 bld.dispatch_width() == 16);
3944
3945 const brw_reg_type type =
3946 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3947 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3948 bld.MOV(retype(msg_end, type), lod);
3949 msg_end = offset(msg_end, bld, 1);
3950 }
3951
3952 if (shadow_c.file != BAD_FILE) {
3953 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3954 /* There's no plain shadow compare message, so we use shadow
3955 * compare with a bias of 0.0.
3956 */
3957 bld.MOV(msg_end, brw_imm_f(0.0f));
3958 msg_end = offset(msg_end, bld, 1);
3959 }
3960
3961 bld.MOV(msg_end, shadow_c);
3962 msg_end = offset(msg_end, bld, 1);
3963 }
3964
3965 inst->opcode = op;
3966 inst->src[0] = reg_undef;
3967 inst->src[1] = surface;
3968 inst->src[2] = sampler;
3969 inst->resize_sources(3);
3970 inst->base_mrf = msg_begin.nr;
3971 inst->mlen = msg_end.nr - msg_begin.nr;
3972 inst->header_size = 1;
3973 }
3974
3975 static void
3976 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
3977 const fs_reg &coordinate,
3978 const fs_reg &shadow_c,
3979 const fs_reg &lod, const fs_reg &lod2,
3980 const fs_reg &sample_index,
3981 const fs_reg &surface,
3982 const fs_reg &sampler,
3983 const fs_reg &offset_value,
3984 unsigned coord_components,
3985 unsigned grad_components)
3986 {
3987 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
3988 fs_reg msg_coords = message;
3989 unsigned header_size = 0;
3990
3991 if (offset_value.file != BAD_FILE) {
3992 /* The offsets set up by the visitor are in the m1 header, so we can't
3993 * go headerless.
3994 */
3995 header_size = 1;
3996 message.nr--;
3997 }
3998
3999 for (unsigned i = 0; i < coord_components; i++)
4000 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
4001 offset(coordinate, bld, i));
4002
4003 fs_reg msg_end = offset(msg_coords, bld, coord_components);
4004 fs_reg msg_lod = offset(msg_coords, bld, 4);
4005
4006 if (shadow_c.file != BAD_FILE) {
4007 fs_reg msg_shadow = msg_lod;
4008 bld.MOV(msg_shadow, shadow_c);
4009 msg_lod = offset(msg_shadow, bld, 1);
4010 msg_end = msg_lod;
4011 }
4012
4013 switch (op) {
4014 case SHADER_OPCODE_TXL:
4015 case FS_OPCODE_TXB:
4016 bld.MOV(msg_lod, lod);
4017 msg_end = offset(msg_lod, bld, 1);
4018 break;
4019 case SHADER_OPCODE_TXD:
4020 /**
4021 * P = u, v, r
4022 * dPdx = dudx, dvdx, drdx
4023 * dPdy = dudy, dvdy, drdy
4024 *
4025 * Load up these values:
4026 * - dudx dudy dvdx dvdy drdx drdy
4027 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
4028 */
4029 msg_end = msg_lod;
4030 for (unsigned i = 0; i < grad_components; i++) {
4031 bld.MOV(msg_end, offset(lod, bld, i));
4032 msg_end = offset(msg_end, bld, 1);
4033
4034 bld.MOV(msg_end, offset(lod2, bld, i));
4035 msg_end = offset(msg_end, bld, 1);
4036 }
4037 break;
4038 case SHADER_OPCODE_TXS:
4039 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
4040 bld.MOV(msg_lod, lod);
4041 msg_end = offset(msg_lod, bld, 1);
4042 break;
4043 case SHADER_OPCODE_TXF:
4044 msg_lod = offset(msg_coords, bld, 3);
4045 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
4046 msg_end = offset(msg_lod, bld, 1);
4047 break;
4048 case SHADER_OPCODE_TXF_CMS:
4049 msg_lod = offset(msg_coords, bld, 3);
4050 /* lod */
4051 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
4052 /* sample index */
4053 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
4054 msg_end = offset(msg_lod, bld, 2);
4055 break;
4056 default:
4057 break;
4058 }
4059
4060 inst->opcode = op;
4061 inst->src[0] = reg_undef;
4062 inst->src[1] = surface;
4063 inst->src[2] = sampler;
4064 inst->resize_sources(3);
4065 inst->base_mrf = message.nr;
4066 inst->mlen = msg_end.nr - message.nr;
4067 inst->header_size = header_size;
4068
4069 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4070 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4071 }
4072
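/* The sampler index in the message descriptor is only 4 bits, so
 * samplers above 15 require offsetting the Sampler State Pointer in
 * the message header instead, which is only possible on HSW and
 * Gen8+ (see the header setup below).
 */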
4073 static bool
4074 is_high_sampler(const struct brw_device_info *devinfo, const fs_reg &sampler)
4075 {
4076 if (devinfo->gen < 8 && !devinfo->is_haswell)
4077 return false;
4078
4079 return sampler.file != IMM || sampler.ud >= 16;
4080 }
4081
4082 static void
4083 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
4084 const fs_reg &coordinate,
4085 const fs_reg &shadow_c,
4086 fs_reg lod, const fs_reg &lod2,
4087 const fs_reg &sample_index,
4088 const fs_reg &mcs,
4089 const fs_reg &surface,
4090 const fs_reg &sampler,
4091 const fs_reg &offset_value,
4092 unsigned coord_components,
4093 unsigned grad_components)
4094 {
4095 const brw_device_info *devinfo = bld.shader->devinfo;
4096 int reg_width = bld.dispatch_width() / 8;
4097 unsigned header_size = 0, length = 0;
4098 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
4099 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
4100 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
4101
4102 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
4103 offset_value.file != BAD_FILE || inst->eot ||
4104 op == SHADER_OPCODE_SAMPLEINFO ||
4105 is_high_sampler(devinfo, sampler)) {
4106 /* For general texture offsets (no txf workaround), we need a header to
4107 * put them in. Note that we're only reserving space for it in the
4108 * message payload as it will be initialized implicitly by the
4109 * generator.
4110 *
4111 * TG4 needs to place its channel select in the header, for interaction
4112 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
4113 * larger sampler numbers we need to offset the Sampler State Pointer in
4114 * the header.
4115 */
4116 header_size = 1;
4117 sources[0] = fs_reg();
4118 length++;
4119
4120 /* If we're requesting fewer than four channels worth of response,
4121 * and we have an explicit header, we need to set up the sampler
4122 * writemask. It's reversed from normal: 1 means "don't write".
4123 */
4124 if (!inst->eot && inst->regs_written != 4 * reg_width) {
4125 assert((inst->regs_written % reg_width) == 0);
4126 unsigned mask = ~((1 << (inst->regs_written / reg_width)) - 1) & 0xf;
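/* e.g. a two-channel response gives mask 0xc: channels 2 and 3 of
 * the return payload are not written.
 */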
4127 inst->offset |= mask << 12;
4128 }
4129 }
4130
4131 if (shadow_c.file != BAD_FILE) {
4132 bld.MOV(sources[length], shadow_c);
4133 length++;
4134 }
4135
4136 bool coordinate_done = false;
4137
4138 /* The sampler can only meaningfully compute LOD for fragment shader
4139 * messages. For all other stages, we change the opcode to TXL and
4140 * hardcode the LOD to 0.
4141 */
4142 if (bld.shader->stage != MESA_SHADER_FRAGMENT &&
4143 op == SHADER_OPCODE_TEX) {
4144 op = SHADER_OPCODE_TXL;
4145 lod = brw_imm_f(0.0f);
4146 }
4147
4148 /* Set up the LOD info */
4149 switch (op) {
4150 case FS_OPCODE_TXB:
4151 case SHADER_OPCODE_TXL:
4152 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4153 op = SHADER_OPCODE_TXL_LZ;
4154 break;
4155 }
4156 bld.MOV(sources[length], lod);
4157 length++;
4158 break;
4159 case SHADER_OPCODE_TXD:
4160 /* TXD should have been lowered in SIMD16 mode. */
4161 assert(bld.dispatch_width() == 8);
4162
4163 /* Load dPdx and the coordinate together:
4164 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4165 */
4166 for (unsigned i = 0; i < coord_components; i++) {
4167 bld.MOV(sources[length++], offset(coordinate, bld, i));
4168
4169 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4170 * only derivatives for (u, v, r).
4171 */
4172 if (i < grad_components) {
4173 bld.MOV(sources[length++], offset(lod, bld, i));
4174 bld.MOV(sources[length++], offset(lod2, bld, i));
4175 }
4176 }
4177
4178 coordinate_done = true;
4179 break;
4180 case SHADER_OPCODE_TXS:
4181 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4182 length++;
4183 break;
4184 case SHADER_OPCODE_TXF:
4185 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4186 * On Gen9 they are u, v, lod, r.
4187 */
4188 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4189
4190 if (devinfo->gen >= 9) {
4191 if (coord_components >= 2) {
4192 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
4193 offset(coordinate, bld, 1));
4194 }
4195 length++;
4196 }
4197
4198 if (devinfo->gen >= 9 && lod.is_zero()) {
4199 op = SHADER_OPCODE_TXF_LZ;
4200 } else {
4201 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
4202 length++;
4203 }
4204
4205 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
4206 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4207 offset(coordinate, bld, i));
4208
4209 coordinate_done = true;
4210 break;
4211
4212 case SHADER_OPCODE_TXF_CMS:
4213 case SHADER_OPCODE_TXF_CMS_W:
4214 case SHADER_OPCODE_TXF_UMS:
4215 case SHADER_OPCODE_TXF_MCS:
4216 if (op == SHADER_OPCODE_TXF_UMS ||
4217 op == SHADER_OPCODE_TXF_CMS ||
4218 op == SHADER_OPCODE_TXF_CMS_W) {
4219 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
4220 length++;
4221 }
4222
4223 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
4224 /* Data from the multisample control surface. */
4225 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
4226 length++;
4227
4228 /* On Gen9+ we'll use ld2dms_w instead, which has two registers for
4229 * the MCS data.
4230 */
4231 if (op == SHADER_OPCODE_TXF_CMS_W) {
4232 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
4233 mcs.file == IMM ?
4234 mcs :
4235 offset(mcs, bld, 1));
4236 length++;
4237 }
4238 }
4239
4240 /* There is no offsetting for this message; just copy in the integer
4241 * texture coordinates.
4242 */
4243 for (unsigned i = 0; i < coord_components; i++)
4244 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4245 offset(coordinate, bld, i));
4246
4247 coordinate_done = true;
4248 break;
4249 case SHADER_OPCODE_TG4_OFFSET:
4250 /* gather4_po_c should have been lowered in SIMD16 mode. */
4251 assert(bld.dispatch_width() == 8 || shadow_c.file == BAD_FILE);
4252
4253 /* More crazy intermixing */
4254 for (unsigned i = 0; i < 2; i++) /* u, v */
4255 bld.MOV(sources[length++], offset(coordinate, bld, i));
4256
4257 for (unsigned i = 0; i < 2; i++) /* offu, offv */
4258 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4259 offset(offset_value, bld, i));
4260
4261 if (coord_components == 3) /* r if present */
4262 bld.MOV(sources[length++], offset(coordinate, bld, 2));
4263
4264 coordinate_done = true;
4265 break;
4266 default:
4267 break;
4268 }
4269
4270 /* Set up the coordinate (except for cases where it was done above) */
4271 if (!coordinate_done) {
4272 for (unsigned i = 0; i < coord_components; i++)
4273 bld.MOV(sources[length++], offset(coordinate, bld, i));
4274 }
4275
4276 int mlen;
4277 if (reg_width == 2)
4278 mlen = length * reg_width - header_size;
4279 else
4280 mlen = length * reg_width;
4281
4282 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
4283 BRW_REGISTER_TYPE_F);
4284 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
4285
4286 /* Generate the SEND. */
4287 inst->opcode = op;
4288 inst->src[0] = src_payload;
4289 inst->src[1] = surface;
4290 inst->src[2] = sampler;
4291 inst->resize_sources(3);
4292 inst->base_mrf = -1;
4293 inst->mlen = mlen;
4294 inst->header_size = header_size;
4295
4296 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4297 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4298 }
4299
4300 static void
4301 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
4302 {
4303 const brw_device_info *devinfo = bld.shader->devinfo;
4304 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
4305 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4306 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
4307 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
4308 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
4309 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
4310 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
4311 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
4312 const fs_reg &offset_value = inst->src[TEX_LOGICAL_SRC_OFFSET_VALUE];
4313 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
4314 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4315 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
4316 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
4317
4318 if (devinfo->gen >= 7) {
4319 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
4320 shadow_c, lod, lod2, sample_index,
4321 mcs, surface, sampler, offset_value,
4322 coord_components, grad_components);
4323 } else if (devinfo->gen >= 5) {
4324 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
4325 shadow_c, lod, lod2, sample_index,
4326 surface, sampler, offset_value,
4327 coord_components, grad_components);
4328 } else {
4329 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
4330 shadow_c, lod, lod2,
4331 surface, sampler,
4332 coord_components, grad_components);
4333 }
4334 }
4335
4336 /**
4337 * Initialize the header present in some typed and untyped surface
4338 * messages.
4339 */
4340 static fs_reg
4341 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
4342 {
4343 fs_builder ubld = bld.exec_all().group(8, 0);
4344 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
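/* Zero all 8 dwords of the header, then write the sample mask into
 * its last dword (component 7).
 */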
4345 ubld.MOV(dst, brw_imm_d(0));
4346 ubld.MOV(component(dst, 7), sample_mask);
4347 return dst;
4348 }
4349
4350 static void
4351 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
4352 const fs_reg &sample_mask)
4353 {
4354 /* Get the logical send arguments. */
4355 const fs_reg &addr = inst->src[0];
4356 const fs_reg &src = inst->src[1];
4357 const fs_reg &surface = inst->src[2];
4358 const UNUSED fs_reg &dims = inst->src[3];
4359 const fs_reg &arg = inst->src[4];
4360
4361 /* Calculate the total number of components of the payload. */
4362 const unsigned addr_sz = inst->components_read(0);
4363 const unsigned src_sz = inst->components_read(1);
4364 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
4365 const unsigned sz = header_sz + addr_sz + src_sz;
4366
4367 /* Allocate space for the payload. */
4368 fs_reg *const components = new fs_reg[sz];
4369 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
4370 unsigned n = 0;
4371
4372 /* Construct the payload. */
4373 if (header_sz)
4374 components[n++] = emit_surface_header(bld, sample_mask);
4375
4376 for (unsigned i = 0; i < addr_sz; i++)
4377 components[n++] = offset(addr, bld, i);
4378
4379 for (unsigned i = 0; i < src_sz; i++)
4380 components[n++] = offset(src, bld, i);
4381
4382 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
4383
4384 /* Update the original instruction. */
4385 inst->opcode = op;
4386 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
4387 inst->header_size = header_sz;
4388
4389 inst->src[0] = payload;
4390 inst->src[1] = surface;
4391 inst->src[2] = arg;
4392 inst->resize_sources(3);
4393
4394 delete[] components;
4395 }
4396
4397 static void
4398 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
4399 {
4400 const brw_device_info *devinfo = bld.shader->devinfo;
4401
4402 if (devinfo->gen >= 7) {
4403 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
4404
4405 } else {
4406 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
4407 BRW_REGISTER_TYPE_UD);
4408
4409 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
4410
4411 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
4412 inst->resize_sources(1);
4413 inst->base_mrf = payload.nr;
4414 inst->header_size = 1;
4415 inst->mlen = 1 + inst->exec_size / 8;
4416 }
4417 }
4418
4419 static void
4420 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
4421 {
4422 assert(bld.shader->devinfo->gen < 6);
4423
4424 inst->base_mrf = 2;
4425 inst->mlen = inst->sources * inst->exec_size / 8;
4426
4427 if (inst->sources > 1) {
4428 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
4429 * "Message Payload":
4430 *
4431 * "Operand0[7]. For the INT DIV functions, this operand is the
4432 * denominator."
4433 * ...
4434 * "Operand1[7]. For the INT DIV functions, this operand is the
4435 * numerator."
4436 */
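/* i.e. for the INT DIV functions the operands are swapped relative to
 * the IR: the denominator (src[1]) is sent as Operand0 and the
 * numerator (src[0]) as Operand1.
 */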
4437 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
4438 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
4439 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
4440
4441 inst->resize_sources(1);
4442 inst->src[0] = src0;
4443
4444 assert(inst->exec_size == 8);
4445 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
4446 }
4447 }
4448
4449 bool
4450 fs_visitor::lower_logical_sends()
4451 {
4452 bool progress = false;
4453
4454 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4455 const fs_builder ibld(this, block, inst);
4456
4457 switch (inst->opcode) {
4458 case FS_OPCODE_FB_WRITE_LOGICAL:
4459 assert(stage == MESA_SHADER_FRAGMENT);
4460 lower_fb_write_logical_send(ibld, inst,
4461 (const brw_wm_prog_data *)prog_data,
4462 (const brw_wm_prog_key *)key,
4463 payload);
4464 break;
4465
4466 case SHADER_OPCODE_TEX_LOGICAL:
4467 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4468 break;
4469
4470 case SHADER_OPCODE_TXD_LOGICAL:
4471 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4472 break;
4473
4474 case SHADER_OPCODE_TXF_LOGICAL:
4475 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4476 break;
4477
4478 case SHADER_OPCODE_TXL_LOGICAL:
4479 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4480 break;
4481
4482 case SHADER_OPCODE_TXS_LOGICAL:
4483 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4484 break;
4485
4486 case FS_OPCODE_TXB_LOGICAL:
4487 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4488 break;
4489
4490 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4491 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4492 break;
4493
4494 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
4495 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
4496 break;
4497
4498 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4499 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4500 break;
4501
4502 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4503 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4504 break;
4505
4506 case SHADER_OPCODE_LOD_LOGICAL:
4507 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4508 break;
4509
4510 case SHADER_OPCODE_TG4_LOGICAL:
4511 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4512 break;
4513
4514 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4515 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4516 break;
4517
4518 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4519 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
4520 break;
4521
4522 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4523 lower_surface_logical_send(ibld, inst,
4524 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4525 fs_reg());
4526 break;
4527
4528 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4529 lower_surface_logical_send(ibld, inst,
4530 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4531 ibld.sample_mask_reg());
4532 break;
4533
4534 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4535 lower_surface_logical_send(ibld, inst,
4536 SHADER_OPCODE_UNTYPED_ATOMIC,
4537 ibld.sample_mask_reg());
4538 break;
4539
4540 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4541 lower_surface_logical_send(ibld, inst,
4542 SHADER_OPCODE_TYPED_SURFACE_READ,
4543 brw_imm_d(0xffff));
4544 break;
4545
4546 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4547 lower_surface_logical_send(ibld, inst,
4548 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4549 ibld.sample_mask_reg());
4550 break;
4551
4552 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4553 lower_surface_logical_send(ibld, inst,
4554 SHADER_OPCODE_TYPED_ATOMIC,
4555 ibld.sample_mask_reg());
4556 break;
4557
4558 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4559 lower_varying_pull_constant_logical_send(ibld, inst);
4560 break;
4561
4562 case SHADER_OPCODE_RCP:
4563 case SHADER_OPCODE_RSQ:
4564 case SHADER_OPCODE_SQRT:
4565 case SHADER_OPCODE_EXP2:
4566 case SHADER_OPCODE_LOG2:
4567 case SHADER_OPCODE_SIN:
4568 case SHADER_OPCODE_COS:
4569 case SHADER_OPCODE_POW:
4570 case SHADER_OPCODE_INT_QUOTIENT:
4571 case SHADER_OPCODE_INT_REMAINDER:
4572 /* The math opcodes are overloaded for the send-like and
4573 * expression-like instructions, which seems kind of icky. Gen6+ has
4574 * a native (but rather quirky) MATH instruction so we don't need to
4575 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
4576 * logical instructions (which we can easily recognize because they
4577 * have mlen = 0) into send-like virtual instructions.
4578 */
4579 if (devinfo->gen < 6 && inst->mlen == 0) {
4580 lower_math_logical_send(ibld, inst);
4581 break;
4582
4583 } else {
4584 continue;
4585 }
4586
4587 default:
4588 continue;
4589 }
4590
4591 progress = true;
4592 }
4593
4594 if (progress)
4595 invalidate_live_intervals();
4596
4597 return progress;
4598 }
4599
4600 /**
4601 * Get the closest allowed SIMD width for instruction \p inst accounting for
4602 * some common regioning and execution control restrictions that apply to FPU
4603 * instructions. These restrictions don't necessarily have any relevance to
4604 * instructions not executed by the FPU pipeline like extended math, control
4605 * flow or send message instructions.
4606 *
4607 * For virtual opcodes it's really up to the instruction -- in some cases
4608 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
4609 * instructions) it may simplify virtual instruction lowering if we can
4610 * enforce FPU-like regioning restrictions already on the virtual instruction,
4611 * in other cases (e.g. virtual send-like instructions) this may be
4612 * excessively restrictive.
4613 */
4614 static unsigned
4615 get_fpu_lowered_simd_width(const struct brw_device_info *devinfo,
4616 const fs_inst *inst)
4617 {
4618 /* Maximum execution size representable in the instruction controls. */
4619 unsigned max_width = MIN2(32, inst->exec_size);
4620
4621 /* According to the PRMs:
4622 * "A. In Direct Addressing mode, a source cannot span more than 2
4623 * adjacent GRF registers.
4624 * B. A destination cannot span more than 2 adjacent GRF registers."
4625 *
4626 * Look for the source or destination with the largest register region
4627 * which is the one that is going to limit the overall execution size of
4628 * the instruction due to this rule.
4629 */
4630 unsigned reg_count = inst->regs_written;
4631
4632 for (unsigned i = 0; i < inst->sources; i++)
4633 reg_count = MAX2(reg_count, (unsigned)inst->regs_read(i));
4634
4635 /* Calculate the maximum execution size of the instruction based on the
4636 * factor by which it goes over the hardware limit of 2 GRFs.
4637 */
4638 if (reg_count > 2)
4639 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
4640
4641 /* According to the IVB PRMs:
4642 * "When destination spans two registers, the source MUST span two
4643 * registers. The exception to the above rule:
4644 *
4645 * - When source is scalar, the source registers are not incremented.
4646 * - When source is packed integer Word and destination is packed
4647 * integer DWord, the source register is not incremented but the
4648 * source sub register is incremented."
4649 *
4650 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
4651 * restrictions. The code below intentionally doesn't check whether the
4652 * destination type is integer because empirically the hardware doesn't
4653 * seem to care what the actual type is as long as it's dword-aligned.
4654 */
4655 if (devinfo->gen < 8) {
4656 for (unsigned i = 0; i < inst->sources; i++) {
4657 if (inst->regs_written == 2 &&
4658 inst->regs_read(i) != 0 && inst->regs_read(i) != 2 &&
4659 !is_uniform(inst->src[i]) &&
4660 !(type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
4661 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1))
4662 max_width = MIN2(max_width, inst->exec_size /
4663 inst->regs_written);
4664 }
4665 }
4666
4667 /* From the IVB PRMs:
4668 * "When an instruction is SIMD32, the low 16 bits of the execution mask
4669 * are applied for both halves of the SIMD32 instruction. If different
4670 * execution mask channels are required, split the instruction into two
4671 * SIMD16 instructions."
4672 *
4673 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
4674 * 32-wide control flow support in hardware and will behave similarly.
4675 */
4676 if (devinfo->gen < 8 && !inst->force_writemask_all)
4677 max_width = MIN2(max_width, 16);
4678
4679 /* From the IVB PRMs (applies to HSW too):
4680 * "Instructions with condition modifiers must not use SIMD32."
4681 *
4682 * From the BDW PRMs (applies to later hardware too):
4683 * "Ternary instruction with condition modifiers must not use SIMD32."
4684 */
4685 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
4686 max_width = MIN2(max_width, 16);
4687
4688 /* From the IVB PRMs (applies to other devices that don't have the
4689 * brw_device_info::supports_simd16_3src flag set):
4690 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
4691 * SIMD8 is not allowed for DF operations."
4692 */
4693 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
4694 max_width = MIN2(max_width, inst->exec_size / reg_count);
4695
4696 /* Only power-of-two execution sizes are representable in the instruction
4697 * control fields.
4698 */
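/* (e.g. a computed max_width of 12 is rounded down to 8.) */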
4699 return 1 << _mesa_logbase2(max_width);
4700 }
4701
4702 /**
4703 * Get the closest native SIMD width supported by the hardware for instruction
4704 * \p inst. The instruction will be left untouched by
4705 * fs_visitor::lower_simd_width() if the returned value is equal to the
4706 * original execution size.
4707 */
4708 static unsigned
4709 get_lowered_simd_width(const struct brw_device_info *devinfo,
4710 const fs_inst *inst)
4711 {
4712 switch (inst->opcode) {
4713 case BRW_OPCODE_MOV:
4714 case BRW_OPCODE_SEL:
4715 case BRW_OPCODE_NOT:
4716 case BRW_OPCODE_AND:
4717 case BRW_OPCODE_OR:
4718 case BRW_OPCODE_XOR:
4719 case BRW_OPCODE_SHR:
4720 case BRW_OPCODE_SHL:
4721 case BRW_OPCODE_ASR:
4722 case BRW_OPCODE_CMPN:
4723 case BRW_OPCODE_CSEL:
4724 case BRW_OPCODE_F32TO16:
4725 case BRW_OPCODE_F16TO32:
4726 case BRW_OPCODE_BFREV:
4727 case BRW_OPCODE_BFE:
4728 case BRW_OPCODE_ADD:
4729 case BRW_OPCODE_MUL:
4730 case BRW_OPCODE_AVG:
4731 case BRW_OPCODE_FRC:
4732 case BRW_OPCODE_RNDU:
4733 case BRW_OPCODE_RNDD:
4734 case BRW_OPCODE_RNDE:
4735 case BRW_OPCODE_RNDZ:
4736 case BRW_OPCODE_LZD:
4737 case BRW_OPCODE_FBH:
4738 case BRW_OPCODE_FBL:
4739 case BRW_OPCODE_CBIT:
4740 case BRW_OPCODE_SAD2:
4741 case BRW_OPCODE_MAD:
4742 case BRW_OPCODE_LRP:
4743 case FS_OPCODE_PACK:
4744 return get_fpu_lowered_simd_width(devinfo, inst);
4745
4746 case BRW_OPCODE_CMP: {
4747 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
4748 * when the destination is a GRF the dependency-clear bit on the flag
4749 * register is cleared early.
4750 *
4751 * Suggested workarounds are to disable coissuing CMP instructions
4752 * or to split CMP(16) instructions into two CMP(8) instructions.
4753 *
4754 * We choose to split into CMP(8) instructions since disabling
4755 * coissuing would affect CMP instructions not otherwise affected by
4756 * the erratum.
4757 */
4758 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
4759 !inst->dst.is_null() ? 8 : ~0);
4760 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
4761 }
4762 case BRW_OPCODE_BFI1:
4763 case BRW_OPCODE_BFI2:
4764 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
4765 * should
4766 * "Force BFI instructions to be executed always in SIMD8."
4767 */
4768 return MIN2(devinfo->is_haswell ? 8 : ~0u,
4769 get_fpu_lowered_simd_width(devinfo, inst));
4770
4771 case BRW_OPCODE_IF:
4772 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
4773 return inst->exec_size;
4774
4775 case SHADER_OPCODE_RCP:
4776 case SHADER_OPCODE_RSQ:
4777 case SHADER_OPCODE_SQRT:
4778 case SHADER_OPCODE_EXP2:
4779 case SHADER_OPCODE_LOG2:
4780 case SHADER_OPCODE_SIN:
4781 case SHADER_OPCODE_COS:
4782 /* Unary extended math instructions are limited to SIMD8 on Gen4 and
4783 * Gen6.
4784 */
4785 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4786 devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
4787 MIN2(8, inst->exec_size));
4788
4789 case SHADER_OPCODE_POW:
4790 /* SIMD16 is only allowed on Gen7+. */
4791 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4792 MIN2(8, inst->exec_size));
4793
4794 case SHADER_OPCODE_INT_QUOTIENT:
4795 case SHADER_OPCODE_INT_REMAINDER:
4796 /* Integer division is limited to SIMD8 on all generations. */
4797 return MIN2(8, inst->exec_size);
4798
4799 case FS_OPCODE_LINTERP:
4800 case FS_OPCODE_GET_BUFFER_SIZE:
4801 case FS_OPCODE_DDX_COARSE:
4802 case FS_OPCODE_DDX_FINE:
4803 case FS_OPCODE_DDY_COARSE:
4804 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
4805 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
4806 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
4807 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
4808 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
4809 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
4810 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
4811 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
4812 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
4813 return MIN2(16, inst->exec_size);
4814
4815 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4816 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
4817 * message used to implement varying pull constant loads, so expand it
4818 * to SIMD16. An alternative with longer message payload length but
4819 * shorter return payload would be to use the SIMD8 sampler message that
4820 * takes (header, u, v, r) as parameters instead of (header, u).
4821 */
4822 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
4823
4824 case FS_OPCODE_DDY_FINE:
4825 /* The implementation of this virtual opcode may require emitting
4826 * compressed Align16 instructions, which are severely limited on some
4827 * generations.
4828 *
4829 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
4830 * Region Restrictions):
4831 *
4832 * "In Align16 access mode, SIMD16 is not allowed for DW operations
4833 * and SIMD8 is not allowed for DF operations."
4834 *
4835 * In this context, "DW operations" means "operations acting on 32-bit
4836 * values", so it includes operations on floats.
4837 *
4838 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
4839 * (Instruction Compression -> Rules and Restrictions):
4840 *
4841 * "A compressed instruction must be in Align1 access mode. Align16
4842 * mode instructions cannot be compressed."
4843 *
4844 * Similar text exists in the g45 PRM.
4845 *
4846 * Empirically, compressed align16 instructions using odd register
4847 * numbers don't appear to work on Sandybridge either.
4848 */
4849 return (devinfo->gen == 4 || devinfo->gen == 6 ||
4850 (devinfo->gen == 7 && !devinfo->is_haswell) ?
4851 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
4852
4853 case SHADER_OPCODE_MULH:
4854 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4855 * is 8-wide on Gen7+.
4856 */
4857 return (devinfo->gen >= 7 ? 8 :
4858 get_fpu_lowered_simd_width(devinfo, inst));
4859
4860 case FS_OPCODE_FB_WRITE_LOGICAL:
4861 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4862 * here.
4863 */
4864 assert(devinfo->gen != 6 ||
4865 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
4866 inst->exec_size == 8);
4867 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4868 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
4869 8 : MIN2(16, inst->exec_size));
4870
4871 case SHADER_OPCODE_TEX_LOGICAL:
4872 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4873 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4874 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4875 case SHADER_OPCODE_LOD_LOGICAL:
4876 case SHADER_OPCODE_TG4_LOGICAL:
4877 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4878 return MIN2(16, inst->exec_size);
4879
4880 case SHADER_OPCODE_TXD_LOGICAL:
4881 /* TXD is unsupported in SIMD16 mode. */
4882 return 8;
4883
4884 case SHADER_OPCODE_TG4_OFFSET_LOGICAL: {
4885 /* gather4_po_c is unsupported in SIMD16 mode. */
4886 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4887 return (shadow_c.file != BAD_FILE ? 8 : MIN2(16, inst->exec_size));
4888 }
4889 case SHADER_OPCODE_TXL_LOGICAL:
4890 case FS_OPCODE_TXB_LOGICAL: {
4891 /* Gen4 doesn't have SIMD8 non-shadow-compare bias/LOD instructions, and
4892 * Gen4-6 can't support TXL and TXB with shadow comparison in SIMD16
4893 * mode because the message exceeds the maximum length of 11.
4894 */
4895 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4896 if (devinfo->gen == 4 && shadow_c.file == BAD_FILE)
4897 return 16;
4898 else if (devinfo->gen < 7 && shadow_c.file != BAD_FILE)
4899 return 8;
4900 else
4901 return MIN2(16, inst->exec_size);
4902 }
4903 case SHADER_OPCODE_TXF_LOGICAL:
4904 case SHADER_OPCODE_TXS_LOGICAL:
4905 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
4906 * messages. Use SIMD16 instead.
4907 */
4908 if (devinfo->gen == 4)
4909 return 16;
4910 else
4911 return MIN2(16, inst->exec_size);
4912
4913 case SHADER_OPCODE_TXF_CMS_W_LOGICAL: {
4914 /* This opcode can take up to 6 arguments which means that in some
4915 * circumstances it can end up with a message that is too long in SIMD16
4916 * mode.
4917 */
4918 const unsigned coord_components =
4919 inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4920 /* First three arguments are the sample index and the two arguments for
4921 * the MCS data.
4922 */
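/* e.g. with 3 coordinate components the SIMD16 payload would be
 * (3 + 3) * 2 = 12 registers, over the sampler message limit of 11.
 */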
4923 if ((coord_components + 3) * 2 > MAX_SAMPLER_MESSAGE_SIZE)
4924 return 8;
4925 else
4926 return MIN2(16, inst->exec_size);
4927 }
4928
4929 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4930 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4931 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4932 return 8;
4933
4934 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4935 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4936 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4937 return MIN2(16, inst->exec_size);
4938
4939 case SHADER_OPCODE_URB_READ_SIMD8:
4940 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
4941 case SHADER_OPCODE_URB_WRITE_SIMD8:
4942 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
4943 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
4944 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
4945 return MIN2(8, inst->exec_size);
4946
4947 case SHADER_OPCODE_MOV_INDIRECT:
4948 /* Prior to Broadwell, we only have 8 address subregisters */
4949 return MIN3(devinfo->gen >= 8 ? 16 : 8,
4950 2 * REG_SIZE / (inst->dst.stride * type_sz(inst->dst.type)),
4951 inst->exec_size);
4952
4953 case SHADER_OPCODE_LOAD_PAYLOAD: {
4954 const unsigned reg_count =
4955 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
4956
4957 if (reg_count > 2) {
4958 /* Only LOAD_PAYLOAD instructions with per-channel destination region
4959 * can be easily lowered (which excludes headers and heterogeneous
4960 * types).
4961 */
4962 assert(!inst->header_size);
4963 for (unsigned i = 0; i < inst->sources; i++)
4964 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
4965 inst->src[i].file == BAD_FILE);
4966
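/* e.g. a per-channel payload spanning 4 GRFs in SIMD16 is lowered to
 * two SIMD8 halves of 2 GRFs each.
 */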
4967 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
4968 } else {
4969 return inst->exec_size;
4970 }
4971 }
4972 default:
4973 return inst->exec_size;
4974 }
4975 }
4976
4977 /**
4978 * Return true if splitting out the group of channels of instruction \p inst
4979 * given by lbld.group() requires allocating a temporary for the i-th source
4980 * of the lowered instruction.
4981 */
4982 static inline bool
4983 needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
4984 {
4985 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
4986 (inst->components_read(i) == 1 &&
4987 lbld.dispatch_width() <= inst->exec_size));
4988 }
4989
4990 /**
4991 * Extract the data that would be consumed by the channel group given by
4992 * lbld.group() from the i-th source region of instruction \p inst and return
4993 * it in packed form as the result. If any copy instructions are required they
4994 * will be emitted before the given \p inst in \p block.
4995 */
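/* For example, when a SIMD16 ADD is lowered to two SIMD8 halves, each
 * half either points directly at its channel group of the original
 * region or, if needs_src_copy() says so, at a packed temporary
 * initialized by the MOVs emitted here.
 */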
4996 static fs_reg
4997 emit_unzip(const fs_builder &lbld, bblock_t *block, fs_inst *inst,
4998 unsigned i)
4999 {
5000 /* Specified channel group from the source region. */
5001 const fs_reg src = horiz_offset(inst->src[i], lbld.group());
5002
5003 if (needs_src_copy(lbld, inst, i)) {
5004 /* Builder of the right width to perform the copy avoiding uninitialized
5005 * data if the lowered execution size is greater than the original
5006 * execution size of the instruction.
5007 */
5008 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5009 inst->exec_size), 0);
5010 const fs_reg tmp = lbld.vgrf(inst->src[i].type, inst->components_read(i));
5011
5012 for (unsigned k = 0; k < inst->components_read(i); ++k)
5013 cbld.at(block, inst)
5014 .MOV(offset(tmp, lbld, k), offset(src, inst->exec_size, k));
5015
5016 return tmp;
5017
5018 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
5019 /* The source is invariant for all dispatch_width-wide groups of the
5020 * original region.
5021 */
5022 return inst->src[i];
5023
5024 } else {
5025 /* We can just point the lowered instruction at the right channel group
5026 * from the original region.
5027 */
5028 return src;
5029 }
5030 }
5031
5032 /**
5033 * Insert data from a packed temporary into the channel group given by
5034 * lbld.group() of the destination region of instruction \p inst and return
5035 * the temporary as the result. If any copy instructions are required they will
5036 * be emitted around the given \p inst in \p block.
5037 */
5038 static fs_reg
5039 emit_zip(const fs_builder &lbld, bblock_t *block, fs_inst *inst)
5040 {
5041 /* Builder of the right width to perform the copy avoiding uninitialized
5042 * data if the lowered execution size is greater than the original
5043 * execution size of the instruction.
5044 */
5045 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5046 inst->exec_size), 0);
5047
5048 /* Specified channel group from the destination region. */
5049 const fs_reg dst = horiz_offset(inst->dst, lbld.group());
5050 const unsigned dst_size = inst->regs_written * REG_SIZE /
5051 inst->dst.component_size(inst->exec_size);
5052 const fs_reg tmp = lbld.vgrf(inst->dst.type, dst_size);
5053
5054 if (inst->predicate) {
5055 /* Handle predication by copying the original contents of the
5056 * destination into the temporary before emitting the lowered
5057 * instruction.
5058 */
5059 for (unsigned k = 0; k < dst_size; ++k)
5060 cbld.at(block, inst)
5061 .MOV(offset(tmp, lbld, k), offset(dst, inst->exec_size, k));
5062 }
5063
5064 for (unsigned k = 0; k < dst_size; ++k)
5065 cbld.at(block, inst->next)
5066 .MOV(offset(dst, inst->exec_size, k), offset(tmp, lbld, k));
5067
5068 return tmp;
5069 }
5070
5071 bool
5072 fs_visitor::lower_simd_width()
5073 {
5074 bool progress = false;
5075
5076 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5077 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
5078
5079 if (lower_width != inst->exec_size) {
5080 /* Builder matching the original instruction. We may also need to
5081 * emit an instruction wider than the original, so set the
5082 * execution size of the builder to the larger of the two for now
5083 * to be sure that both cases can be handled.
5084 */
5085 const unsigned max_width = MAX2(inst->exec_size, lower_width);
5086 const fs_builder ibld = bld.at(block, inst)
5087 .exec_all(inst->force_writemask_all)
5088 .group(max_width, inst->group / max_width);
5089
5090 /* Split the copies into chunks of the execution width of either the
5091 * original or the lowered instruction, whichever is lower.
5092 */
5093 const unsigned n = DIV_ROUND_UP(inst->exec_size, lower_width);
5094 const unsigned dst_size = inst->regs_written * REG_SIZE /
5095 inst->dst.component_size(inst->exec_size);
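/* e.g. lowering a SIMD16 instruction to SIMD8 yields n == 2 groups. */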
5096
5097 assert(!inst->writes_accumulator && !inst->mlen);
5098
5099 for (unsigned i = 0; i < n; i++) {
5100 /* Emit a copy of the original instruction with the lowered width.
5101 * If the EOT flag was set, drop it from all but the last copy to
5102 * avoid killing the thread prematurely.
5103 */
5104 fs_inst split_inst = *inst;
5105 split_inst.exec_size = lower_width;
5106 split_inst.eot = inst->eot && i == n - 1;
5107
5108 /* Select the correct channel enables for the i-th group, then
5109 * transform the sources and destination and emit the lowered
5110 * instruction.
5111 */
5112 const fs_builder lbld = ibld.group(lower_width, i);
5113
5114 for (unsigned j = 0; j < inst->sources; j++)
5115 split_inst.src[j] = emit_unzip(lbld, block, inst, j);
5116
5117 split_inst.dst = emit_zip(lbld, block, inst);
5118 split_inst.regs_written =
5119 DIV_ROUND_UP(type_sz(inst->dst.type) * dst_size * lower_width,
5120 REG_SIZE);
5121
5122 lbld.emit(split_inst);
5123 }
5124
5125 inst->remove(block);
5126 progress = true;
5127 }
5128 }
5129
5130 if (progress)
5131 invalidate_live_intervals();
5132
5133 return progress;
5134 }
5135
5136 void
5137 fs_visitor::dump_instructions()
5138 {
5139 dump_instructions(NULL);
5140 }
5141
5142 void
5143 fs_visitor::dump_instructions(const char *name)
5144 {
5145 FILE *file = stderr;
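/* Refuse to open dump files when running as root, presumably to
 * avoid leaving root-owned files around.
 */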
5146 if (name && geteuid() != 0) {
5147 file = fopen(name, "w");
5148 if (!file)
5149 file = stderr;
5150 }
5151
5152 if (cfg) {
5153 calculate_register_pressure();
5154 int ip = 0, max_pressure = 0;
5155 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
5156 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
5157 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
5158 dump_instruction(inst, file);
5159 ip++;
5160 }
5161 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
5162 } else {
5163 int ip = 0;
5164 foreach_in_list(backend_instruction, inst, &instructions) {
5165 fprintf(file, "%4d: ", ip++);
5166 dump_instruction(inst, file);
5167 }
5168 }
5169
5170 if (file != stderr) {
5171 fclose(file);
5172 }
5173 }
5174
5175 void
5176 fs_visitor::dump_instruction(backend_instruction *be_inst)
5177 {
5178 dump_instruction(be_inst, stderr);
5179 }
5180
5181 void
5182 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
5183 {
5184 fs_inst *inst = (fs_inst *)be_inst;
5185
5186 if (inst->predicate) {
5187 fprintf(file, "(%cf0.%d) ",
5188 inst->predicate_inverse ? '-' : '+',
5189 inst->flag_subreg);
5190 }
5191
5192 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
5193 if (inst->saturate)
5194 fprintf(file, ".sat");
5195 if (inst->conditional_mod) {
5196 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
5197 if (!inst->predicate &&
5198 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
5199 inst->opcode != BRW_OPCODE_IF &&
5200 inst->opcode != BRW_OPCODE_WHILE))) {
5201 fprintf(file, ".f0.%d", inst->flag_subreg);
5202 }
5203 }
5204 fprintf(file, "(%d) ", inst->exec_size);
5205
5206 if (inst->mlen) {
5207 fprintf(file, "(mlen: %d) ", inst->mlen);
5208 }
5209
5210 switch (inst->dst.file) {
5211 case VGRF:
5212 fprintf(file, "vgrf%d", inst->dst.nr);
5213 if (alloc.sizes[inst->dst.nr] != inst->regs_written ||
5214 inst->dst.subreg_offset)
5215 fprintf(file, "+%d.%d",
5216 inst->dst.reg_offset, inst->dst.subreg_offset);
5217 break;
5218 case FIXED_GRF:
5219 fprintf(file, "g%d", inst->dst.nr);
5220 break;
5221 case MRF:
5222 fprintf(file, "m%d", inst->dst.nr);
5223 break;
5224 case BAD_FILE:
5225 fprintf(file, "(null)");
5226 break;
5227 case UNIFORM:
5228 fprintf(file, "***u%d***", inst->dst.nr + inst->dst.reg_offset);
5229 break;
5230 case ATTR:
5231 fprintf(file, "***attr%d***", inst->dst.nr + inst->dst.reg_offset);
5232 break;
5233 case ARF:
5234 switch (inst->dst.nr) {
5235 case BRW_ARF_NULL:
5236 fprintf(file, "null");
5237 break;
5238 case BRW_ARF_ADDRESS:
5239 fprintf(file, "a0.%d", inst->dst.subnr);
5240 break;
5241 case BRW_ARF_ACCUMULATOR:
5242 fprintf(file, "acc%d", inst->dst.subnr);
5243 break;
5244 case BRW_ARF_FLAG:
5245 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5246 break;
5247 default:
5248 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5249 break;
5250 }
5251 if (inst->dst.subnr)
5252 fprintf(file, "+%d", inst->dst.subnr);
5253 break;
5254 case IMM:
5255 unreachable("not reached");
5256 }
5257 if (inst->dst.stride != 1)
5258 fprintf(file, "<%u>", inst->dst.stride);
5259 fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
5260
5261 for (int i = 0; i < inst->sources; i++) {
5262 if (inst->src[i].negate)
5263 fprintf(file, "-");
5264 if (inst->src[i].abs)
5265 fprintf(file, "|");
5266 switch (inst->src[i].file) {
5267 case VGRF:
5268 fprintf(file, "vgrf%d", inst->src[i].nr);
5269 if (alloc.sizes[inst->src[i].nr] != (unsigned)inst->regs_read(i) ||
5270 inst->src[i].subreg_offset)
5271 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
5272 inst->src[i].subreg_offset);
5273 break;
5274 case FIXED_GRF:
5275 fprintf(file, "g%d", inst->src[i].nr);
5276 break;
5277 case MRF:
5278 fprintf(file, "***m%d***", inst->src[i].nr);
5279 break;
5280 case ATTR:
5281 fprintf(file, "attr%d+%d", inst->src[i].nr, inst->src[i].reg_offset);
5282 break;
5283 case UNIFORM:
5284 fprintf(file, "u%d", inst->src[i].nr + inst->src[i].reg_offset);
5285 if (inst->src[i].subreg_offset) {
5286 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
5287 inst->src[i].subreg_offset);
5288 }
5289 break;
5290 case BAD_FILE:
5291 fprintf(file, "(null)");
5292 break;
5293 case IMM:
5294 switch (inst->src[i].type) {
5295 case BRW_REGISTER_TYPE_F:
5296 fprintf(file, "%-gf", inst->src[i].f);
5297 break;
5298 case BRW_REGISTER_TYPE_DF:
5299 fprintf(file, "%fdf", inst->src[i].df);
5300 break;
5301 case BRW_REGISTER_TYPE_W:
5302 case BRW_REGISTER_TYPE_D:
5303 fprintf(file, "%dd", inst->src[i].d);
5304 break;
5305 case BRW_REGISTER_TYPE_UW:
5306 case BRW_REGISTER_TYPE_UD:
5307 fprintf(file, "%uu", inst->src[i].ud);
5308 break;
5309 case BRW_REGISTER_TYPE_VF:
5310 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
5311 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
5312 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
5313 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
5314 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
5315 break;
5316 default:
5317 fprintf(file, "???");
5318 break;
5319 }
5320 break;
5321 case ARF:
5322 switch (inst->src[i].nr) {
5323 case BRW_ARF_NULL:
5324 fprintf(file, "null");
5325 break;
5326 case BRW_ARF_ADDRESS:
5327 fprintf(file, "a0.%d", inst->src[i].subnr);
5328 break;
5329 case BRW_ARF_ACCUMULATOR:
5330 fprintf(file, "acc%d", inst->src[i].subnr);
5331 break;
5332 case BRW_ARF_FLAG:
5333 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5334 break;
5335 default:
5336 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5337 break;
5338 }
5339 if (inst->src[i].subnr)
5340 fprintf(file, "+%d", inst->src[i].subnr);
5341 break;
5342 }
5343 if (inst->src[i].abs)
5344 fprintf(file, "|");
5345
5346 if (inst->src[i].file != IMM) {
5347 unsigned stride;
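/* ARF/FIXED_GRF regions encode the horizontal stride as a shift amount:
 * hstride == 0 means a stride of 0, otherwise the stride is 2^(hstride - 1)
 * elements.
 */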
5348 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
5349 unsigned hstride = inst->src[i].hstride;
5350 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
5351 } else {
5352 stride = inst->src[i].stride;
5353 }
5354 if (stride != 1)
5355 fprintf(file, "<%u>", stride);
5356
5357 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
5358 }
5359
5360 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
5361 fprintf(file, ", ");
5362 }
5363
5364 fprintf(file, " ");
5365
5366 if (inst->force_writemask_all)
5367 fprintf(file, "NoMask ");
5368
5369 if (inst->exec_size != dispatch_width)
5370 fprintf(file, "group%d ", inst->group);
5371
5372 fprintf(file, "\n");
5373 }
5374
5375 /**
5376 * Possibly returns an instruction that set up @param reg.
5377 *
5378 * Sometimes we want to take the result of some expression/variable
5379 * dereference tree and rewrite the instruction generating the result
5380 * of the tree. When processing the tree, we know that the
5381 * instructions generated are all writing temporaries that are dead
5382 * outside of this tree. So, if we have some instructions that write
5383 * a temporary, we're free to point that temp write somewhere else.
5384 *
5385 * Note that this doesn't guarantee that the instruction generated wrote
5386 * only reg -- it might be the size=4 destination of a texture instruction.
5387 */
5388 fs_inst *
5389 fs_visitor::get_instruction_generating_reg(fs_inst *start,
5390 fs_inst *end,
5391 const fs_reg &reg)
5392 {
5393 if (end == start ||
5394 end->is_partial_write() ||
5395 !reg.equals(end->dst)) {
5396 return NULL;
5397 } else {
5398 return end;
5399 }
5400 }
5401
5402 void
5403 fs_visitor::setup_fs_payload_gen6()
5404 {
5405 assert(stage == MESA_SHADER_FRAGMENT);
5406 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
5407
5408 unsigned barycentric_interp_modes =
5409 (stage == MESA_SHADER_FRAGMENT) ?
5410 ((brw_wm_prog_data*) this->prog_data)->barycentric_interp_modes : 0;
5411
5412 assert(devinfo->gen >= 6);
5413
5414 /* R0-1: masks, pixel X/Y coordinates. */
5415 payload.num_regs = 2;
5416 /* R2: only for 32-pixel dispatch. */
5417
5418 /* R3-26: barycentric interpolation coordinates. These appear in the
5419 * same order that they appear in the brw_wm_barycentric_interp_mode
5420 * enum. Each set of coordinates occupies 2 registers if dispatch width
5421 * == 8 and 4 registers if dispatch width == 16. Coordinates only
5422 * appear if they were enabled using the "Barycentric Interpolation
5423 * Mode" bits in WM_STATE.
5424 */
5425 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
5426 if (barycentric_interp_modes & (1 << i)) {
5427 payload.barycentric_coord_reg[i] = payload.num_regs;
5428 payload.num_regs += 2;
5429 if (dispatch_width == 16) {
5430 payload.num_regs += 2;
5431 }
5432 }
5433 }
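/* For illustration (not exhaustive): in a SIMD16 shader where only
 * BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC is enabled, the loop above sets
 * barycentric_coord_reg[0] = 2 and advances num_regs by 4, so those
 * coordinates occupy the four registers starting at g2.
 */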
5434
5435 /* R27: interpolated depth if uses source depth */
5436 prog_data->uses_src_depth =
5437 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5438 if (prog_data->uses_src_depth) {
5439 payload.source_depth_reg = payload.num_regs;
5440 payload.num_regs++;
5441 if (dispatch_width == 16) {
5442 /* R28: interpolated depth if not SIMD8. */
5443 payload.num_regs++;
5444 }
5445 }
5446
5447 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
5448 prog_data->uses_src_w =
5449 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5450 if (prog_data->uses_src_w) {
5451 payload.source_w_reg = payload.num_regs;
5452 payload.num_regs++;
5453 if (dispatch_width == 16) {
5454 /* R30: interpolated W if not SIMD8. */
5455 payload.num_regs++;
5456 }
5457 }
5458
5459 /* R31: MSAA position offsets. */
5460 if (prog_data->persample_dispatch &&
5461 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
5462 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
5463 *
5464 * "MSDISPMODE_PERSAMPLE is required in order to select
5465 * POSOFFSET_SAMPLE"
5466 *
5467 * So we can only really get sample positions if we are doing real
5468 * per-sample dispatch. If we need gl_SamplePosition and we don't have
5469 * persample dispatch, we hard-code it to 0.5.
5470 */
5471 prog_data->uses_pos_offset = true;
5472 payload.sample_pos_reg = payload.num_regs;
5473 payload.num_regs++;
5474 }
5475
5476 /* R32: MSAA input coverage mask */
5477 prog_data->uses_sample_mask =
5478 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
5479 if (prog_data->uses_sample_mask) {
5480 assert(devinfo->gen >= 7);
5481 payload.sample_mask_in_reg = payload.num_regs;
5482 payload.num_regs++;
5483 if (dispatch_width == 16) {
5484 /* R33: input coverage mask if not SIMD8. */
5485 payload.num_regs++;
5486 }
5487 }
5488
5489 /* R34-: bary for 32-pixel. */
5490 /* R58-59: interp W for 32-pixel. */
5491
5492 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
5493 source_depth_to_render_target = true;
5494 }
5495 }
5496
5497 void
5498 fs_visitor::setup_vs_payload()
5499 {
5500 /* R0: thread header, R1: urb handles */
5501 payload.num_regs = 2;
5502 }
5503
5529 void
5530 fs_visitor::setup_gs_payload()
5531 {
5532 assert(stage == MESA_SHADER_GEOMETRY);
5533
5534 struct brw_gs_prog_data *gs_prog_data =
5535 (struct brw_gs_prog_data *) prog_data;
5536 struct brw_vue_prog_data *vue_prog_data =
5537 (struct brw_vue_prog_data *) prog_data;
5538
5539 /* R0: thread header, R1: output URB handles */
5540 payload.num_regs = 2;
5541
5542 if (gs_prog_data->include_primitive_id) {
5543 /* R2: Primitive ID 0..7 */
5544 payload.num_regs++;
5545 }
5546
5547 /* Use a maximum of 24 registers for push-model inputs. */
5548 const unsigned max_push_components = 24;
5549
5550 /* If pushing our inputs would take too many registers, reduce the URB read
5551 * length (which is in HWords, or 8 registers), and resort to pulling.
5552 *
5553 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
5554 * have to multiply by VerticesIn to obtain the total storage requirement.
5555 */
5556 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
5557 max_push_components) {
5558 gs_prog_data->base.include_vue_handles = true;
5559
5560 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
5561 payload.num_regs += nir->info.gs.vertices_in;
5562
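/* Worked example (illustrative): for triangle input (vertices_in == 3),
 * 24 / 3 = 8, which ROUND_DOWN_TO(8, 8) leaves at 8, so urb_read_length
 * becomes 8 / 8 = 1.
 */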
5563 vue_prog_data->urb_read_length =
5564 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
5565 }
5566 }
5567
/**
 * We are building the local ID push constant data using the simplest possible
 * method. We simply push the local IDs directly as they should appear in the
 * registers for the uvec3 gl_LocalInvocationID variable.
 *
 * Therefore, for SIMD8, we use 3 full registers, and for SIMD16 we use 6
 * registers worth of push constant space.
 *
 * Note: Any updates to brw_cs_prog_local_id_payload_dwords,
 * fill_local_id_payload or fs_visitor::emit_cs_local_invocation_id_setup need
 * to be coordinated.
 *
 * FINISHME: There are a few easy optimizations to consider.
 *
 * 1. If gl_WorkGroupSize x, y or z is 1, we can just use zero, and there is
 *    no need for using push constant space for that dimension.
 *
 * 2. Since GL_MAX_COMPUTE_WORK_GROUP_SIZE is currently 1024 or less, we can
 *    easily use 16-bit words rather than 32-bit dwords in the push constant
 *    data.
 *
 * 3. If gl_WorkGroupSize x, y or z is small, then we can use bytes for
 *    conveying the data, and thereby reduce push constant usage.
 */
5568 void
5569 fs_visitor::setup_cs_payload()
5570 {
5571 assert(devinfo->gen >= 7);
5572 brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
5573
5574 payload.num_regs = 1;
5575
5576 if (nir->info.system_values_read & SYSTEM_BIT_LOCAL_INVOCATION_ID) {
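/* One register per 8 channels for each of the X, Y and Z components:
 * 3 registers in SIMD8, 6 in SIMD16 and 12 in SIMD32.
 */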
5577 prog_data->local_invocation_id_regs = dispatch_width * 3 / 8;
5578 payload.local_invocation_id_reg = payload.num_regs;
5579 payload.num_regs += prog_data->local_invocation_id_regs;
5580 }
5581 }
5582
5583 void
5584 fs_visitor::calculate_register_pressure()
5585 {
5586 invalidate_live_intervals();
5587 calculate_live_intervals();
5588
5589 unsigned num_instructions = 0;
5590 foreach_block(block, cfg)
5591 num_instructions += block->instructions.length();
5592
5593 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
5594
5595 for (unsigned reg = 0; reg < alloc.count; reg++) {
5596 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
5597 regs_live_at_ip[ip] += alloc.sizes[reg];
5598 }
5599 }
5600
5601 /**
5602 * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
5603 *
5604 * The needs_unlit_centroid_workaround ends up producing one of these per
5605 * channel of centroid input, so it's good to clean them up.
5606 *
5607 * An assumption here is that nothing ever modifies the dispatched pixels
5608 * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
5609 * dictates that anyway.
5610 */
5611 bool
5612 fs_visitor::opt_drop_redundant_mov_to_flags()
5613 {
5614 bool flag_mov_found[2] = {false};
5615 bool progress = false;
5616
5617 /* The instructions this pass removes can only have been added when this workaround is in effect. */
5618 if (!devinfo->needs_unlit_centroid_workaround)
5619 return false;
5620
5621 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5622 if (inst->is_control_flow()) {
5623 memset(flag_mov_found, 0, sizeof(flag_mov_found));
5624 } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
5625 if (!flag_mov_found[inst->flag_subreg]) {
5626 flag_mov_found[inst->flag_subreg] = true;
5627 } else {
5628 inst->remove(block);
5629 progress = true;
5630 }
5631 } else if (inst->flags_written()) {
5632 flag_mov_found[inst->flag_subreg] = false;
5633 }
5634 }
5635
5636 return progress;
5637 }
5638
5639 void
5640 fs_visitor::optimize()
5641 {
5642 /* Start by validating the shader we currently have. */
5643 validate();
5644
5645 /* bld is the common builder object pointing at the end of the program we
5646 * used to translate it into i965 IR. For the optimization and lowering
5647 * passes coming next, any code added after the end of the program without
5648 * having explicitly called fs_builder::at() clearly points at a mistake.
5649 * Ideally optimization passes wouldn't be part of the visitor so they
5650 * wouldn't have access to bld at all, but they do, so just in case some
5651 * pass forgets to ask for a location explicitly set it to NULL here to
5652 * make it trip. The dispatch width is initialized to a bogus value to
5653 * make sure that optimizations set the execution controls explicitly to
5654 * match the code they are manipulating instead of relying on the defaults.
5655 */
5656 bld = fs_builder(this, 64);
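/* 64 is deliberately not a valid execution width (those are 8, 16 and 32),
 * so any code relying on the default controls should trip quickly.
 */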
5657
5658 assign_constant_locations();
5659 lower_constant_loads();
5660
5661 validate();
5662
5663 split_virtual_grfs();
5664 validate();
5665
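/* OPT() runs a single pass, optionally dumps the IR when the pass made
 * progress and DEBUG_OPTIMIZER is enabled, validates the result, and
 * evaluates to whether the pass reported progress, so it can be used
 * directly in conditionals such as "if (OPT(lower_load_payload))".
 */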
5666 #define OPT(pass, args...) ({ \
5667 pass_num++; \
5668 bool this_progress = pass(args); \
5669 \
5670 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
5671 char filename[64]; \
5672 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
5673 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
5674 \
5675 backend_shader::dump_instructions(filename); \
5676 } \
5677 \
5678 validate(); \
5679 \
5680 progress = progress || this_progress; \
5681 this_progress; \
5682 })
5683
5684 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
5685 char filename[64];
5686 snprintf(filename, 64, "%s%d-%s-00-00-start",
5687 stage_abbrev, dispatch_width, nir->info.name);
5688
5689 backend_shader::dump_instructions(filename);
5690 }
5691
5692 bool progress = false;
5693 int iteration = 0;
5694 int pass_num = 0;
5695
5696 OPT(opt_drop_redundant_mov_to_flags);
5697
5698 do {
5699 progress = false;
5700 pass_num = 0;
5701 iteration++;
5702
5703 OPT(remove_duplicate_mrf_writes);
5704
5705 OPT(opt_algebraic);
5706 OPT(opt_cse);
5707 OPT(opt_copy_propagate);
5708 OPT(opt_predicated_break, this);
5709 OPT(opt_cmod_propagation);
5710 OPT(dead_code_eliminate);
5711 OPT(opt_peephole_sel);
5712 OPT(dead_control_flow_eliminate, this);
5713 OPT(opt_register_renaming);
5714 OPT(opt_saturate_propagation);
5715 OPT(register_coalesce);
5716 OPT(compute_to_mrf);
5717 OPT(eliminate_find_live_channel);
5718
5719 OPT(compact_virtual_grfs);
5720 } while (progress);
5721
5722 progress = false;
5723 pass_num = 0;
5724
5725 OPT(lower_simd_width);
5726
5727 /* After SIMD lowering just in case we had to unroll the EOT send. */
5728 OPT(opt_sampler_eot);
5729
5730 OPT(lower_logical_sends);
5731
5732 if (progress) {
5733 OPT(opt_copy_propagate);
5734 /* Only run after logical send lowering because it's easier to implement
5735 * in terms of physical sends.
5736 */
5737 if (OPT(opt_zero_samples))
5738 OPT(opt_copy_propagate);
5739 /* Run after logical send lowering to give it a chance to CSE the
5740 * LOAD_PAYLOAD instructions created to construct the payloads of
5741 * e.g. texturing messages in cases where it wasn't possible to CSE the
5742 * whole logical instruction.
5743 */
5744 OPT(opt_cse);
5745 OPT(register_coalesce);
5746 OPT(compute_to_mrf);
5747 OPT(dead_code_eliminate);
5748 OPT(remove_duplicate_mrf_writes);
5749 OPT(opt_peephole_sel);
5750 }
5751
5752 OPT(opt_redundant_discard_jumps);
5753
5754 if (OPT(lower_load_payload)) {
5755 split_virtual_grfs();
5756 OPT(register_coalesce);
5757 OPT(compute_to_mrf);
5758 OPT(dead_code_eliminate);
5759 }
5760
5761 if (OPT(lower_pack)) {
5762 OPT(register_coalesce);
5763 OPT(dead_code_eliminate);
5764 }
5765
5766 if (OPT(lower_d2x)) {
5767 OPT(opt_copy_propagate);
5768 OPT(dead_code_eliminate);
5769 }
5770
5771 OPT(opt_combine_constants);
5772 OPT(lower_integer_multiplication);
5773
5774 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
5775 OPT(opt_cmod_propagation);
5776 OPT(opt_cse);
5777 OPT(opt_copy_propagate);
5778 OPT(dead_code_eliminate);
5779 }
5780
5781 lower_uniform_pull_constant_loads();
5782
5783 validate();
5784 }
5785
5786 /**
5787 * Three-source instructions must have a GRF/MRF destination register.
5788 * An ARF NULL destination is not allowed. Fix that up by allocating a temporary GRF.
5789 */
5790 void
5791 fs_visitor::fixup_3src_null_dest()
5792 {
5793 bool progress = false;
5794
5795 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
5796 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
5797 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
5798 inst->dst.type);
5799 progress = true;
5800 }
5801 }
5802
5803 if (progress)
5804 invalidate_live_intervals();
5805 }
5806
5807 void
5808 fs_visitor::allocate_registers(bool allow_spilling)
5809 {
5810 bool allocated_without_spills;
5811
5812 static const enum instruction_scheduler_mode pre_modes[] = {
5813 SCHEDULE_PRE,
5814 SCHEDULE_PRE_NON_LIFO,
5815 SCHEDULE_PRE_LIFO,
5816 };
5817
5818 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
5819
5820 /* Try each scheduling heuristic to see if it can successfully register
5821 * allocate without spilling. They should be ordered by decreasing
5822 * performance but increasing likelihood of allocating.
5823 */
5824 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
5825 schedule_instructions(pre_modes[i]);
5826
5827 if (0) {
5828 assign_regs_trivial();
5829 allocated_without_spills = true;
5830 } else {
5831 allocated_without_spills = assign_regs(false, spill_all);
5832 }
5833 if (allocated_without_spills)
5834 break;
5835 }
5836
5837 if (!allocated_without_spills) {
5838 /* We assume that any spilling is worse than just dropping back to
5839 * SIMD8. There's probably actually some intermediate point where
5840 * SIMD16 with a couple of spills is still better.
5841 */
5842 if (dispatch_width > min_dispatch_width) {
5843 fail("Failure to register allocate. Reduce number of "
5844 "live scalar values to avoid this.");
5845 } else {
5846 compiler->shader_perf_log(log_data,
5847 "%s shader triggered register spilling. "
5848 "Try reducing the number of live scalar "
5849 "values to improve performance.\n",
5850 stage_name);
5851 }
5852
5853 /* Since we're out of heuristics, just go spill registers until we
5854 * get an allocation.
5855 */
5856 while (!assign_regs(true, spill_all)) {
5857 if (failed)
5858 break;
5859 }
5860 }
5861
5862 assert(last_scratch == 0 || allow_spilling);
5863
5864 /* This must come after all optimization and register allocation, since
5865 * it inserts dead code that happens to have side effects, and it does
5866 * so based on the actual physical registers in use.
5867 */
5868 insert_gen4_send_dependency_workarounds();
5869
5870 if (failed)
5871 return;
5872
5873 schedule_instructions(SCHEDULE_POST);
5874
5875 if (last_scratch > 0)
5876 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
5877 }
5878
5879 bool
5880 fs_visitor::run_vs(gl_clip_plane *clip_planes)
5881 {
5882 assert(stage == MESA_SHADER_VERTEX);
5883
5884 setup_vs_payload();
5885
5886 if (shader_time_index >= 0)
5887 emit_shader_time_begin();
5888
5889 emit_nir_code();
5890
5891 if (failed)
5892 return false;
5893
5894 compute_clip_distance(clip_planes);
5895
5896 emit_urb_writes();
5897
5898 if (shader_time_index >= 0)
5899 emit_shader_time_end();
5900
5901 calculate_cfg();
5902
5903 optimize();
5904
5905 assign_curb_setup();
5906 assign_vs_urb_setup();
5907
5908 fixup_3src_null_dest();
5909 allocate_registers(true);
5910
5911 return !failed;
5912 }
5913
5914 bool
5915 fs_visitor::run_tcs_single_patch()
5916 {
5917 assert(stage == MESA_SHADER_TESS_CTRL);
5918
5919 struct brw_tcs_prog_data *tcs_prog_data =
5920 (struct brw_tcs_prog_data *) prog_data;
5921
5922 /* r1-r4 contain the ICP handles. */
5923 payload.num_regs = 5;
5924
5925 if (shader_time_index >= 0)
5926 emit_shader_time_begin();
5927
5928 /* Initialize gl_InvocationID */
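/* brw_imm_uv() builds a UV immediate: eight unsigned 4-bit values packed
 * into a dword, so 0x76543210 gives channel i the value i. The move through
 * a UW temporary widens each nibble to a word before the final conversion
 * to UD.
 */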
5929 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
5930 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
5931 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
5932 bld.MOV(channels_ud, channels_uw);
5933
5934 if (tcs_prog_data->instances == 1) {
5935 invocation_id = channels_ud;
5936 } else {
5937 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
5938
5939 /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
5940 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
5941 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5942 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
5943 brw_imm_ud(INTEL_MASK(23, 17)));
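/* Shifting right by 17 would give the raw instance number; shifting by
 * 17 - 3 leaves it multiplied by 2^3 = 8.
 */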
5944 bld.SHR(instance_times_8, t, brw_imm_ud(17 - 3));
5945
5946 bld.ADD(invocation_id, instance_times_8, channels_ud);
5947 }
5948
5949 /* Fix the dispatch mask */
5950 if (nir->info.tcs.vertices_out % 8) {
5951 bld.CMP(bld.null_reg_ud(), invocation_id,
5952 brw_imm_ud(nir->info.tcs.vertices_out), BRW_CONDITIONAL_L);
5953 bld.IF(BRW_PREDICATE_NORMAL);
5954 }
5955
5956 emit_nir_code();
5957
5958 if (nir->info.tcs.vertices_out % 8) {
5959 bld.emit(BRW_OPCODE_ENDIF);
5960 }
5961
5962 /* Emit EOT write; set TR DS Cache bit */
5963 fs_reg srcs[3] = {
5964 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
5965 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
5966 fs_reg(brw_imm_ud(0)),
5967 };
5968 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
5969 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
5970
5971 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
5972 bld.null_reg_ud(), payload);
5973 inst->mlen = 3;
5974 inst->base_mrf = -1;
5975 inst->eot = true;
5976
5977 if (shader_time_index >= 0)
5978 emit_shader_time_end();
5979
5980 if (failed)
5981 return false;
5982
5983 calculate_cfg();
5984
5985 optimize();
5986
5987 assign_curb_setup();
5988 assign_tcs_single_patch_urb_setup();
5989
5990 fixup_3src_null_dest();
5991 allocate_registers(true);
5992
5993 return !failed;
5994 }
5995
5996 bool
5997 fs_visitor::run_tes()
5998 {
5999 assert(stage == MESA_SHADER_TESS_EVAL);
6000
6001 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
6002 payload.num_regs = 5;
6003
6004 if (shader_time_index >= 0)
6005 emit_shader_time_begin();
6006
6007 emit_nir_code();
6008
6009 if (failed)
6010 return false;
6011
6012 emit_urb_writes();
6013
6014 if (shader_time_index >= 0)
6015 emit_shader_time_end();
6016
6017 calculate_cfg();
6018
6019 optimize();
6020
6021 assign_curb_setup();
6022 assign_tes_urb_setup();
6023
6024 fixup_3src_null_dest();
6025 allocate_registers(true);
6026
6027 return !failed;
6028 }
6029
6030 bool
6031 fs_visitor::run_gs()
6032 {
6033 assert(stage == MESA_SHADER_GEOMETRY);
6034
6035 setup_gs_payload();
6036
6037 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
6038
6039 if (gs_compile->control_data_header_size_bits > 0) {
6040 /* Create a VGRF to store accumulated control data bits. */
6041 this->control_data_bits = vgrf(glsl_type::uint_type);
6042
6043 /* If we're outputting more than 32 control data bits, then EmitVertex()
6044 * will set control_data_bits to 0 after emitting the first vertex.
6045 * Otherwise, we need to initialize it to 0 here.
6046 */
6047 if (gs_compile->control_data_header_size_bits <= 32) {
6048 const fs_builder abld = bld.annotate("initialize control data bits");
6049 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
6050 }
6051 }
6052
6053 if (shader_time_index >= 0)
6054 emit_shader_time_begin();
6055
6056 emit_nir_code();
6057
6058 emit_gs_thread_end();
6059
6060 if (shader_time_index >= 0)
6061 emit_shader_time_end();
6062
6063 if (failed)
6064 return false;
6065
6066 calculate_cfg();
6067
6068 optimize();
6069
6070 assign_curb_setup();
6071 assign_gs_urb_setup();
6072
6073 fixup_3src_null_dest();
6074 allocate_registers(true);
6075
6076 return !failed;
6077 }
6078
6079 bool
6080 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
6081 {
6082 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
6083 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
6084
6085 assert(stage == MESA_SHADER_FRAGMENT);
6086
6087 if (devinfo->gen >= 6)
6088 setup_fs_payload_gen6();
6089 else
6090 setup_fs_payload_gen4();
6091
6092 if (0) {
6093 emit_dummy_fs();
6094 } else if (do_rep_send) {
6095 assert(dispatch_width == 16);
6096 emit_repclear_shader();
6097 } else {
6098 if (shader_time_index >= 0)
6099 emit_shader_time_begin();
6100
6101 calculate_urb_setup();
6102 if (nir->info.inputs_read > 0) {
6103 if (devinfo->gen < 6)
6104 emit_interpolation_setup_gen4();
6105 else
6106 emit_interpolation_setup_gen6();
6107 }
6108
6109 /* We handle discards by keeping track of the still-live pixels in f0.1.
6110 * Initialize it with the dispatched pixels.
6111 */
6112 if (wm_prog_data->uses_kill) {
6113 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
6114 discard_init->flag_subreg = 1;
6115 }
6116
6117 /* Generate FS IR for main(). (the visitor only descends into
6118 * functions called "main").
6119 */
6120 emit_nir_code();
6121
6122 if (failed)
6123 return false;
6124
6125 if (wm_prog_data->uses_kill)
6126 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
6127
6128 if (wm_key->alpha_test_func)
6129 emit_alpha_test();
6130
6131 emit_fb_writes();
6132
6133 if (shader_time_index >= 0)
6134 emit_shader_time_end();
6135
6136 calculate_cfg();
6137
6138 optimize();
6139
6140 assign_curb_setup();
6141 assign_urb_setup();
6142
6143 fixup_3src_null_dest();
6144 allocate_registers(allow_spilling);
6145
6146 if (failed)
6147 return false;
6148 }
6149
6150 return !failed;
6151 }
6152
6153 bool
6154 fs_visitor::run_cs()
6155 {
6156 assert(stage == MESA_SHADER_COMPUTE);
6157
6158 setup_cs_payload();
6159
6160 if (shader_time_index >= 0)
6161 emit_shader_time_begin();
6162
6163 if (devinfo->is_haswell && prog_data->total_shared > 0) {
6164 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
6165 const fs_builder abld = bld.exec_all().group(1, 0);
6166 abld.MOV(retype(suboffset(brw_sr0_reg(), 1), BRW_REGISTER_TYPE_UW),
6167 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
6168 }
6169
6170 emit_nir_code();
6171
6172 if (failed)
6173 return false;
6174
6175 emit_cs_terminate();
6176
6177 if (shader_time_index >= 0)
6178 emit_shader_time_end();
6179
6180 calculate_cfg();
6181
6182 optimize();
6183
6184 assign_curb_setup();
6185
6186 fixup_3src_null_dest();
6187 allocate_registers(true);
6188
6189 if (failed)
6190 return false;
6191
6192 return !failed;
6193 }
6194
6195 /**
6196 * Return a bitfield where bit n is set if barycentric interpolation mode n
6197 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
6198 */
6199 static unsigned
6200 brw_compute_barycentric_interp_modes(const struct brw_device_info *devinfo,
6201 bool shade_model_flat,
6202 bool persample_shading,
6203 const nir_shader *shader)
6204 {
6205 unsigned barycentric_interp_modes = 0;
6206
6207 nir_foreach_variable(var, &shader->inputs) {
6208 enum glsl_interp_qualifier interp_qualifier =
6209 (enum glsl_interp_qualifier)var->data.interpolation;
6210 bool is_centroid = var->data.centroid && !persample_shading;
6211 bool is_sample = var->data.sample || persample_shading;
6212 bool is_gl_Color = (var->data.location == VARYING_SLOT_COL0) ||
6213 (var->data.location == VARYING_SLOT_COL1);
6214
6215 /* Ignore WPOS and FACE, because they don't require interpolation. */
6216 if (var->data.location == VARYING_SLOT_POS ||
6217 var->data.location == VARYING_SLOT_FACE)
6218 continue;
6219
6220 /* Determine the set (or sets) of barycentric coordinates needed to
6221 * interpolate this variable. Note that when
6222 * brw->needs_unlit_centroid_workaround is set, centroid interpolation
6223 * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
6224 * for lit pixels, so we need both sets of barycentric coordinates.
6225 */
6226 if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
6227 if (is_centroid) {
6228 barycentric_interp_modes |=
6229 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
6230 } else if (is_sample) {
6231 barycentric_interp_modes |=
6232 1 << BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
6233 }
6234 if ((!is_centroid && !is_sample) ||
6235 devinfo->needs_unlit_centroid_workaround) {
6236 barycentric_interp_modes |=
6237 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
6238 }
6239 } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
6240 (!(shade_model_flat && is_gl_Color) &&
6241 interp_qualifier == INTERP_QUALIFIER_NONE)) {
6242 if (is_centroid) {
6243 barycentric_interp_modes |=
6244 1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
6245 } else if (is_sample) {
6246 barycentric_interp_modes |=
6247 1 << BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
6248 }
6249 if ((!is_centroid && !is_sample) ||
6250 devinfo->needs_unlit_centroid_workaround) {
6251 barycentric_interp_modes |=
6252 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
6253 }
6254 }
6255 }
6256
6257 return barycentric_interp_modes;
6258 }
6259
6260 static void
6261 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
6262 bool shade_model_flat, const nir_shader *shader)
6263 {
6264 prog_data->flat_inputs = 0;
6265
6266 nir_foreach_variable(var, &shader->inputs) {
6267 enum glsl_interp_qualifier interp_qualifier =
6268 (enum glsl_interp_qualifier)var->data.interpolation;
6269 bool is_gl_Color = (var->data.location == VARYING_SLOT_COL0) ||
6270 (var->data.location == VARYING_SLOT_COL1);
6271
6272 int input_index = prog_data->urb_setup[var->data.location];
6273
6274 if (input_index < 0)
6275 continue;
6276
6277 /* flat shading */
6278 if (interp_qualifier == INTERP_QUALIFIER_FLAT ||
6279 (shade_model_flat && is_gl_Color &&
6280 interp_qualifier == INTERP_QUALIFIER_NONE))
6281 prog_data->flat_inputs |= (1 << input_index);
6282 }
6283 }
6284
6285 static uint8_t
6286 computed_depth_mode(const nir_shader *shader)
6287 {
6288 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
6289 switch (shader->info.fs.depth_layout) {
6290 case FRAG_DEPTH_LAYOUT_NONE:
6291 case FRAG_DEPTH_LAYOUT_ANY:
6292 return BRW_PSCDEPTH_ON;
6293 case FRAG_DEPTH_LAYOUT_GREATER:
6294 return BRW_PSCDEPTH_ON_GE;
6295 case FRAG_DEPTH_LAYOUT_LESS:
6296 return BRW_PSCDEPTH_ON_LE;
6297 case FRAG_DEPTH_LAYOUT_UNCHANGED:
6298 return BRW_PSCDEPTH_OFF;
6299 }
6300 }
6301 return BRW_PSCDEPTH_OFF;
6302 }
6303
6304 const unsigned *
6305 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
6306 void *mem_ctx,
6307 const struct brw_wm_prog_key *key,
6308 struct brw_wm_prog_data *prog_data,
6309 const nir_shader *src_shader,
6310 struct gl_program *prog,
6311 int shader_time_index8, int shader_time_index16,
6312 bool allow_spilling,
6313 bool use_rep_send,
6314 unsigned *final_assembly_size,
6315 char **error_str)
6316 {
6317 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6318 shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
6319 true);
6320 brw_nir_lower_fs_inputs(shader);
6321 brw_nir_lower_fs_outputs(shader);
6322 shader = brw_postprocess_nir(shader, compiler->devinfo, true);
6323
6324 /* key->alpha_test_func means simulating alpha testing via discards,
6325 * so the shader definitely kills pixels.
6326 */
6327 prog_data->uses_kill = shader->info.fs.uses_discard || key->alpha_test_func;
6328 prog_data->uses_omask = key->multisample_fbo &&
6329 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
6330 prog_data->computed_depth_mode = computed_depth_mode(shader);
6331 prog_data->computed_stencil =
6332 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
6333
6334 prog_data->persample_dispatch =
6335 key->multisample_fbo &&
6336 (key->persample_interp ||
6337 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
6338 SYSTEM_BIT_SAMPLE_POS)) ||
6339 shader->info.fs.uses_sample_qualifier);
6340
6341 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
6342
6343 prog_data->barycentric_interp_modes =
6344 brw_compute_barycentric_interp_modes(compiler->devinfo,
6345 key->flat_shade,
6346 key->persample_interp,
6347 shader);
6348
6349 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
6350 uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
6351 unsigned simd8_grf_used = 0, simd16_grf_used = 0;
6352
6353 fs_visitor v8(compiler, log_data, mem_ctx, key,
6354 &prog_data->base, prog, shader, 8,
6355 shader_time_index8);
6356 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
6357 if (error_str)
6358 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
6359
6360 return NULL;
6361 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
6362 simd8_cfg = v8.cfg;
6363 simd8_grf_start = v8.payload.num_regs;
6364 simd8_grf_used = v8.grf_used;
6365 }
6366
6367 if (v8.max_dispatch_width >= 16 &&
6368 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
6369 /* Try a SIMD16 compile */
6370 fs_visitor v16(compiler, log_data, mem_ctx, key,
6371 &prog_data->base, prog, shader, 16,
6372 shader_time_index16);
6373 v16.import_uniforms(&v8);
6374 if (!v16.run_fs(allow_spilling, use_rep_send)) {
6375 compiler->shader_perf_log(log_data,
6376 "SIMD16 shader failed to compile: %s",
6377 v16.fail_msg);
6378 } else {
6379 simd16_cfg = v16.cfg;
6380 simd16_grf_start = v16.payload.num_regs;
6381 simd16_grf_used = v16.grf_used;
6382 }
6383 }
6384
6385 /* When the caller requests a repclear shader, they want SIMD16-only */
6386 if (use_rep_send)
6387 simd8_cfg = NULL;
6388
6389 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
6390 * at the top to select the shader. We've never implemented that.
6391 * Instead, we just give them exactly one shader and we pick the widest one
6392 * available.
6393 */
6394 if (compiler->devinfo->gen < 5 && simd16_cfg)
6395 simd8_cfg = NULL;
6396
6397 if (prog_data->persample_dispatch) {
6398 /* Starting with SandyBridge (where we first get MSAA), the different
6399 * pixel dispatch combinations are grouped into classifications A
6400 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
6401 * generations, the only configurations supporting persample dispatch
6402 * are those in which only one dispatch width is enabled.
6403 *
6404 * If computed depth is enabled, SNB only allows SIMD8 while IVB+
6405 * allow SIMD8 or SIMD16 so we choose SIMD16 if available.
6406 */
6407 if (compiler->devinfo->gen == 6 &&
6408 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) {
6409 simd16_cfg = NULL;
6410 } else if (simd16_cfg) {
6411 simd8_cfg = NULL;
6412 }
6413 }
6414
6415 /* We have to compute the flat inputs after the visitor is finished running
6416 * because it relies on prog_data->urb_setup which is computed in
6417 * fs_visitor::calculate_urb_setup().
6418 */
6419 brw_compute_flat_inputs(prog_data, key->flat_shade, shader);
6420
6421 fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
6422 v8.promoted_constants, v8.runtime_check_aads_emit,
6423 MESA_SHADER_FRAGMENT);
6424
6425 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
6426 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
6427 shader->info.label ? shader->info.label :
6428 "unnamed",
6429 shader->info.name));
6430 }
6431
6432 if (simd8_cfg) {
6433 prog_data->dispatch_8 = true;
6434 g.generate_code(simd8_cfg, 8);
6435 prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
6436 prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
6437
6438 if (simd16_cfg) {
6439 prog_data->dispatch_16 = true;
6440 prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
6441 prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
6442 prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
6443 }
6444 } else if (simd16_cfg) {
6445 prog_data->dispatch_16 = true;
6446 g.generate_code(simd16_cfg, 16);
6447 prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
6448 prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
6449 }
6450
6451 return g.get_assembly(final_assembly_size);
6452 }
6453
6454 fs_reg *
6455 fs_visitor::emit_cs_local_invocation_id_setup()
6456 {
6457 assert(stage == MESA_SHADER_COMPUTE);
6458
6459 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6460
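/* The payload holds X, then Y, then Z as three consecutive blocks of
 * dispatch_width / 8 registers each (see brw_cs_fill_local_id_payload()),
 * so each MOV below advances src.nr by one block.
 */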
6461 struct brw_reg src =
6462 brw_vec8_grf(payload.local_invocation_id_reg, 0);
6463 src = retype(src, BRW_REGISTER_TYPE_UD);
6464 bld.MOV(*reg, src);
6465 src.nr += dispatch_width / 8;
6466 bld.MOV(offset(*reg, bld, 1), src);
6467 src.nr += dispatch_width / 8;
6468 bld.MOV(offset(*reg, bld, 2), src);
6469
6470 return reg;
6471 }
6472
6473 fs_reg *
6474 fs_visitor::emit_cs_work_group_id_setup()
6475 {
6476 assert(stage == MESA_SHADER_COMPUTE);
6477
6478 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6479
6480 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
6481 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
6482 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
6483
6484 bld.MOV(*reg, r0_1);
6485 bld.MOV(offset(*reg, bld, 1), r0_6);
6486 bld.MOV(offset(*reg, bld, 2), r0_7);
6487
6488 return reg;
6489 }
6490
6491 const unsigned *
6492 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
6493 void *mem_ctx,
6494 const struct brw_cs_prog_key *key,
6495 struct brw_cs_prog_data *prog_data,
6496 const nir_shader *src_shader,
6497 int shader_time_index,
6498 unsigned *final_assembly_size,
6499 char **error_str)
6500 {
6501 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6502 shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
6503 true);
6504 brw_nir_lower_cs_shared(shader);
6505 prog_data->base.total_shared += shader->num_shared;
6506 shader = brw_postprocess_nir(shader, compiler->devinfo, true);
6507
6508 prog_data->local_size[0] = shader->info.cs.local_size[0];
6509 prog_data->local_size[1] = shader->info.cs.local_size[1];
6510 prog_data->local_size[2] = shader->info.cs.local_size[2];
6511 unsigned local_workgroup_size =
6512 shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
6513 shader->info.cs.local_size[2];
6514
6515 unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
6516 unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
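/* E.g. a 1024-invocation workgroup on hardware with max_cs_threads == 64
 * yields simd_required == 16, ruling out a SIMD8 compile.
 */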
6517
6518 cfg_t *cfg = NULL;
6519 const char *fail_msg = NULL;
6520
6521 /* Now the main event: Visit the shader IR and generate our CS IR for it.
6522 */
6523 fs_visitor v8(compiler, log_data, mem_ctx, key, &prog_data->base,
6524 NULL, /* Never used in core profile */
6525 shader, 8, shader_time_index);
6526 if (simd_required <= 8) {
6527 if (!v8.run_cs()) {
6528 fail_msg = v8.fail_msg;
6529 } else {
6530 cfg = v8.cfg;
6531 prog_data->simd_size = 8;
6532 prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
6533 }
6534 }
6535
6536 fs_visitor v16(compiler, log_data, mem_ctx, key, &prog_data->base,
6537 NULL, /* Never used in core profile */
6538 shader, 16, shader_time_index);
6539 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
6540 !fail_msg && v8.max_dispatch_width >= 16 &&
6541 simd_required <= 16) {
6542 /* Try a SIMD16 compile */
6543 if (simd_required <= 8)
6544 v16.import_uniforms(&v8);
6545 if (!v16.run_cs()) {
6546 compiler->shader_perf_log(log_data,
6547 "SIMD16 shader failed to compile: %s",
6548 v16.fail_msg);
6549 if (!cfg) {
6550 fail_msg =
6551 "Couldn't generate SIMD16 program and not "
6552 "enough threads for SIMD8";
6553 }
6554 } else {
6555 cfg = v16.cfg;
6556 prog_data->simd_size = 16;
6557 prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
6558 }
6559 }
6560
6561 fs_visitor v32(compiler, log_data, mem_ctx, key, &prog_data->base,
6562 NULL, /* Never used in core profile */
6563 shader, 32, shader_time_index);
6564 if (!fail_msg && v8.max_dispatch_width >= 32 &&
6565 (simd_required > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
6566 /* Try a SIMD32 compile */
6567 if (simd_required <= 8)
6568 v32.import_uniforms(&v8);
6569 else if (simd_required <= 16)
6570 v32.import_uniforms(&v16);
6571
6572 if (!v32.run_cs()) {
6573 compiler->shader_perf_log(log_data,
6574 "SIMD32 shader failed to compile: %s",
6575 v32.fail_msg);
6576 if (!cfg) {
6577 fail_msg =
6578 "Couldn't generate SIMD32 program and not "
6579 "enough threads for SIMD16";
6580 }
6581 } else {
6582 cfg = v32.cfg;
6583 prog_data->simd_size = 32;
6584 }
6585 }
6586
6587 if (unlikely(cfg == NULL)) {
6588 assert(fail_msg);
6589 if (error_str)
6590 *error_str = ralloc_strdup(mem_ctx, fail_msg);
6591
6592 return NULL;
6593 }
6594
6595 fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
6596 v8.promoted_constants, v8.runtime_check_aads_emit,
6597 MESA_SHADER_COMPUTE);
6598 if (INTEL_DEBUG & DEBUG_CS) {
6599 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
6600 shader->info.label ? shader->info.label :
6601 "unnamed",
6602 shader->info.name);
6603 g.enable_debug(name);
6604 }
6605
6606 g.generate_code(cfg, prog_data->simd_size);
6607
6608 return g.get_assembly(final_assembly_size);
6609 }
6610
6611 void
6612 brw_cs_fill_local_id_payload(const struct brw_cs_prog_data *prog_data,
6613 void *buffer, uint32_t threads, uint32_t stride)
6614 {
6615 if (prog_data->local_invocation_id_regs == 0)
6616 return;
6617
6618 /* 'stride' should be an integer number of registers, that is, a multiple
6619 * of 32 bytes.
6620 */
6621 assert(stride % 32 == 0);
6622
6623 unsigned x = 0, y = 0, z = 0;
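/* Walk the invocations in X-major order: x varies fastest, then y, then z.
 * As an illustrative sketch, with local_size = (4, 2, 1) and a SIMD8 program
 * the first thread's three blocks would read:
 *
 *    X: 0 1 2 3 0 1 2 3
 *    Y: 0 0 0 0 1 1 1 1
 *    Z: 0 0 0 0 0 0 0 0
 */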
6624 for (unsigned t = 0; t < threads; t++) {
6625 uint32_t *param = (uint32_t *) buffer + stride * t / 4;
6626
6627 for (unsigned i = 0; i < prog_data->simd_size; i++) {
6628 param[0 * prog_data->simd_size + i] = x;
6629 param[1 * prog_data->simd_size + i] = y;
6630 param[2 * prog_data->simd_size + i] = z;
6631
6632 x++;
6633 if (x == prog_data->local_size[0]) {
6634 x = 0;
6635 y++;
6636 if (y == prog_data->local_size[1]) {
6637 y = 0;
6638 z++;
6639 if (z == prog_data->local_size[2])
6640 z = 0;
6641 }
6642 }
6643 }
6644 }
6645 }