1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs.cpp
25 *
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
28 * from the LIR.
29 */
30
31 #include "main/macros.h"
32 #include "brw_eu.h"
33 #include "brw_fs.h"
34 #include "brw_nir.h"
35 #include "brw_vec4_gs_visitor.h"
36 #include "brw_cfg.h"
37 #include "brw_dead_control_flow.h"
38 #include "common/gen_debug.h"
39 #include "compiler/glsl_types.h"
40 #include "compiler/nir/nir_builder.h"
41 #include "program/prog_parameter.h"
42
43 using namespace brw;
44
45 static unsigned get_lowered_simd_width(const struct gen_device_info *devinfo,
46 const fs_inst *inst);
47
48 void
49 fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
50 const fs_reg *src, unsigned sources)
51 {
52 memset(this, 0, sizeof(*this));
53
54 this->src = new fs_reg[MAX2(sources, 3)];
55 for (unsigned i = 0; i < sources; i++)
56 this->src[i] = src[i];
57
58 this->opcode = opcode;
59 this->dst = dst;
60 this->sources = sources;
61 this->exec_size = exec_size;
62 this->base_mrf = -1;
63
64 assert(dst.file != IMM && dst.file != UNIFORM);
65
66 assert(this->exec_size != 0);
67
68 this->conditional_mod = BRW_CONDITIONAL_NONE;
69
70 /* This will be the case for almost all instructions. */
71 switch (dst.file) {
72 case VGRF:
73 case ARF:
74 case FIXED_GRF:
75 case MRF:
76 case ATTR:
77 this->size_written = dst.component_size(exec_size);
78 break;
79 case BAD_FILE:
80 this->size_written = 0;
81 break;
82 case IMM:
83 case UNIFORM:
84 unreachable("Invalid destination register file");
85 }
86
87 this->writes_accumulator = false;
88 }
89
90 fs_inst::fs_inst()
91 {
92 init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
93 }
94
95 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
96 {
97 init(opcode, exec_size, reg_undef, NULL, 0);
98 }
99
100 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
101 {
102 init(opcode, exec_size, dst, NULL, 0);
103 }
104
105 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
106 const fs_reg &src0)
107 {
108 const fs_reg src[1] = { src0 };
109 init(opcode, exec_size, dst, src, 1);
110 }
111
112 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
113 const fs_reg &src0, const fs_reg &src1)
114 {
115 const fs_reg src[2] = { src0, src1 };
116 init(opcode, exec_size, dst, src, 2);
117 }
118
119 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
120 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
121 {
122 const fs_reg src[3] = { src0, src1, src2 };
123 init(opcode, exec_size, dst, src, 3);
124 }
125
126 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
127 const fs_reg src[], unsigned sources)
128 {
129 init(opcode, exec_width, dst, src, sources);
130 }
131
132 fs_inst::fs_inst(const fs_inst &that)
133 {
134 memcpy(this, &that, sizeof(that));
135
136 this->src = new fs_reg[MAX2(that.sources, 3)];
137
138 for (unsigned i = 0; i < that.sources; i++)
139 this->src[i] = that.src[i];
140 }
141
142 fs_inst::~fs_inst()
143 {
144 delete[] this->src;
145 }
146
147 void
148 fs_inst::resize_sources(uint8_t num_sources)
149 {
150 if (this->sources != num_sources) {
151 fs_reg *src = new fs_reg[MAX2(num_sources, 3)];
152
153 for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
154 src[i] = this->src[i];
155
156 delete[] this->src;
157 this->src = src;
158 this->sources = num_sources;
159 }
160 }
161
162 void
163 fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
164 const fs_reg &dst,
165 const fs_reg &surf_index,
166 const fs_reg &varying_offset,
167 uint32_t const_offset)
168 {
169 /* We have our constant surface use a pitch of 4 bytes, so our index can
170 * be any component of a vector, and then we load 4 contiguous
171 * components starting from that.
172 *
173     * We break down the const_offset into a portion added to the variable offset
174 * and a portion done using fs_reg::offset, which means that if you have
175 * GLSL using something like "uniform vec4 a[20]; gl_FragColor = a[i]",
176 * we'll temporarily generate 4 vec4 loads from offset i * 4, and CSE can
177 * later notice that those loads are all the same and eliminate the
178 * redundant ones.
179 */
180 fs_reg vec4_offset = vgrf(glsl_type::uint_type);
181 bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));
182
183 /* The pull load message will load a vec4 (16 bytes). If we are loading
184 * a double this means we are only loading 2 elements worth of data.
185 * We also want to use a 32-bit data type for the dst of the load operation
186 * so other parts of the driver don't get confused about the size of the
187 * result.
188 */
189 fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
190 fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
191 vec4_result, surf_index, vec4_offset);
192 inst->size_written = 4 * vec4_result.component_size(inst->exec_size);
193
194 if (type_sz(dst.type) == 8) {
195 shuffle_32bit_load_result_to_64bit_data(
196 bld, retype(vec4_result, dst.type), vec4_result, 2);
197 }
198
199 vec4_result.type = dst.type;
200 bld.MOV(dst, offset(vec4_result, bld,
201 (const_offset & 0xf) / type_sz(vec4_result.type)));
202 }
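/* Worked example of the offset split above: with const_offset == 20, the ADD
 * produces varying_offset + 16 (the 16-byte-aligned portion), the pull load
 * fetches that entire vec4, and the final MOV selects component
 * (20 & 0xf) / 4 == 1 of vec4_result.
 */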
203
204 /**
205 * A helper for MOV generation for fixing up broken hardware SEND dependency
206 * handling.
207 */
208 void
209 fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
210 {
211    /* The caller always wants this MOV to be uncompressed, both to emit the
212     * minimal extra dependencies and to avoid having to align its regs to 2.
213 */
214 const fs_builder ubld = bld.annotate("send dependency resolve")
215 .half(0);
216
217 ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
218 }
219
220 bool
221 fs_inst::equals(fs_inst *inst) const
222 {
223 return (opcode == inst->opcode &&
224 dst.equals(inst->dst) &&
225 src[0].equals(inst->src[0]) &&
226 src[1].equals(inst->src[1]) &&
227 src[2].equals(inst->src[2]) &&
228 saturate == inst->saturate &&
229 predicate == inst->predicate &&
230 conditional_mod == inst->conditional_mod &&
231 mlen == inst->mlen &&
232 base_mrf == inst->base_mrf &&
233 target == inst->target &&
234 eot == inst->eot &&
235 header_size == inst->header_size &&
236 shadow_compare == inst->shadow_compare &&
237 exec_size == inst->exec_size &&
238 offset == inst->offset);
239 }
240
241 bool
242 fs_inst::is_send_from_grf() const
243 {
244 switch (opcode) {
245 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
246 case SHADER_OPCODE_SHADER_TIME_ADD:
247 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
248 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
249 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
250 case SHADER_OPCODE_UNTYPED_ATOMIC:
251 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
252 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
253 case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
254 case SHADER_OPCODE_TYPED_ATOMIC:
255 case SHADER_OPCODE_TYPED_SURFACE_READ:
256 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
257 case SHADER_OPCODE_URB_WRITE_SIMD8:
258 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
259 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
260 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
261 case SHADER_OPCODE_URB_READ_SIMD8:
262 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
263 return true;
264 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
265 return src[1].file == VGRF;
266 case FS_OPCODE_FB_WRITE:
267 case FS_OPCODE_FB_READ:
268 return src[0].file == VGRF;
269 default:
270 if (is_tex())
271 return src[0].file == VGRF;
272
273 return false;
274 }
275 }
276
277 /**
278 * Returns true if this instruction's sources and destinations cannot
279 * safely be the same register.
280 *
281 * In most cases, a register can be written over safely by the same
282 * instruction that is its last use. For a single instruction, the
283 * sources are dereferenced before writing of the destination starts
284 * (naturally).
285 *
286 * However, there are a few cases where this can be problematic:
287 *
288 * - Virtual opcodes that translate to multiple instructions in the
289 * code generator: if src == dst and one instruction writes the
290 * destination before a later instruction reads the source, then
291 * src will have been clobbered.
292 *
293 * - SIMD16 compressed instructions with certain regioning (see below).
294 *
295 * The register allocator uses this information to set up conflicts between
296 * GRF sources and the destination.
297 */
298 bool
299 fs_inst::has_source_and_destination_hazard() const
300 {
301 switch (opcode) {
302 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
303 /* Multiple partial writes to the destination */
304 return true;
305 default:
306 /* The SIMD16 compressed instruction
307 *
308 * add(16) g4<1>F g4<8,8,1>F g6<8,8,1>F
309 *
310 * is actually decoded in hardware as:
311 *
312 * add(8) g4<1>F g4<8,8,1>F g6<8,8,1>F
313 * add(8) g5<1>F g5<8,8,1>F g7<8,8,1>F
314 *
315 * Which is safe. However, if we have uniform accesses
316 * happening, we get into trouble:
317 *
318 * add(8) g4<1>F g4<0,1,0>F g6<8,8,1>F
319 * add(8) g5<1>F g4<0,1,0>F g7<8,8,1>F
320 *
321 * Now our destination for the first instruction overwrote the
322 * second instruction's src0, and we get garbage for those 8
323 * pixels. There's a similar issue for the pre-gen6
324 * pixel_x/pixel_y, which are registers of 16-bit values and thus
325 * would get stomped by the first decode as well.
326 */
327 if (exec_size == 16) {
328 for (int i = 0; i < sources; i++) {
329 if (src[i].file == VGRF && (src[i].stride == 0 ||
330 src[i].type == BRW_REGISTER_TYPE_UW ||
331 src[i].type == BRW_REGISTER_TYPE_W ||
332 src[i].type == BRW_REGISTER_TYPE_UB ||
333 src[i].type == BRW_REGISTER_TYPE_B)) {
334 return true;
335 }
336 }
337 }
338 return false;
339 }
340 }
341
342 bool
343 fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
344 {
345 if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
346 return false;
347
348 fs_reg reg = this->src[0];
349 if (reg.file != VGRF || reg.offset != 0 || reg.stride != 1)
350 return false;
351
352 if (grf_alloc.sizes[reg.nr] * REG_SIZE != this->size_written)
353 return false;
354
355 for (int i = 0; i < this->sources; i++) {
356 reg.type = this->src[i].type;
357 if (!this->src[i].equals(reg))
358 return false;
359
360 if (i < this->header_size) {
361 reg.offset += REG_SIZE;
362 } else {
363 reg = horiz_offset(reg, this->exec_size);
364 }
365 }
366
367 return true;
368 }
369
370 bool
371 fs_inst::can_do_source_mods(const struct gen_device_info *devinfo)
372 {
373 if (devinfo->gen == 6 && is_math())
374 return false;
375
376 if (is_send_from_grf())
377 return false;
378
379 if (!backend_instruction::can_do_source_mods())
380 return false;
381
382 return true;
383 }
384
385 bool
386 fs_inst::can_change_types() const
387 {
388 return dst.type == src[0].type &&
389 !src[0].abs && !src[0].negate && !saturate &&
390 (opcode == BRW_OPCODE_MOV ||
391 (opcode == BRW_OPCODE_SEL &&
392 dst.type == src[1].type &&
393 predicate != BRW_PREDICATE_NONE &&
394 !src[1].abs && !src[1].negate));
395 }
396
397 void
398 fs_reg::init()
399 {
400 memset(this, 0, sizeof(*this));
401 type = BRW_REGISTER_TYPE_UD;
402 stride = 1;
403 }
404
405 /** Generic unset register constructor. */
406 fs_reg::fs_reg()
407 {
408 init();
409 this->file = BAD_FILE;
410 }
411
412 fs_reg::fs_reg(struct ::brw_reg reg) :
413 backend_reg(reg)
414 {
415 this->offset = 0;
416 this->stride = 1;
417 if (this->file == IMM &&
418 (this->type != BRW_REGISTER_TYPE_V &&
419 this->type != BRW_REGISTER_TYPE_UV &&
420 this->type != BRW_REGISTER_TYPE_VF)) {
421 this->stride = 0;
422 }
423 }
424
425 bool
426 fs_reg::equals(const fs_reg &r) const
427 {
428 return (this->backend_reg::equals(r) &&
429 stride == r.stride);
430 }
431
432 bool
433 fs_reg::is_contiguous() const
434 {
435 return stride == 1;
436 }
437
438 unsigned
439 fs_reg::component_size(unsigned width) const
440 {
441 const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
442 hstride == 0 ? 0 :
443 1 << (hstride - 1));
444 return MAX2(width * stride, 1) * type_sz(type);
445 }
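/* For instance, an 8-wide region of a stride-1 float VGRF spans
 * MAX2(8 * 1, 1) * 4 == 32 bytes (one full GRF), while a stride-0
 * (uniform-style) region of the same type spans only 4 bytes regardless of
 * the width.
 */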
446
447 extern "C" int
448 type_size_scalar(const struct glsl_type *type)
449 {
450 unsigned int size, i;
451
452 switch (type->base_type) {
453 case GLSL_TYPE_UINT:
454 case GLSL_TYPE_INT:
455 case GLSL_TYPE_FLOAT:
456 case GLSL_TYPE_BOOL:
457 return type->components();
458 case GLSL_TYPE_UINT16:
459 case GLSL_TYPE_INT16:
460 case GLSL_TYPE_FLOAT16:
461 return DIV_ROUND_UP(type->components(), 2);
462 case GLSL_TYPE_DOUBLE:
463 case GLSL_TYPE_UINT64:
464 case GLSL_TYPE_INT64:
465 return type->components() * 2;
466 case GLSL_TYPE_ARRAY:
467 return type_size_scalar(type->fields.array) * type->length;
468 case GLSL_TYPE_STRUCT:
469 size = 0;
470 for (i = 0; i < type->length; i++) {
471 size += type_size_scalar(type->fields.structure[i].type);
472 }
473 return size;
474 case GLSL_TYPE_SAMPLER:
475 /* Samplers take up no register space, since they're baked in at
476 * link time.
477 */
478 return 0;
479 case GLSL_TYPE_ATOMIC_UINT:
480 return 0;
481 case GLSL_TYPE_SUBROUTINE:
482 return 1;
483 case GLSL_TYPE_IMAGE:
484 return BRW_IMAGE_PARAM_SIZE;
485 case GLSL_TYPE_VOID:
486 case GLSL_TYPE_ERROR:
487 case GLSL_TYPE_INTERFACE:
488 case GLSL_TYPE_FUNCTION:
489 unreachable("not reached");
490 }
491
492 return 0;
493 }
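/* A few resulting sizes, in 32-bit scalar slots: a vec4 is 4, a mat3 is 9
 * (components() counts all nine floats), a dvec2 is 4, a half-float vec3 is
 * DIV_ROUND_UP(3, 2) == 2, and "vec4 a[20]" is 20 * 4 == 80. Structs simply
 * sum their members.
 */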
494
495 /**
496 * Create a MOV to read the timestamp register.
497 *
498 * The caller is responsible for emitting the MOV. The return value is
499 * the destination of the MOV, with extra parameters set.
500 */
501 fs_reg
502 fs_visitor::get_timestamp(const fs_builder &bld)
503 {
504 assert(devinfo->gen >= 7);
505
506 fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
507 BRW_ARF_TIMESTAMP,
508 0),
509 BRW_REGISTER_TYPE_UD));
510
511 fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
512
513    /* We want to read the 3 fields we care about even if the corresponding
514     * channels aren't enabled in the dispatch.
515 */
516 bld.group(4, 0).exec_all().MOV(dst, ts);
517
518 return dst;
519 }
520
521 void
522 fs_visitor::emit_shader_time_begin()
523 {
524 /* We want only the low 32 bits of the timestamp. Since it's running
525     * at the GPU clock rate of ~1.2GHz, it will roll over every ~3 seconds,
526 * which is plenty of time for our purposes. It is identical across the
527 * EUs, but since it's tracking GPU core speed it will increment at a
528 * varying rate as render P-states change.
529 */
530 shader_start_time = component(
531 get_timestamp(bld.annotate("shader time start")), 0);
532 }
533
534 void
535 fs_visitor::emit_shader_time_end()
536 {
537 /* Insert our code just before the final SEND with EOT. */
538 exec_node *end = this->instructions.get_tail();
539 assert(end && ((fs_inst *) end)->eot);
540 const fs_builder ibld = bld.annotate("shader time end")
541 .exec_all().at(NULL, end);
542 const fs_reg timestamp = get_timestamp(ibld);
543
544 /* We only use the low 32 bits of the timestamp - see
545     * emit_shader_time_begin().
546 *
547 * We could also check if render P-states have changed (or anything
548 * else that might disrupt timing) by setting smear to 2 and checking if
549 * that field is != 0.
550 */
551 const fs_reg shader_end_time = component(timestamp, 0);
552
553 /* Check that there weren't any timestamp reset events (assuming these
554 * were the only two timestamp reads that happened).
555 */
556 const fs_reg reset = component(timestamp, 2);
557 set_condmod(BRW_CONDITIONAL_Z,
558 ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
559 ibld.IF(BRW_PREDICATE_NORMAL);
560
561 fs_reg start = shader_start_time;
562 start.negate = true;
563 const fs_reg diff = component(fs_reg(VGRF, alloc.allocate(1),
564 BRW_REGISTER_TYPE_UD),
565 0);
566 const fs_builder cbld = ibld.group(1, 0);
567 cbld.group(1, 0).ADD(diff, start, shader_end_time);
568
569 /* If there were no instructions between the two timestamp gets, the diff
570 * is 2 cycles. Remove that overhead, so I can forget about that when
571 * trying to determine the time taken for single instructions.
572 */
573 cbld.ADD(diff, diff, brw_imm_ud(-2u));
574 SHADER_TIME_ADD(cbld, 0, diff);
575 SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
576 ibld.emit(BRW_OPCODE_ELSE);
577 SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
578 ibld.emit(BRW_OPCODE_ENDIF);
579 }
580
581 void
582 fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
583 int shader_time_subindex,
584 fs_reg value)
585 {
586 int index = shader_time_index * 3 + shader_time_subindex;
587 struct brw_reg offset = brw_imm_d(index * BRW_SHADER_TIME_STRIDE);
588
589 fs_reg payload;
590 if (dispatch_width == 8)
591 payload = vgrf(glsl_type::uvec2_type);
592 else
593 payload = vgrf(glsl_type::uint_type);
594
595 bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
596 }
597
598 void
599 fs_visitor::vfail(const char *format, va_list va)
600 {
601 char *msg;
602
603 if (failed)
604 return;
605
606 failed = true;
607
608 msg = ralloc_vasprintf(mem_ctx, format, va);
609 msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);
610
611 this->fail_msg = msg;
612
613 if (debug_enabled) {
614 fprintf(stderr, "%s", msg);
615 }
616 }
617
618 void
619 fs_visitor::fail(const char *format, ...)
620 {
621 va_list va;
622
623 va_start(va, format);
624 vfail(format, va);
625 va_end(va);
626 }
627
628 /**
629 * Mark this program as impossible to compile with dispatch width greater
630 * than n.
631 *
632 * During the SIMD8 compile (which happens first), we can detect and flag
633 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
634 * SIMD16+ compile altogether.
635 *
636 * During a compile of dispatch width greater than n (if one happens anyway),
637 * this just calls fail().
638 */
639 void
640 fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
641 {
642 if (dispatch_width > n) {
643 fail("%s", msg);
644 } else {
645 max_dispatch_width = n;
646 compiler->shader_perf_log(log_data,
647 "Shader dispatch width limited to SIMD%d: %s",
648 n, msg);
649 }
650 }
651
652 /**
653 * Returns true if the instruction has a flag that means it won't
654 * update an entire destination register.
655 *
656 * For example, dead code elimination and live variable analysis want to know
657 * when a write to a variable screens off any preceding values that were in
658 * it.
659 */
660 bool
661 fs_inst::is_partial_write() const
662 {
663 return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
664 (this->exec_size * type_sz(this->dst.type)) < 32 ||
665 !this->dst.is_contiguous() ||
666 this->dst.offset % REG_SIZE != 0);
667 }
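/* As an example, an unpredicated SIMD8 write to a packed 32-bit destination
 * covers 8 * 4 == 32 bytes (a whole GRF) and is not partial, while the same
 * SIMD8 write to a 16-bit destination only covers 16 bytes and therefore is.
 */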
668
669 unsigned
670 fs_inst::components_read(unsigned i) const
671 {
672 /* Return zero if the source is not present. */
673 if (src[i].file == BAD_FILE)
674 return 0;
675
676 switch (opcode) {
677 case FS_OPCODE_LINTERP:
678 if (i == 0)
679 return 2;
680 else
681 return 1;
682
683 case FS_OPCODE_PIXEL_X:
684 case FS_OPCODE_PIXEL_Y:
685 assert(i == 0);
686 return 2;
687
688 case FS_OPCODE_FB_WRITE_LOGICAL:
689 assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
690 /* First/second FB write color. */
691 if (i < 2)
692 return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
693 else
694 return 1;
695
696 case SHADER_OPCODE_TEX_LOGICAL:
697 case SHADER_OPCODE_TXD_LOGICAL:
698 case SHADER_OPCODE_TXF_LOGICAL:
699 case SHADER_OPCODE_TXL_LOGICAL:
700 case SHADER_OPCODE_TXS_LOGICAL:
701 case FS_OPCODE_TXB_LOGICAL:
702 case SHADER_OPCODE_TXF_CMS_LOGICAL:
703 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
704 case SHADER_OPCODE_TXF_UMS_LOGICAL:
705 case SHADER_OPCODE_TXF_MCS_LOGICAL:
706 case SHADER_OPCODE_LOD_LOGICAL:
707 case SHADER_OPCODE_TG4_LOGICAL:
708 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
709 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
710 assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
711 src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
712 /* Texture coordinates. */
713 if (i == TEX_LOGICAL_SRC_COORDINATE)
714 return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
715 /* Texture derivatives. */
716 else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
717 opcode == SHADER_OPCODE_TXD_LOGICAL)
718 return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
719 /* Texture offset. */
720 else if (i == TEX_LOGICAL_SRC_TG4_OFFSET)
721 return 2;
722 /* MCS */
723 else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
724 return 2;
725 else
726 return 1;
727
728 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
729 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
730 assert(src[3].file == IMM);
731 /* Surface coordinates. */
732 if (i == 0)
733 return src[3].ud;
734 /* Surface operation source (ignored for reads). */
735 else if (i == 1)
736 return 0;
737 else
738 return 1;
739
740 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
741 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
742 assert(src[3].file == IMM &&
743 src[4].file == IMM);
744 /* Surface coordinates. */
745 if (i == 0)
746 return src[3].ud;
747 /* Surface operation source. */
748 else if (i == 1)
749 return src[4].ud;
750 else
751 return 1;
752
753 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
754 assert(src[3].file == IMM &&
755 src[4].file == IMM);
756 return 1;
757
758 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
759 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
760 assert(src[3].file == IMM &&
761 src[4].file == IMM);
762 const unsigned op = src[4].ud;
763 /* Surface coordinates. */
764 if (i == 0)
765 return src[3].ud;
766 /* Surface operation source. */
767 else if (i == 1 && op == BRW_AOP_CMPWR)
768 return 2;
769 else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
770 op == BRW_AOP_PREDEC))
771 return 0;
772 else
773 return 1;
774 }
775
776 default:
777 return 1;
778 }
779 }
780
781 unsigned
782 fs_inst::size_read(int arg) const
783 {
784 switch (opcode) {
785 case FS_OPCODE_FB_WRITE:
786 case FS_OPCODE_FB_READ:
787 case SHADER_OPCODE_URB_WRITE_SIMD8:
788 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
789 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
790 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
791 case SHADER_OPCODE_URB_READ_SIMD8:
792 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
793 case SHADER_OPCODE_UNTYPED_ATOMIC:
794 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
795 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
796 case SHADER_OPCODE_TYPED_ATOMIC:
797 case SHADER_OPCODE_TYPED_SURFACE_READ:
798 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
799 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
800 case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
801 if (arg == 0)
802 return mlen * REG_SIZE;
803 break;
804
805 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
806 /* The payload is actually stored in src1 */
807 if (arg == 1)
808 return mlen * REG_SIZE;
809 break;
810
811 case FS_OPCODE_LINTERP:
812 if (arg == 1)
813 return 16;
814 break;
815
816 case SHADER_OPCODE_LOAD_PAYLOAD:
817 if (arg < this->header_size)
818 return REG_SIZE;
819 break;
820
821 case CS_OPCODE_CS_TERMINATE:
822 case SHADER_OPCODE_BARRIER:
823 return REG_SIZE;
824
825 case SHADER_OPCODE_MOV_INDIRECT:
826 if (arg == 0) {
827 assert(src[2].file == IMM);
828 return src[2].ud;
829 }
830 break;
831
832 default:
833 if (is_tex() && arg == 0 && src[0].file == VGRF)
834 return mlen * REG_SIZE;
835 break;
836 }
837
838 switch (src[arg].file) {
839 case UNIFORM:
840 case IMM:
841 return components_read(arg) * type_sz(src[arg].type);
842 case BAD_FILE:
843 case ARF:
844 case FIXED_GRF:
845 case VGRF:
846 case ATTR:
847 return components_read(arg) * src[arg].component_size(exec_size);
848 case MRF:
849 unreachable("MRF registers are not allowed as sources");
850 }
851 return 0;
852 }
853
854 namespace {
855 /* Return the subset of flag registers that an instruction could
856 * potentially read or write based on the execution controls and flag
857 * subregister number of the instruction.
858 */
859 unsigned
860 flag_mask(const fs_inst *inst)
861 {
862 const unsigned start = inst->flag_subreg * 16 + inst->group;
863 const unsigned end = start + inst->exec_size;
864 return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
865 }
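   /* Each bit of the returned mask stands for one byte (8 bits) of a flag
    * subregister. For example, a SIMD16 instruction with flag_subreg 0 and
    * group 0 yields 0x3 (both bytes of f0.0), while a SIMD8 instruction with
    * flag_subreg 1 and group 8 yields 0x8 (the upper byte of f0.1).
    */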
866
867 unsigned
868 bit_mask(unsigned n)
869 {
870 return (n >= CHAR_BIT * sizeof(bit_mask(n)) ? ~0u : (1u << n) - 1);
871 }
872
873 unsigned
874 flag_mask(const fs_reg &r, unsigned sz)
875 {
876 if (r.file == ARF) {
877 const unsigned start = (r.nr - BRW_ARF_FLAG) * 4 + r.subnr;
878 const unsigned end = start + sz;
879 return bit_mask(end) & ~bit_mask(start);
880 } else {
881 return 0;
882 }
883 }
884 }
885
886 unsigned
887 fs_inst::flags_read(const gen_device_info *devinfo) const
888 {
889 if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
890 predicate == BRW_PREDICATE_ALIGN1_ALLV) {
891 /* The vertical predication modes combine corresponding bits from
892 * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
893 */
894 const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
895 return flag_mask(this) << shift | flag_mask(this);
896 } else if (predicate) {
897 return flag_mask(this);
898 } else {
899 unsigned mask = 0;
900 for (int i = 0; i < sources; i++) {
901 mask |= flag_mask(src[i], size_read(i));
902 }
903 return mask;
904 }
905 }
906
907 unsigned
908 fs_inst::flags_written() const
909 {
910 if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
911 opcode != BRW_OPCODE_IF &&
912 opcode != BRW_OPCODE_WHILE)) ||
913 opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
914 return flag_mask(this);
915 } else {
916 return flag_mask(dst, size_written);
917 }
918 }
919
920 /**
921 * Returns how many MRFs an FS opcode will write over.
922 *
923 * Note that this is not the 0 or 1 implied writes in an actual gen
924 * instruction -- the FS opcodes often generate MOVs in addition.
925 */
926 int
927 fs_visitor::implied_mrf_writes(fs_inst *inst)
928 {
929 if (inst->mlen == 0)
930 return 0;
931
932 if (inst->base_mrf == -1)
933 return 0;
934
935 switch (inst->opcode) {
936 case SHADER_OPCODE_RCP:
937 case SHADER_OPCODE_RSQ:
938 case SHADER_OPCODE_SQRT:
939 case SHADER_OPCODE_EXP2:
940 case SHADER_OPCODE_LOG2:
941 case SHADER_OPCODE_SIN:
942 case SHADER_OPCODE_COS:
943 return 1 * dispatch_width / 8;
944 case SHADER_OPCODE_POW:
945 case SHADER_OPCODE_INT_QUOTIENT:
946 case SHADER_OPCODE_INT_REMAINDER:
947 return 2 * dispatch_width / 8;
948 case SHADER_OPCODE_TEX:
949 case FS_OPCODE_TXB:
950 case SHADER_OPCODE_TXD:
951 case SHADER_OPCODE_TXF:
952 case SHADER_OPCODE_TXF_CMS:
953 case SHADER_OPCODE_TXF_MCS:
954 case SHADER_OPCODE_TG4:
955 case SHADER_OPCODE_TG4_OFFSET:
956 case SHADER_OPCODE_TXL:
957 case SHADER_OPCODE_TXS:
958 case SHADER_OPCODE_LOD:
959 case SHADER_OPCODE_SAMPLEINFO:
960 return 1;
961 case FS_OPCODE_FB_WRITE:
962 return 2;
963 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
964 case SHADER_OPCODE_GEN4_SCRATCH_READ:
965 return 1;
966 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
967 return inst->mlen;
968 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
969 return inst->mlen;
970 default:
971 unreachable("not reached");
972 }
973 }
974
975 fs_reg
976 fs_visitor::vgrf(const glsl_type *const type)
977 {
978 int reg_width = dispatch_width / 8;
979 return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
980 brw_type_for_base_type(type));
981 }
982
983 fs_reg::fs_reg(enum brw_reg_file file, int nr)
984 {
985 init();
986 this->file = file;
987 this->nr = nr;
988 this->type = BRW_REGISTER_TYPE_F;
989 this->stride = (file == UNIFORM ? 0 : 1);
990 }
991
992 fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
993 {
994 init();
995 this->file = file;
996 this->nr = nr;
997 this->type = type;
998 this->stride = (file == UNIFORM ? 0 : 1);
999 }
1000
1001 /* For SIMD16, we need to follow the uniform setup of the SIMD8 dispatch.
1002  * This brings in those uniform definitions.
1003 */
1004 void
1005 fs_visitor::import_uniforms(fs_visitor *v)
1006 {
1007 this->push_constant_loc = v->push_constant_loc;
1008 this->pull_constant_loc = v->pull_constant_loc;
1009 this->uniforms = v->uniforms;
1010 this->subgroup_id = v->subgroup_id;
1011 }
1012
1013 void
1014 fs_visitor::emit_fragcoord_interpolation(fs_reg wpos)
1015 {
1016 assert(stage == MESA_SHADER_FRAGMENT);
1017
1018 /* gl_FragCoord.x */
1019 bld.MOV(wpos, this->pixel_x);
1020 wpos = offset(wpos, bld, 1);
1021
1022 /* gl_FragCoord.y */
1023 bld.MOV(wpos, this->pixel_y);
1024 wpos = offset(wpos, bld, 1);
1025
1026 /* gl_FragCoord.z */
1027 if (devinfo->gen >= 6) {
1028 bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
1029 } else {
1030 bld.emit(FS_OPCODE_LINTERP, wpos,
1031 this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
1032 interp_reg(VARYING_SLOT_POS, 2));
1033 }
1034 wpos = offset(wpos, bld, 1);
1035
1036 /* gl_FragCoord.w: Already set up in emit_interpolation */
1037 bld.MOV(wpos, this->wpos_w);
1038 }
1039
1040 enum brw_barycentric_mode
1041 brw_barycentric_mode(enum glsl_interp_mode mode, nir_intrinsic_op op)
1042 {
1043 /* Barycentric modes don't make sense for flat inputs. */
1044 assert(mode != INTERP_MODE_FLAT);
1045
1046 unsigned bary;
1047 switch (op) {
1048 case nir_intrinsic_load_barycentric_pixel:
1049 case nir_intrinsic_load_barycentric_at_offset:
1050 bary = BRW_BARYCENTRIC_PERSPECTIVE_PIXEL;
1051 break;
1052 case nir_intrinsic_load_barycentric_centroid:
1053 bary = BRW_BARYCENTRIC_PERSPECTIVE_CENTROID;
1054 break;
1055 case nir_intrinsic_load_barycentric_sample:
1056 case nir_intrinsic_load_barycentric_at_sample:
1057 bary = BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE;
1058 break;
1059 default:
1060 unreachable("invalid intrinsic");
1061 }
1062
1063 if (mode == INTERP_MODE_NOPERSPECTIVE)
1064 bary += 3;
1065
1066 return (enum brw_barycentric_mode) bary;
1067 }
1068
1069 /**
1070 * Turn one of the two CENTROID barycentric modes into PIXEL mode.
1071 */
1072 static enum brw_barycentric_mode
1073 centroid_to_pixel(enum brw_barycentric_mode bary)
1074 {
1075 assert(bary == BRW_BARYCENTRIC_PERSPECTIVE_CENTROID ||
1076 bary == BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);
1077 return (enum brw_barycentric_mode) ((unsigned) bary - 1);
1078 }
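/* Both the "+ 3" above and the "- 1" here depend on the layout of
 * enum brw_barycentric_mode, which lists the three perspective modes
 * (pixel, centroid, sample) immediately followed by their nonperspective
 * counterparts in the same order: adding 3 switches perspective to
 * nonperspective, and subtracting 1 turns either CENTROID mode into its
 * PIXEL counterpart.
 */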
1079
1080 fs_reg *
1081 fs_visitor::emit_frontfacing_interpolation()
1082 {
1083 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));
1084
1085 if (devinfo->gen >= 6) {
1086 /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
1087 * a boolean result from this (~0/true or 0/false).
1088 *
1089 * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
1090 * this task in only one instruction:
1091 * - a negation source modifier will flip the bit; and
1092 * - a W -> D type conversion will sign extend the bit into the high
1093 * word of the destination.
1094 *
1095 * An ASR 15 fills the low word of the destination.
1096 */
1097 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
1098 g0.negate = true;
1099
1100 bld.ASR(*reg, g0, brw_imm_d(15));
1101 } else {
1102 /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
1103 * a boolean result from this (1/true or 0/false).
1104 *
1105 * Like in the above case, since the bit is the MSB of g1.6:UD we can use
1106 * the negation source modifier to flip it. Unfortunately the SHR
1107 * instruction only operates on UD (or D with an abs source modifier)
1108 * sources without negation.
1109 *
1110 * Instead, use ASR (which will give ~0/true or 0/false).
1111 */
1112 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
1113 g1_6.negate = true;
1114
1115 bld.ASR(*reg, g1_6, brw_imm_d(31));
1116 }
1117
1118 return reg;
1119 }
1120
1121 void
1122 fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
1123 {
1124 assert(stage == MESA_SHADER_FRAGMENT);
1125 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
1126 assert(dst.type == BRW_REGISTER_TYPE_F);
1127
1128 if (wm_prog_data->persample_dispatch) {
1129 /* Convert int_sample_pos to floating point */
1130 bld.MOV(dst, int_sample_pos);
1131 /* Scale to the range [0, 1] */
1132 bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
1133 }
1134 else {
1135 /* From ARB_sample_shading specification:
1136 * "When rendering to a non-multisample buffer, or if multisample
1137 * rasterization is disabled, gl_SamplePosition will always be
1138        * (0.5, 0.5)."
1139 */
1140 bld.MOV(dst, brw_imm_f(0.5f));
1141 }
1142 }
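/* The sub-pixel X/Y offsets read from the payload are in 1/16-pixel units
 * (which is what the 1/16 scale above assumes), so e.g. a payload value of 8
 * maps to 0.5, the pixel center.
 */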
1143
1144 fs_reg *
1145 fs_visitor::emit_samplepos_setup()
1146 {
1147 assert(devinfo->gen >= 6);
1148
1149 const fs_builder abld = bld.annotate("compute sample position");
1150 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
1151 fs_reg pos = *reg;
1152 fs_reg int_sample_x = vgrf(glsl_type::int_type);
1153 fs_reg int_sample_y = vgrf(glsl_type::int_type);
1154
1155 /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
1156 * mode will be enabled.
1157 *
1158 * From the Ivy Bridge PRM, volume 2 part 1, page 344:
1159 * R31.1:0 Position Offset X/Y for Slot[3:0]
1160 * R31.3:2 Position Offset X/Y for Slot[7:4]
1161 * .....
1162 *
1163 * The X, Y sample positions come in as bytes in thread payload. So, read
1164 * the positions using vstride=16, width=8, hstride=2.
1165 */
1166 struct brw_reg sample_pos_reg =
1167 stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
1168 BRW_REGISTER_TYPE_B), 16, 8, 2);
1169
1170 if (dispatch_width == 8) {
1171 abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
1172 } else {
1173 abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
1174 abld.half(1).MOV(half(int_sample_x, 1),
1175 fs_reg(suboffset(sample_pos_reg, 16)));
1176 }
1177 /* Compute gl_SamplePosition.x */
1178 compute_sample_position(pos, int_sample_x);
1179 pos = offset(pos, abld, 1);
1180 if (dispatch_width == 8) {
1181 abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
1182 } else {
1183 abld.half(0).MOV(half(int_sample_y, 0),
1184 fs_reg(suboffset(sample_pos_reg, 1)));
1185 abld.half(1).MOV(half(int_sample_y, 1),
1186 fs_reg(suboffset(sample_pos_reg, 17)));
1187 }
1188 /* Compute gl_SamplePosition.y */
1189 compute_sample_position(pos, int_sample_y);
1190 return reg;
1191 }
1192
1193 fs_reg *
1194 fs_visitor::emit_sampleid_setup()
1195 {
1196 assert(stage == MESA_SHADER_FRAGMENT);
1197 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1198 assert(devinfo->gen >= 6);
1199
1200 const fs_builder abld = bld.annotate("compute sample id");
1201 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1202
1203 if (!key->multisample_fbo) {
1204 /* As per GL_ARB_sample_shading specification:
1205 * "When rendering to a non-multisample buffer, or if multisample
1206 * rasterization is disabled, gl_SampleID will always be zero."
1207 */
1208 abld.MOV(*reg, brw_imm_d(0));
1209 } else if (devinfo->gen >= 8) {
1210 /* Sample ID comes in as 4-bit numbers in g1.0:
1211 *
1212 * 15:12 Slot 3 SampleID (only used in SIMD16)
1213 * 11:8 Slot 2 SampleID (only used in SIMD16)
1214 * 7:4 Slot 1 SampleID
1215 * 3:0 Slot 0 SampleID
1216 *
1217 * Each slot corresponds to four channels, so we want to replicate each
1218 * half-byte value to 4 channels in a row:
1219 *
1220 * dst+0: .7 .6 .5 .4 .3 .2 .1 .0
1221 * 7:4 7:4 7:4 7:4 3:0 3:0 3:0 3:0
1222 *
1223 * dst+1: .7 .6 .5 .4 .3 .2 .1 .0 (if SIMD16)
1224 * 15:12 15:12 15:12 15:12 11:8 11:8 11:8 11:8
1225 *
1226 * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
1227 * channels to read the first byte (7:0), and the second group of 8
1228 * channels to read the second byte (15:8). Then, we shift right by
1229 * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
1230 * values into place. Finally, we AND with 0xf to keep the low nibble.
1231 *
1232 * shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
1233 * and(16) dst<1>D tmp<8,8,1>W 0xf:W
1234 *
1235 * TODO: These payload bits exist on Gen7 too, but they appear to always
1236 * be zero, so this code fails to work. We should find out why.
1237 */
1238 fs_reg tmp(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);
1239
1240 abld.SHR(tmp, fs_reg(stride(retype(brw_vec1_grf(1, 0),
1241 BRW_REGISTER_TYPE_B), 1, 8, 0)),
1242 brw_imm_v(0x44440000));
1243 abld.AND(*reg, tmp, brw_imm_w(0xf));
1244 } else {
1245 const fs_reg t1 = component(fs_reg(VGRF, alloc.allocate(1),
1246 BRW_REGISTER_TYPE_D), 0);
1247 const fs_reg t2(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);
1248
1249 /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
1250 * 8x multisampling, subspan 0 will represent sample N (where N
1251 * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
1252 * 7. We can find the value of N by looking at R0.0 bits 7:6
1253 * ("Starting Sample Pair Index (SSPI)") and multiplying by two
1254 * (since samples are always delivered in pairs). That is, we
1255 * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
1256 * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
1257 * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
1258 * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
1259 * populating a temporary variable with the sequence (0, 1, 2, 3),
1260 * and then reading from it using vstride=1, width=4, hstride=0.
1261 * These computations hold good for 4x multisampling as well.
1262 *
1263 * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
1264 * the first four slots are sample 0 of subspan 0; the next four
1265 * are sample 1 of subspan 0; the third group is sample 0 of
1266 * subspan 1, and finally sample 1 of subspan 1.
1267 */
1268
1269 /* SKL+ has an extra bit for the Starting Sample Pair Index to
1270        * accommodate 16x MSAA.
1271 */
1272 abld.exec_all().group(1, 0)
1273 .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
1274 brw_imm_ud(0xc0));
1275 abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));
1276
1277 /* This works for both SIMD8 and SIMD16 */
1278 abld.exec_all().group(4, 0).MOV(t2, brw_imm_v(0x3210));
1279
1280 /* This special instruction takes care of setting vstride=1,
1281 * width=4, hstride=0 of t2 during an ADD instruction.
1282 */
1283 abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
1284 }
1285
1286 return reg;
1287 }
1288
1289 fs_reg *
1290 fs_visitor::emit_samplemaskin_setup()
1291 {
1292 assert(stage == MESA_SHADER_FRAGMENT);
1293 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
1294 assert(devinfo->gen >= 6);
1295
1296 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1297
1298 fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
1299 BRW_REGISTER_TYPE_D));
1300
1301 if (wm_prog_data->persample_dispatch) {
1302 /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
1303 * and a mask representing which sample is being processed by the
1304 * current shader invocation.
1305 *
1306 * From the OES_sample_variables specification:
1307 * "When per-sample shading is active due to the use of a fragment input
1308 * qualified by "sample" or due to the use of the gl_SampleID or
1309 * gl_SamplePosition variables, only the bit for the current sample is
1310 * set in gl_SampleMaskIn."
1311 */
1312 const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");
1313
1314 if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
1315 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
1316
1317 fs_reg one = vgrf(glsl_type::int_type);
1318 fs_reg enabled_mask = vgrf(glsl_type::int_type);
1319 abld.MOV(one, brw_imm_d(1));
1320 abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
1321 abld.AND(*reg, enabled_mask, coverage_mask);
1322 } else {
1323 /* In per-pixel mode, the coverage mask is sufficient. */
1324 *reg = coverage_mask;
1325 }
1326 return reg;
1327 }
1328
1329 fs_reg
1330 fs_visitor::resolve_source_modifiers(const fs_reg &src)
1331 {
1332 if (!src.abs && !src.negate)
1333 return src;
1334
1335 fs_reg temp = bld.vgrf(src.type);
1336 bld.MOV(temp, src);
1337
1338 return temp;
1339 }
1340
1341 void
1342 fs_visitor::emit_discard_jump()
1343 {
1344 assert(brw_wm_prog_data(this->prog_data)->uses_kill);
1345
1346 /* For performance, after a discard, jump to the end of the
1347 * shader if all relevant channels have been discarded.
1348 */
1349 fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
1350 discard_jump->flag_subreg = 1;
1351
1352 discard_jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
1353 discard_jump->predicate_inverse = true;
1354 }
1355
1356 void
1357 fs_visitor::emit_gs_thread_end()
1358 {
1359 assert(stage == MESA_SHADER_GEOMETRY);
1360
1361 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
1362
1363 if (gs_compile->control_data_header_size_bits > 0) {
1364 emit_gs_control_data_bits(this->final_gs_vertex_count);
1365 }
1366
1367 const fs_builder abld = bld.annotate("thread end");
1368 fs_inst *inst;
1369
1370 if (gs_prog_data->static_vertex_count != -1) {
1371 foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
1372 if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
1373 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
1374 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
1375 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
1376 prev->eot = true;
1377
1378 /* Delete now dead instructions. */
1379 foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
1380 if (dead == prev)
1381 break;
1382 dead->remove();
1383 }
1384 return;
1385 } else if (prev->is_control_flow() || prev->has_side_effects()) {
1386 break;
1387 }
1388 }
1389 fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1390 abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
1391 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
1392 inst->mlen = 1;
1393 } else {
1394 fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1395 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
1396 sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1397 sources[1] = this->final_gs_vertex_count;
1398 abld.LOAD_PAYLOAD(payload, sources, 2, 2);
1399 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
1400 inst->mlen = 2;
1401 }
1402 inst->eot = true;
1403 inst->offset = 0;
1404 }
1405
1406 void
1407 fs_visitor::assign_curb_setup()
1408 {
1409 unsigned uniform_push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);
1410
1411 unsigned ubo_push_length = 0;
1412 unsigned ubo_push_start[4];
1413 for (int i = 0; i < 4; i++) {
1414 ubo_push_start[i] = 8 * (ubo_push_length + uniform_push_length);
1415 ubo_push_length += stage_prog_data->ubo_ranges[i].length;
1416 }
1417
1418 prog_data->curb_read_length = uniform_push_length + ubo_push_length;
1419
1420 /* Map the offsets in the UNIFORM file to fixed HW regs. */
1421 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1422 for (unsigned int i = 0; i < inst->sources; i++) {
1423 if (inst->src[i].file == UNIFORM) {
1424 int uniform_nr = inst->src[i].nr + inst->src[i].offset / 4;
1425 int constant_nr;
1426 if (inst->src[i].nr >= UBO_START) {
1427 /* constant_nr is in 32-bit units, the rest are in bytes */
1428 constant_nr = ubo_push_start[inst->src[i].nr - UBO_START] +
1429 inst->src[i].offset / 4;
1430 } else if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
1431 constant_nr = push_constant_loc[uniform_nr];
1432 } else {
1433 /* Section 5.11 of the OpenGL 4.1 spec says:
1434 * "Out-of-bounds reads return undefined values, which include
1435 * values from other variables of the active program or zero."
1436 * Just return the first push constant.
1437 */
1438 constant_nr = 0;
1439 }
1440
1441 struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
1442 constant_nr / 8,
1443 constant_nr % 8);
1444 brw_reg.abs = inst->src[i].abs;
1445 brw_reg.negate = inst->src[i].negate;
1446
1447 assert(inst->src[i].stride == 0);
1448 inst->src[i] = byte_offset(
1449 retype(brw_reg, inst->src[i].type),
1450 inst->src[i].offset % 4);
1451 }
1452 }
1453 }
1454
1455 /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
1456 this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
1457 }
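/* Worked example of the mapping above: push constants are packed eight dwords
 * per GRF starting right after the payload, so constant_nr == 10 becomes
 * brw_vec1_grf(payload.num_regs + 1, 2), i.e. the third dword of the second
 * CURBE register, with any sub-dword offset re-applied via byte_offset().
 */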
1458
1459 void
1460 fs_visitor::calculate_urb_setup()
1461 {
1462 assert(stage == MESA_SHADER_FRAGMENT);
1463 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1464 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1465
1466 memset(prog_data->urb_setup, -1,
1467 sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
1468
1469 int urb_next = 0;
1470 /* Figure out where each of the incoming setup attributes lands. */
1471 if (devinfo->gen >= 6) {
1472 if (_mesa_bitcount_64(nir->info.inputs_read &
1473 BRW_FS_VARYING_INPUT_MASK) <= 16) {
1474 /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
1475 * first 16 varying inputs, so we can put them wherever we want.
1476 * Just put them in order.
1477 *
1478 * This is useful because it means that (a) inputs not used by the
1479 * fragment shader won't take up valuable register space, and (b) we
1480 * won't have to recompile the fragment shader if it gets paired with
1481 * a different vertex (or geometry) shader.
1482 */
1483 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1484 if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1485 BITFIELD64_BIT(i)) {
1486 prog_data->urb_setup[i] = urb_next++;
1487 }
1488 }
1489 } else {
1490 /* We have enough input varyings that the SF/SBE pipeline stage can't
1491 * arbitrarily rearrange them to suit our whim; we have to put them
1492 * in an order that matches the output of the previous pipeline stage
1493 * (geometry or vertex shader).
1494 */
1495 struct brw_vue_map prev_stage_vue_map;
1496 brw_compute_vue_map(devinfo, &prev_stage_vue_map,
1497 key->input_slots_valid,
1498 nir->info.separate_shader);
1499
1500 int first_slot =
1501 brw_compute_first_urb_slot_required(nir->info.inputs_read,
1502 &prev_stage_vue_map);
1503
1504 assert(prev_stage_vue_map.num_slots <= first_slot + 32);
1505 for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
1506 slot++) {
1507 int varying = prev_stage_vue_map.slot_to_varying[slot];
1508 if (varying != BRW_VARYING_SLOT_PAD &&
1509 (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1510 BITFIELD64_BIT(varying))) {
1511 prog_data->urb_setup[varying] = slot - first_slot;
1512 }
1513 }
1514 urb_next = prev_stage_vue_map.num_slots - first_slot;
1515 }
1516 } else {
1517 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
1518 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1519 /* Point size is packed into the header, not as a general attribute */
1520 if (i == VARYING_SLOT_PSIZ)
1521 continue;
1522
1523 if (key->input_slots_valid & BITFIELD64_BIT(i)) {
1524 /* The back color slot is skipped when the front color is
1525 * also written to. In addition, some slots can be
1526 * written in the vertex shader and not read in the
1527 * fragment shader. So the register number must always be
1528 * incremented, mapped or not.
1529 */
1530 if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
1531 prog_data->urb_setup[i] = urb_next;
1532 urb_next++;
1533 }
1534 }
1535
1536 /*
1537     * It's an FS-only attribute, and we did the interpolation for this
1538     * attribute in the SF thread. So count it here, too.
1539 *
1540 * See compile_sf_prog() for more info.
1541 */
1542 if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
1543 prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
1544 }
1545
1546 prog_data->num_varying_inputs = urb_next;
1547 }
1548
1549 void
1550 fs_visitor::assign_urb_setup()
1551 {
1552 assert(stage == MESA_SHADER_FRAGMENT);
1553 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1554
1555 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1556
1557 /* Offset all the urb_setup[] index by the actual position of the
1558 * setup regs, now that the location of the constants has been chosen.
1559 */
1560 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1561 if (inst->opcode == FS_OPCODE_LINTERP) {
1562 assert(inst->src[1].file == FIXED_GRF);
1563 inst->src[1].nr += urb_start;
1564 }
1565
1566 if (inst->opcode == FS_OPCODE_CINTERP) {
1567 assert(inst->src[0].file == FIXED_GRF);
1568 inst->src[0].nr += urb_start;
1569 }
1570 }
1571
1572 /* Each attribute is 4 setup channels, each of which is half a reg. */
1573 this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
1574 }
1575
1576 void
1577 fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
1578 {
1579 for (int i = 0; i < inst->sources; i++) {
1580 if (inst->src[i].file == ATTR) {
1581 int grf = payload.num_regs +
1582 prog_data->curb_read_length +
1583 inst->src[i].nr +
1584 inst->src[i].offset / REG_SIZE;
1585
1586 /* As explained at brw_reg_from_fs_reg, From the Haswell PRM:
1587 *
1588 * VertStride must be used to cross GRF register boundaries. This
1589 * rule implies that elements within a 'Width' cannot cross GRF
1590 * boundaries.
1591 *
1592 * So, for registers that are large enough, we have to split the exec
1593 * size in two and trust the compression state to sort it out.
1594 */
1595 unsigned total_size = inst->exec_size *
1596 inst->src[i].stride *
1597 type_sz(inst->src[i].type);
1598
1599 assert(total_size <= 2 * REG_SIZE);
1600 const unsigned exec_size =
1601 (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;
1602
1603 unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
1604 struct brw_reg reg =
1605 stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1606 inst->src[i].offset % REG_SIZE),
1607 exec_size * inst->src[i].stride,
1608 width, inst->src[i].stride);
1609 reg.abs = inst->src[i].abs;
1610 reg.negate = inst->src[i].negate;
1611
1612 inst->src[i] = reg;
1613 }
1614 }
1615 }
1616
1617 void
1618 fs_visitor::assign_vs_urb_setup()
1619 {
1620 struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
1621
1622 assert(stage == MESA_SHADER_VERTEX);
1623
1624 /* Each attribute is 4 regs. */
1625 this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;
1626
1627 assert(vs_prog_data->base.urb_read_length <= 15);
1628
1629 /* Rewrite all ATTR file references to the hw grf that they land in. */
1630 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1631 convert_attr_sources_to_hw_regs(inst);
1632 }
1633 }
1634
1635 void
1636 fs_visitor::assign_tcs_single_patch_urb_setup()
1637 {
1638 assert(stage == MESA_SHADER_TESS_CTRL);
1639
1640 /* Rewrite all ATTR file references to HW_REGs. */
1641 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1642 convert_attr_sources_to_hw_regs(inst);
1643 }
1644 }
1645
1646 void
1647 fs_visitor::assign_tes_urb_setup()
1648 {
1649 assert(stage == MESA_SHADER_TESS_EVAL);
1650
1651 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1652
1653 first_non_payload_grf += 8 * vue_prog_data->urb_read_length;
1654
1655 /* Rewrite all ATTR file references to HW_REGs. */
1656 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1657 convert_attr_sources_to_hw_regs(inst);
1658 }
1659 }
1660
1661 void
1662 fs_visitor::assign_gs_urb_setup()
1663 {
1664 assert(stage == MESA_SHADER_GEOMETRY);
1665
1666 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1667
1668 first_non_payload_grf +=
1669 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
1670
1671 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1672 /* Rewrite all ATTR file references to GRFs. */
1673 convert_attr_sources_to_hw_regs(inst);
1674 }
1675 }
1676
1677
1678 /**
1679 * Split large virtual GRFs into separate components if we can.
1680 *
1681 * This is mostly duplicated with what brw_fs_vector_splitting does,
1682 * but that's really conservative because it's afraid of doing
1683 * splitting that doesn't result in real progress after the rest of
1684 * the optimization phases, which would cause infinite looping in
1685 * optimization. We can do it once here, safely. This also has the
1686 * opportunity to split interpolated values, or maybe even uniforms,
1687 * which we don't have at the IR level.
1688 *
1689 * We want to split, because virtual GRFs are what we register
1690 * allocate and spill (due to contiguousness requirements for some
1691 * instructions), and they're what we naturally generate in the
1692 * codegen process, but most virtual GRFs don't actually need to be
1693 * contiguous sets of GRFs. If we split, we'll end up with reduced
1694 * live intervals and better dead code elimination and coalescing.
1695 */
1696 void
1697 fs_visitor::split_virtual_grfs()
1698 {
1699 /* Compact the register file so we eliminate dead vgrfs. This
1700     * only defines split points for live registers, so any dead
1701     * registers that are too large would hit assertions later.
1702 */
1703 compact_virtual_grfs();
1704
1705 int num_vars = this->alloc.count;
1706
1707 /* Count the total number of registers */
1708 int reg_count = 0;
1709 int vgrf_to_reg[num_vars];
1710 for (int i = 0; i < num_vars; i++) {
1711 vgrf_to_reg[i] = reg_count;
1712 reg_count += alloc.sizes[i];
1713 }
1714
1715 /* An array of "split points". For each register slot, this indicates
1716 * if this slot can be separated from the previous slot. Every time an
1717 * instruction uses multiple elements of a register (as a source or
1718 * destination), we mark the used slots as inseparable. Then we go
1719 * through and split the registers into the smallest pieces we can.
1720 */
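   /* For example, a 4-register VGRF whose only multi-register access is a
    * 2-register (SIMD16 float) write to registers 0-1 keeps split points at
    * registers 2 and 3 but loses the one at register 1, so it ends up split
    * into one 2-register VGRF and two 1-register VGRFs.
    */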
1721 bool split_points[reg_count];
1722 memset(split_points, 0, sizeof(split_points));
1723
1724 /* Mark all used registers as fully splittable */
1725 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1726 if (inst->dst.file == VGRF) {
1727 int reg = vgrf_to_reg[inst->dst.nr];
1728 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1729 split_points[reg + j] = true;
1730 }
1731
1732 for (int i = 0; i < inst->sources; i++) {
1733 if (inst->src[i].file == VGRF) {
1734 int reg = vgrf_to_reg[inst->src[i].nr];
1735 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1736 split_points[reg + j] = true;
1737 }
1738 }
1739 }
1740
1741 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1742 if (inst->dst.file == VGRF) {
1743 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1744 for (unsigned j = 1; j < regs_written(inst); j++)
1745 split_points[reg + j] = false;
1746 }
1747 for (int i = 0; i < inst->sources; i++) {
1748 if (inst->src[i].file == VGRF) {
1749 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1750 for (unsigned j = 1; j < regs_read(inst, i); j++)
1751 split_points[reg + j] = false;
1752 }
1753 }
1754 }
1755
1756 int new_virtual_grf[reg_count];
1757 int new_reg_offset[reg_count];
1758
1759 int reg = 0;
1760 for (int i = 0; i < num_vars; i++) {
1761 /* The first one should always be 0 as a quick sanity check. */
1762 assert(split_points[reg] == false);
1763
1764 /* j = 0 case */
1765 new_reg_offset[reg] = 0;
1766 reg++;
1767 int offset = 1;
1768
1769 /* j > 0 case */
1770 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1771 /* If this is a split point, reset the offset to 0 and allocate a
1772 * new virtual GRF for the previous offset many registers
1773 */
1774 if (split_points[reg]) {
1775 assert(offset <= MAX_VGRF_SIZE);
1776 int grf = alloc.allocate(offset);
1777 for (int k = reg - offset; k < reg; k++)
1778 new_virtual_grf[k] = grf;
1779 offset = 0;
1780 }
1781 new_reg_offset[reg] = offset;
1782 offset++;
1783 reg++;
1784 }
1785
1786 /* The last one gets the original register number */
1787 assert(offset <= MAX_VGRF_SIZE);
1788 alloc.sizes[i] = offset;
1789 for (int k = reg - offset; k < reg; k++)
1790 new_virtual_grf[k] = i;
1791 }
1792 assert(reg == reg_count);
1793
1794 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1795 if (inst->dst.file == VGRF) {
1796 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1797 inst->dst.nr = new_virtual_grf[reg];
1798 inst->dst.offset = new_reg_offset[reg] * REG_SIZE +
1799 inst->dst.offset % REG_SIZE;
1800 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1801 }
1802 for (int i = 0; i < inst->sources; i++) {
1803 if (inst->src[i].file == VGRF) {
1804 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1805 inst->src[i].nr = new_virtual_grf[reg];
1806 inst->src[i].offset = new_reg_offset[reg] * REG_SIZE +
1807 inst->src[i].offset % REG_SIZE;
1808 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1809 }
1810 }
1811 }
1812 invalidate_live_intervals();
1813 }
1814
1815 /**
1816 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1817 *
1818 * During code generation, we create tons of temporary variables, many of
1819 * which get immediately killed and are never used again. Yet, in later
1820 * optimization and analysis passes, such as compute_live_intervals, we need
1821 * to loop over all the virtual GRFs. Compacting them can save a lot of
1822 * overhead.
1823 */
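/* A sketch of the remapping, with made-up numbers: if only vgrf0 and vgrf3
 * are referenced by any instruction, remap_table ends up as { 0, -1, -1, 1 }
 * and alloc.count drops from 4 to 2; every dst/src with nr == 3 is then
 * rewritten to nr == 1.
 */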
1824 bool
1825 fs_visitor::compact_virtual_grfs()
1826 {
1827 bool progress = false;
1828 int remap_table[this->alloc.count];
1829 memset(remap_table, -1, sizeof(remap_table));
1830
1831 /* Mark which virtual GRFs are used. */
1832 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1833 if (inst->dst.file == VGRF)
1834 remap_table[inst->dst.nr] = 0;
1835
1836 for (int i = 0; i < inst->sources; i++) {
1837 if (inst->src[i].file == VGRF)
1838 remap_table[inst->src[i].nr] = 0;
1839 }
1840 }
1841
1842 /* Compact the GRF arrays. */
1843 int new_index = 0;
1844 for (unsigned i = 0; i < this->alloc.count; i++) {
1845 if (remap_table[i] == -1) {
1846 /* We just found an unused register. This means that we are
1847 * actually going to compact something.
1848 */
1849 progress = true;
1850 } else {
1851 remap_table[i] = new_index;
1852 alloc.sizes[new_index] = alloc.sizes[i];
1853 invalidate_live_intervals();
1854 ++new_index;
1855 }
1856 }
1857
1858 this->alloc.count = new_index;
1859
1860 /* Patch all the instructions to use the newly renumbered registers */
1861 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1862 if (inst->dst.file == VGRF)
1863 inst->dst.nr = remap_table[inst->dst.nr];
1864
1865 for (int i = 0; i < inst->sources; i++) {
1866 if (inst->src[i].file == VGRF)
1867 inst->src[i].nr = remap_table[inst->src[i].nr];
1868 }
1869 }
1870
1871 /* Patch all the references to delta_xy, since they're used in register
1872 * allocation. If they're unused, switch them to BAD_FILE so we don't
1873 * think some random VGRF is delta_xy.
1874 */
1875 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
1876 if (delta_xy[i].file == VGRF) {
1877 if (remap_table[delta_xy[i].nr] != -1) {
1878 delta_xy[i].nr = remap_table[delta_xy[i].nr];
1879 } else {
1880 delta_xy[i].file = BAD_FILE;
1881 }
1882 }
1883 }
1884
1885 return progress;
1886 }
1887
1888 static void
1889 set_push_pull_constant_loc(unsigned uniform, int *chunk_start,
1890 unsigned *max_chunk_bitsize,
1891 bool contiguous, unsigned bitsize,
1892 const unsigned target_bitsize,
1893 int *push_constant_loc, int *pull_constant_loc,
1894 unsigned *num_push_constants,
1895 unsigned *num_pull_constants,
1896 const unsigned max_push_components,
1897 const unsigned max_chunk_size,
1898 bool allow_pull_constants,
1899 struct brw_stage_prog_data *stage_prog_data)
1900 {
1901 /* This is the first live uniform in the chunk */
1902 if (*chunk_start < 0)
1903 *chunk_start = uniform;
1904
1905 /* Keep track of the maximum bit size access in contiguous uniforms */
1906 *max_chunk_bitsize = MAX2(*max_chunk_bitsize, bitsize);
1907
1908 /* If this element does not need to be contiguous with the next, we
1909 * split at this point and everything between chunk_start and uniform
1910 * forms a single chunk.
1911 */
1912 if (!contiguous) {
1913 /* If bitsize doesn't match the target one, skip it */
1914 if (*max_chunk_bitsize != target_bitsize) {
1915 /* FIXME: right now we only support 32 and 64-bit accesses */
1916 assert(*max_chunk_bitsize == 4 || *max_chunk_bitsize == 8);
1917 *max_chunk_bitsize = 0;
1918 *chunk_start = -1;
1919 return;
1920 }
1921
1922 unsigned chunk_size = uniform - *chunk_start + 1;
1923
1924 /* Decide whether we should push or pull this parameter. In the
1925 * Vulkan driver, push constants are explicitly exposed via the API
1926 * so we push everything. In GL, we only push small arrays.
1927 */
1928 if (!allow_pull_constants ||
1929 (*num_push_constants + chunk_size <= max_push_components &&
1930 chunk_size <= max_chunk_size)) {
1931 assert(*num_push_constants + chunk_size <= max_push_components);
1932 for (unsigned j = *chunk_start; j <= uniform; j++)
1933 push_constant_loc[j] = (*num_push_constants)++;
1934 } else {
1935 for (unsigned j = *chunk_start; j <= uniform; j++)
1936 pull_constant_loc[j] = (*num_pull_constants)++;
1937 }
1938
1939 *max_chunk_bitsize = 0;
1940 *chunk_start = -1;
1941 }
1942 }
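/* Example of the push/pull decision above, with hypothetical numbers: given
 * max_push_components = 128 and max_chunk_size = 16, a contiguous chunk of
 * 12 live 32-bit uniforms (say, a mat3) gets pushed as long as it still fits
 * in the remaining push budget, while a 64-element float array exceeds
 * max_chunk_size and is demoted to the pull buffer (assuming pull constants
 * are allowed, as in GL).
 */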
1943
1944 static int
1945 get_subgroup_id_param_index(const brw_stage_prog_data *prog_data)
1946 {
1947 if (prog_data->nr_params == 0)
1948 return -1;
1949
1950 /* The local thread id is always the last parameter in the list */
1951 uint32_t last_param = prog_data->param[prog_data->nr_params - 1];
1952 if (last_param == BRW_PARAM_BUILTIN_SUBGROUP_ID)
1953 return prog_data->nr_params - 1;
1954
1955 return -1;
1956 }
1957
1958 /**
1959 * Assign UNIFORM file registers to either push constants or pull constants.
1960 *
1961 * We allow a fragment shader to have more than the minimum maximum
1962 * number of fragment shader uniform components required by the spec
1963 * (64). If there are too many of these, they'd fill up all of the
1964 * register space. So, this pass moves some of them out to the pull
1965 * constant buffer and updates the program to load them from there.
1966 */
1967 void
1968 fs_visitor::assign_constant_locations()
1969 {
1970 /* Only the first compile gets to decide on locations. */
1971 if (push_constant_loc) {
1972 assert(pull_constant_loc);
1973 return;
1974 }
1975
1976 bool is_live[uniforms];
1977 memset(is_live, 0, sizeof(is_live));
1978 unsigned bitsize_access[uniforms];
1979 memset(bitsize_access, 0, sizeof(bitsize_access));
1980
1981 /* For each uniform slot, a value of true indicates that the given slot and
1982 * the next slot must remain contiguous. This is used to keep us from
1983 * splitting arrays and 64-bit values apart.
1984 */
1985 bool contiguous[uniforms];
1986 memset(contiguous, 0, sizeof(contiguous));
1987
1988 /* First, we walk through the instructions and do two things:
1989 *
1990 * 1) Figure out which uniforms are live.
1991 *
1992 * 2) Mark any indirectly used ranges of registers as contiguous.
1993 *
1994 * Note that we don't move constant-indexed accesses to arrays. No
1995 * testing has been done of the performance impact of this choice.
1996 */
1997 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
1998 for (int i = 0 ; i < inst->sources; i++) {
1999 if (inst->src[i].file != UNIFORM)
2000 continue;
2001
2002 int constant_nr = inst->src[i].nr + inst->src[i].offset / 4;
2003
2004 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
2005 assert(inst->src[2].ud % 4 == 0);
2006 unsigned last = constant_nr + (inst->src[2].ud / 4) - 1;
2007 assert(last < uniforms);
2008
2009 for (unsigned j = constant_nr; j < last; j++) {
2010 is_live[j] = true;
2011 contiguous[j] = true;
2012 bitsize_access[j] = MAX2(bitsize_access[j], type_sz(inst->src[i].type));
2013 }
2014 is_live[last] = true;
2015 bitsize_access[last] = MAX2(bitsize_access[last], type_sz(inst->src[i].type));
2016 } else {
2017 if (constant_nr >= 0 && constant_nr < (int) uniforms) {
2018 int regs_read = inst->components_read(i) *
2019 type_sz(inst->src[i].type) / 4;
2020 assert(regs_read <= 2);
2021 if (regs_read == 2)
2022 contiguous[constant_nr] = true;
2023 for (int j = 0; j < regs_read; j++) {
2024 is_live[constant_nr + j] = true;
2025 bitsize_access[constant_nr + j] =
2026 MAX2(bitsize_access[constant_nr + j], type_sz(inst->src[i].type));
2027 }
2028 }
2029 }
2030 }
2031 }
2032
2033 int subgroup_id_index = get_subgroup_id_param_index(stage_prog_data);
2034
2035 /* Only allow 16 registers (128 uniform components) as push constants.
2036 *
2037 * Just demote the end of the list. We could probably do better
2038 * here, demoting things that are rarely used in the program first.
2039 *
2040 * If changing this value, note the limitation about total_regs in
2041 * brw_curbe.c.
2042 */
2043 unsigned int max_push_components = 16 * 8;
2044 if (subgroup_id_index >= 0)
2045 max_push_components--; /* Save a slot for the thread ID */
2046
2047 /* We push small arrays, but no bigger than 16 floats. This is big enough
2048 * for a vec4 but hopefully not large enough to push out other stuff. We
2049 * should probably use a better heuristic at some point.
2050 */
2051 const unsigned int max_chunk_size = 16;
2052
2053 unsigned int num_push_constants = 0;
2054 unsigned int num_pull_constants = 0;
2055
2056 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2057 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2058
2059 /* Default to -1 meaning no location */
2060 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2061 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2062
2063 int chunk_start = -1;
2064 unsigned max_chunk_bitsize = 0;
2065
2066 /* First push 64-bit uniforms to ensure they are properly aligned */
2067 const unsigned uniform_64_bit_size = type_sz(BRW_REGISTER_TYPE_DF);
2068 for (unsigned u = 0; u < uniforms; u++) {
2069 if (!is_live[u])
2070 continue;
2071
2072 set_push_pull_constant_loc(u, &chunk_start, &max_chunk_bitsize,
2073 contiguous[u], bitsize_access[u],
2074 uniform_64_bit_size,
2075 push_constant_loc, pull_constant_loc,
2076 &num_push_constants, &num_pull_constants,
2077 max_push_components, max_chunk_size,
2078 compiler->supports_pull_constants,
2079 stage_prog_data);
2080
2081 }
2082
2083 /* Then push the rest of the uniforms */
2084 const unsigned uniform_32_bit_size = type_sz(BRW_REGISTER_TYPE_F);
2085 for (unsigned u = 0; u < uniforms; u++) {
2086 if (!is_live[u])
2087 continue;
2088
2089 /* Skip subgroup_id_index to put it in the last push register. */
2090 if (subgroup_id_index == (int)u)
2091 continue;
2092
2093 set_push_pull_constant_loc(u, &chunk_start, &max_chunk_bitsize,
2094 contiguous[u], bitsize_access[u],
2095 uniform_32_bit_size,
2096 push_constant_loc, pull_constant_loc,
2097 &num_push_constants, &num_pull_constants,
2098 max_push_components, max_chunk_size,
2099 compiler->supports_pull_constants,
2100 stage_prog_data);
2101 }
2102
2103 /* Add the CS local thread ID uniform at the end of the push constants */
2104 if (subgroup_id_index >= 0)
2105 push_constant_loc[subgroup_id_index] = num_push_constants++;
2106
2107 /* As the uniforms are going to be reordered, stash the old array and
2108 * create two new arrays for push/pull params.
2109 */
2110 uint32_t *param = stage_prog_data->param;
2111 stage_prog_data->nr_params = num_push_constants;
2112 if (num_push_constants) {
2113 stage_prog_data->param = ralloc_array(mem_ctx, uint32_t,
2114 num_push_constants);
2115 } else {
2116 stage_prog_data->param = NULL;
2117 }
2118 assert(stage_prog_data->nr_pull_params == 0);
2119 assert(stage_prog_data->pull_param == NULL);
2120 if (num_pull_constants > 0) {
2121 stage_prog_data->nr_pull_params = num_pull_constants;
2122 stage_prog_data->pull_param = ralloc_array(mem_ctx, uint32_t,
2123 num_pull_constants);
2124 }
2125
2126 /* Now that we know how many regular uniforms we'll push, reduce the
2127 * UBO push ranges so we don't exceed the 3DSTATE_CONSTANT limits.
2128 */
2129 unsigned push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);
2130 for (int i = 0; i < 4; i++) {
2131 struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
2132
2133 if (push_length + range->length > 64)
2134 range->length = 64 - push_length;
2135
2136 push_length += range->length;
2137 }
2138 assert(push_length <= 64);
2139
2140 /* Up until now, the param[] array has been indexed by reg + offset
2141 * of UNIFORM registers. Move pull constants into pull_param[] and
2142 * condense param[] to only contain the uniforms we chose to push.
2143 *
2144 * NOTE: Because we are condensing the param[] array, we know that
2145 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2146 * having to make a copy.
2147 */
2148 for (unsigned int i = 0; i < uniforms; i++) {
2149 uint32_t value = param[i];
2150 if (pull_constant_loc[i] != -1) {
2151 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2152 } else if (push_constant_loc[i] != -1) {
2153 stage_prog_data->param[push_constant_loc[i]] = value;
2154 }
2155 }
2156 ralloc_free(param);
2157 }
2158
2159 bool
2160 fs_visitor::get_pull_locs(const fs_reg &src,
2161 unsigned *out_surf_index,
2162 unsigned *out_pull_index)
2163 {
2164 assert(src.file == UNIFORM);
2165
2166 if (src.nr >= UBO_START) {
2167 const struct brw_ubo_range *range =
2168 &prog_data->ubo_ranges[src.nr - UBO_START];
2169
2170 /* If this access is in our (reduced) range, use the push data. */
2171 if (src.offset / 32 < range->length)
2172 return false;
2173
2174 *out_surf_index = prog_data->binding_table.ubo_start + range->block;
2175 *out_pull_index = (32 * range->start + src.offset) / 4;
2176 return true;
2177 }
2178
2179 const unsigned location = src.nr + src.offset / 4;
2180
2181 if (location < uniforms && pull_constant_loc[location] != -1) {
2182 /* A regular uniform that has been demoted to a pull constant */
2183 *out_surf_index = stage_prog_data->binding_table.pull_constants_start;
2184 *out_pull_index = pull_constant_loc[location];
2185 return true;
2186 }
2187
2188 return false;
2189 }
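/* Worked example (hypothetical values): for a UBO range with start = 2 and
 * length = 3, an access at src.offset = 48 satisfies 48 / 32 < 3 and stays
 * in the pushed data, whereas src.offset = 128 yields
 * out_pull_index = (32 * 2 + 128) / 4 = 48, i.e. dword 48 of the buffer.
 */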
2190
2191 /**
2192 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2193 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2194 */
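/* Roughly (register numbers invented), a use of a pulled uniform like
 *
 *    add(8) vgrf1:F u4:F vgrf2:F
 *
 * turns into
 *
 *    uniform_pull_constant_load(16) vgrf3:UD surf_idx aligned_offset { exec_all }
 *    add(8) vgrf1:F vgrf3+<off>:F vgrf2:F
 *
 * where the load fetches a whole 64-byte cacheline and <off> is the pulled
 * constant's position within that cacheline.
 */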
2195 void
2196 fs_visitor::lower_constant_loads()
2197 {
2198 unsigned index, pull_index;
2199
2200 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2201 /* Set up the annotation tracking for newly generated instructions. */
2202 const fs_builder ibld(this, block, inst);
2203
2204 for (int i = 0; i < inst->sources; i++) {
2205 if (inst->src[i].file != UNIFORM)
2206 continue;
2207
2208 /* We'll handle this case later */
2209 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2210 continue;
2211
2212 if (!get_pull_locs(inst->src[i], &index, &pull_index))
2213 continue;
2214
2215 assert(inst->src[i].stride == 0);
2216
2217 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
2218 const fs_builder ubld = ibld.exec_all().group(block_sz / 4, 0);
2219 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
2220 const unsigned base = pull_index * 4;
2221
2222 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2223 dst, brw_imm_ud(index), brw_imm_ud(base & ~(block_sz - 1)));
2224
2225 /* Rewrite the instruction to use the temporary VGRF. */
2226 inst->src[i].file = VGRF;
2227 inst->src[i].nr = dst.nr;
2228 inst->src[i].offset = (base & (block_sz - 1)) +
2229 inst->src[i].offset % 4;
2230
2231 brw_mark_surface_used(prog_data, index);
2232 }
2233
2234 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2235 inst->src[0].file == UNIFORM) {
2236
2237 if (!get_pull_locs(inst->src[0], &index, &pull_index))
2238 continue;
2239
2240 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2241 brw_imm_ud(index),
2242 inst->src[1],
2243 pull_index * 4);
2244 inst->remove(block);
2245
2246 brw_mark_surface_used(prog_data, index);
2247 }
2248 }
2249 invalidate_live_intervals();
2250 }
2251
2252 bool
2253 fs_visitor::opt_algebraic()
2254 {
2255 bool progress = false;
2256
2257 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2258 switch (inst->opcode) {
2259 case BRW_OPCODE_MOV:
2260 if (inst->src[0].file != IMM)
2261 break;
2262
2263 if (inst->saturate) {
2264 if (inst->dst.type != inst->src[0].type)
2265 assert(!"unimplemented: saturate mixed types");
2266
2267 if (brw_saturate_immediate(inst->dst.type,
2268 &inst->src[0].as_brw_reg())) {
2269 inst->saturate = false;
2270 progress = true;
2271 }
2272 }
2273 break;
2274
2275 case BRW_OPCODE_MUL:
2276 if (inst->src[1].file != IMM)
2277 continue;
2278
2279 /* a * 1.0 = a */
2280 if (inst->src[1].is_one()) {
2281 inst->opcode = BRW_OPCODE_MOV;
2282 inst->src[1] = reg_undef;
2283 progress = true;
2284 break;
2285 }
2286
2287 /* a * -1.0 = -a */
2288 if (inst->src[1].is_negative_one()) {
2289 inst->opcode = BRW_OPCODE_MOV;
2290 inst->src[0].negate = !inst->src[0].negate;
2291 inst->src[1] = reg_undef;
2292 progress = true;
2293 break;
2294 }
2295
2296 /* a * 0.0 = 0.0 */
2297 if (inst->src[1].is_zero()) {
2298 inst->opcode = BRW_OPCODE_MOV;
2299 inst->src[0] = inst->src[1];
2300 inst->src[1] = reg_undef;
2301 progress = true;
2302 break;
2303 }
2304
2305 if (inst->src[0].file == IMM) {
2306 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2307 inst->opcode = BRW_OPCODE_MOV;
2308 inst->src[0].f *= inst->src[1].f;
2309 inst->src[1] = reg_undef;
2310 progress = true;
2311 break;
2312 }
2313 break;
2314 case BRW_OPCODE_ADD:
2315 if (inst->src[1].file != IMM)
2316 continue;
2317
2318 /* a + 0.0 = a */
2319 if (inst->src[1].is_zero()) {
2320 inst->opcode = BRW_OPCODE_MOV;
2321 inst->src[1] = reg_undef;
2322 progress = true;
2323 break;
2324 }
2325
2326 if (inst->src[0].file == IMM) {
2327 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2328 inst->opcode = BRW_OPCODE_MOV;
2329 inst->src[0].f += inst->src[1].f;
2330 inst->src[1] = reg_undef;
2331 progress = true;
2332 break;
2333 }
2334 break;
2335 case BRW_OPCODE_OR:
2336 if (inst->src[0].equals(inst->src[1])) {
2337 inst->opcode = BRW_OPCODE_MOV;
2338 inst->src[1] = reg_undef;
2339 progress = true;
2340 break;
2341 }
2342 break;
2343 case BRW_OPCODE_LRP:
2344 if (inst->src[1].equals(inst->src[2])) {
2345 inst->opcode = BRW_OPCODE_MOV;
2346 inst->src[0] = inst->src[1];
2347 inst->src[1] = reg_undef;
2348 inst->src[2] = reg_undef;
2349 progress = true;
2350 break;
2351 }
2352 break;
2353 case BRW_OPCODE_CMP:
2354 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2355 inst->src[0].abs &&
2356 inst->src[0].negate &&
2357 inst->src[1].is_zero()) {
2358 inst->src[0].abs = false;
2359 inst->src[0].negate = false;
2360 inst->conditional_mod = BRW_CONDITIONAL_Z;
2361 progress = true;
2362 break;
2363 }
2364 break;
2365 case BRW_OPCODE_SEL:
2366 if (inst->src[0].equals(inst->src[1])) {
2367 inst->opcode = BRW_OPCODE_MOV;
2368 inst->src[1] = reg_undef;
2369 inst->predicate = BRW_PREDICATE_NONE;
2370 inst->predicate_inverse = false;
2371 progress = true;
2372 } else if (inst->saturate && inst->src[1].file == IMM) {
2373 switch (inst->conditional_mod) {
2374 case BRW_CONDITIONAL_LE:
2375 case BRW_CONDITIONAL_L:
2376 switch (inst->src[1].type) {
2377 case BRW_REGISTER_TYPE_F:
2378 if (inst->src[1].f >= 1.0f) {
2379 inst->opcode = BRW_OPCODE_MOV;
2380 inst->src[1] = reg_undef;
2381 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2382 progress = true;
2383 }
2384 break;
2385 default:
2386 break;
2387 }
2388 break;
2389 case BRW_CONDITIONAL_GE:
2390 case BRW_CONDITIONAL_G:
2391 switch (inst->src[1].type) {
2392 case BRW_REGISTER_TYPE_F:
2393 if (inst->src[1].f <= 0.0f) {
2394 inst->opcode = BRW_OPCODE_MOV;
2395 inst->src[1] = reg_undef;
2396 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2397 progress = true;
2398 }
2399 break;
2400 default:
2401 break;
2402 }
2403 default:
2404 break;
2405 }
2406 }
2407 break;
2408 case BRW_OPCODE_MAD:
2409 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2410 inst->opcode = BRW_OPCODE_MOV;
2411 inst->src[1] = reg_undef;
2412 inst->src[2] = reg_undef;
2413 progress = true;
2414 } else if (inst->src[0].is_zero()) {
2415 inst->opcode = BRW_OPCODE_MUL;
2416 inst->src[0] = inst->src[2];
2417 inst->src[2] = reg_undef;
2418 progress = true;
2419 } else if (inst->src[1].is_one()) {
2420 inst->opcode = BRW_OPCODE_ADD;
2421 inst->src[1] = inst->src[2];
2422 inst->src[2] = reg_undef;
2423 progress = true;
2424 } else if (inst->src[2].is_one()) {
2425 inst->opcode = BRW_OPCODE_ADD;
2426 inst->src[2] = reg_undef;
2427 progress = true;
2428 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2429 inst->opcode = BRW_OPCODE_ADD;
2430 inst->src[1].f *= inst->src[2].f;
2431 inst->src[2] = reg_undef;
2432 progress = true;
2433 }
2434 break;
2435 case SHADER_OPCODE_BROADCAST:
2436 if (is_uniform(inst->src[0])) {
2437 inst->opcode = BRW_OPCODE_MOV;
2438 inst->sources = 1;
2439 inst->force_writemask_all = true;
2440 progress = true;
2441 } else if (inst->src[1].file == IMM) {
2442 inst->opcode = BRW_OPCODE_MOV;
2443 /* It's possible that the selected component will be too large and
2444 * overflow the register. This can happen if someone does a
2445 * readInvocation() from GLSL or SPIR-V and provides an OOB
2446 * invocationIndex. If this happens and we somehow manage
2447 * to constant fold it in and get here, then component() may cause
2448 * us to start reading outside of the VGRF which will lead to an
2449 * assert later. Instead, just let it wrap around if it goes over
2450 * exec_size.
2451 */
2452 const unsigned comp = inst->src[1].ud & (inst->exec_size - 1);
2453 inst->src[0] = component(inst->src[0], comp);
2454 inst->sources = 1;
2455 inst->force_writemask_all = true;
2456 progress = true;
2457 }
2458 break;
2459
2460 default:
2461 break;
2462 }
2463
2464 /* Swap if src[0] is immediate. */
2465 if (progress && inst->is_commutative()) {
2466 if (inst->src[0].file == IMM) {
2467 fs_reg tmp = inst->src[1];
2468 inst->src[1] = inst->src[0];
2469 inst->src[0] = tmp;
2470 }
2471 }
2472 }
2473 return progress;
2474 }
2475
2476 /**
2477 * Optimize sample messages that have constant zero values for the trailing
2478 * texture coordinates. We can just reduce the message length for these
2479 * instructions instead of reserving a register for it. Trailing parameters
2480 * that aren't sent default to zero anyway. This will cause the dead code
2481 * eliminator to remove the MOV instruction that would otherwise be emitted to
2482 * set up the zero value.
2483 */
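/* For instance (hypothetical case), a SIMD8 textureLod() whose LOD argument
 * is a literal 0.0 can drop the trailing payload register and shrink mlen by
 * one; the sampler supplies zero for the missing parameter and dead code
 * elimination then removes the MOV that loaded it.
 */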
2484 bool
2485 fs_visitor::opt_zero_samples()
2486 {
2487 /* Gen4 infers the texturing opcode based on the message length so we can't
2488 * change it.
2489 */
2490 if (devinfo->gen < 5)
2491 return false;
2492
2493 bool progress = false;
2494
2495 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2496 if (!inst->is_tex())
2497 continue;
2498
2499 fs_inst *load_payload = (fs_inst *) inst->prev;
2500
2501 if (load_payload->is_head_sentinel() ||
2502 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2503 continue;
2504
2505 /* We don't want to remove the message header or the first parameter.
2506 * Removing the first parameter is not allowed, see the Haswell PRM
2507 * volume 7, page 149:
2508 *
2509 * "Parameter 0 is required except for the sampleinfo message, which
2510 * has no parameter 0"
2511 */
2512 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2513 load_payload->src[(inst->mlen - inst->header_size) /
2514 (inst->exec_size / 8) +
2515 inst->header_size - 1].is_zero()) {
2516 inst->mlen -= inst->exec_size / 8;
2517 progress = true;
2518 }
2519 }
2520
2521 if (progress)
2522 invalidate_live_intervals();
2523
2524 return progress;
2525 }
2526
2527 /**
2528 * Optimize sample messages which are followed by the final RT write.
2529 *
2530 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2531 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2532 * final texturing results copied to the framebuffer write payload and modify
2533 * them to write to the framebuffer directly.
2534 */
2535 bool
2536 fs_visitor::opt_sampler_eot()
2537 {
2538 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2539
2540 if (stage != MESA_SHADER_FRAGMENT)
2541 return false;
2542
2543 if (devinfo->gen != 9 && !devinfo->is_cherryview)
2544 return false;
2545
2546 /* FINISHME: It should be possible to implement this optimization when there
2547 * are multiple drawbuffers.
2548 */
2549 if (key->nr_color_regions != 1)
2550 return false;
2551
2552 /* Requires emitting a bunch of saturating MOV instructions during logical
2553 * send lowering to clamp the color payload, which the sampler unit isn't
2554 * going to do for us.
2555 */
2556 if (key->clamp_fragment_color)
2557 return false;
2558
2559 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2560 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2561 fs_inst *fb_write = (fs_inst *)block->end();
2562 assert(fb_write->eot);
2563 assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
2564
2565 /* There wasn't one; nothing to do. */
2566 if (unlikely(fb_write->prev->is_head_sentinel()))
2567 return false;
2568
2569 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2570
2571 /* 3D Sampler » Messages » Message Format
2572 *
2573 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2574 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2575 */
2576 if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
2577 tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
2578 tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
2579 tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
2580 tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
2581 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
2582 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
2583 tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
2584 return false;
2585
2586 /* XXX - This shouldn't be necessary. */
2587 if (tex_inst->prev->is_head_sentinel())
2588 return false;
2589
2590 /* Check that the FB write sources are fully initialized by the single
2591 * texturing instruction.
2592 */
2593 for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
2594 if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
2595 if (!fb_write->src[i].equals(tex_inst->dst) ||
2596 fb_write->size_read(i) != tex_inst->size_written)
2597 return false;
2598 } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
2599 if (fb_write->src[i].file != BAD_FILE)
2600 return false;
2601 }
2602 }
2603
2604 assert(!tex_inst->eot); /* We can't get here twice */
2605 assert((tex_inst->offset & (0xff << 24)) == 0);
2606
2607 const fs_builder ibld(this, block, tex_inst);
2608
2609 tex_inst->offset |= fb_write->target << 24;
2610 tex_inst->eot = true;
2611 tex_inst->dst = ibld.null_reg_ud();
2612 tex_inst->size_written = 0;
2613 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2614
2615 /* Marking EOT is sufficient, lower_logical_sends() will notice the EOT
2616 * flag and submit a header together with the sampler message as required
2617 * by the hardware.
2618 */
2619 invalidate_live_intervals();
2620 return true;
2621 }
2622
2623 bool
2624 fs_visitor::opt_register_renaming()
2625 {
2626 bool progress = false;
2627 int depth = 0;
2628
2629 int remap[alloc.count];
2630 memset(remap, -1, sizeof(int) * alloc.count);
2631
2632 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2633 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2634 depth++;
2635 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2636 inst->opcode == BRW_OPCODE_WHILE) {
2637 depth--;
2638 }
2639
2640 /* Rewrite instruction sources. */
2641 for (int i = 0; i < inst->sources; i++) {
2642 if (inst->src[i].file == VGRF &&
2643 remap[inst->src[i].nr] != -1 &&
2644 remap[inst->src[i].nr] != inst->src[i].nr) {
2645 inst->src[i].nr = remap[inst->src[i].nr];
2646 progress = true;
2647 }
2648 }
2649
2650 const int dst = inst->dst.nr;
2651
2652 if (depth == 0 &&
2653 inst->dst.file == VGRF &&
2654 alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
2655 !inst->is_partial_write()) {
2656 if (remap[dst] == -1) {
2657 remap[dst] = dst;
2658 } else {
2659 remap[dst] = alloc.allocate(regs_written(inst));
2660 inst->dst.nr = remap[dst];
2661 progress = true;
2662 }
2663 } else if (inst->dst.file == VGRF &&
2664 remap[dst] != -1 &&
2665 remap[dst] != dst) {
2666 inst->dst.nr = remap[dst];
2667 progress = true;
2668 }
2669 }
2670
2671 if (progress) {
2672 invalidate_live_intervals();
2673
2674 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2675 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
2676 delta_xy[i].nr = remap[delta_xy[i].nr];
2677 }
2678 }
2679 }
2680
2681 return progress;
2682 }
2683
2684 /**
2685 * Remove redundant or useless discard jumps.
2686 *
2687 * For example, we can eliminate jumps in the following sequence:
2688 *
2689 * discard-jump (redundant with the next jump)
2690 * discard-jump (useless; jumps to the next instruction)
2691 * placeholder-halt
2692 */
2693 bool
2694 fs_visitor::opt_redundant_discard_jumps()
2695 {
2696 bool progress = false;
2697
2698 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2699
2700 fs_inst *placeholder_halt = NULL;
2701 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2702 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2703 placeholder_halt = inst;
2704 break;
2705 }
2706 }
2707
2708 if (!placeholder_halt)
2709 return false;
2710
2711 /* Delete any HALTs immediately before the placeholder halt. */
2712 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2713 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2714 prev = (fs_inst *) placeholder_halt->prev) {
2715 prev->remove(last_bblock);
2716 progress = true;
2717 }
2718
2719 if (progress)
2720 invalidate_live_intervals();
2721
2722 return progress;
2723 }
2724
2725 /**
2726 * Compute a bitmask with GRF granularity with a bit set for each GRF starting
2727 * from \p r.offset which overlaps the region starting at \p s.offset and
2728 * spanning \p ds bytes.
2729 */
2730 static inline unsigned
2731 mask_relative_to(const fs_reg &r, const fs_reg &s, unsigned ds)
2732 {
2733 const int rel_offset = reg_offset(s) - reg_offset(r);
2734 const int shift = rel_offset / REG_SIZE;
2735 const unsigned n = DIV_ROUND_UP(rel_offset % REG_SIZE + ds, REG_SIZE);
2736 assert(reg_space(r) == reg_space(s) &&
2737 shift >= 0 && shift < int(8 * sizeof(unsigned)));
2738 return ((1 << n) - 1) << shift;
2739 }
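/* Worked example (assuming REG_SIZE == 32): with r.offset == 0,
 * s.offset == 32 and ds == 40, rel_offset is 32, shift is 1 and
 * n is DIV_ROUND_UP(0 + 40, 32) == 2, so the mask is 0b110 --
 * GRFs 1 and 2 of the region starting at r overlap s.
 */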
2740
2741 bool
2742 fs_visitor::compute_to_mrf()
2743 {
2744 bool progress = false;
2745 int next_ip = 0;
2746
2747 /* No MRFs on Gen >= 7. */
2748 if (devinfo->gen >= 7)
2749 return false;
2750
2751 calculate_live_intervals();
2752
2753 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2754 int ip = next_ip;
2755 next_ip++;
2756
2757 if (inst->opcode != BRW_OPCODE_MOV ||
2758 inst->is_partial_write() ||
2759 inst->dst.file != MRF || inst->src[0].file != VGRF ||
2760 inst->dst.type != inst->src[0].type ||
2761 inst->src[0].abs || inst->src[0].negate ||
2762 !inst->src[0].is_contiguous() ||
2763 inst->src[0].offset % REG_SIZE != 0)
2764 continue;
2765
2766 /* Can't compute-to-MRF this GRF if someone else was going to
2767 * read it later.
2768 */
2769 if (this->virtual_grf_end[inst->src[0].nr] > ip)
2770 continue;
2771
2772 /* Found a move of a GRF to a MRF. Let's see if we can go rewrite the
2773 * things that computed the value of all GRFs of the source region. The
2774 * regs_left bitset keeps track of the registers we haven't yet found a
2775 * generating instruction for.
2776 */
2777 unsigned regs_left = (1 << regs_read(inst, 0)) - 1;
2778
2779 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2780 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2781 inst->src[0], inst->size_read(0))) {
2782 /* Found the last instruction that writes the register we want
2783 * to turn into a compute-to-MRF.
2784 */
2785
2786 /* If this one instruction didn't populate all the
2787 * channels, bail. We might be able to rewrite everything
2788 * that writes that reg, but it would require smarter
2789 * tracking.
2790 */
2791 if (scan_inst->is_partial_write())
2792 break;
2793
2794 /* Handling things not fully contained in the source of the copy
2795 * would need us to understand coalescing out more than one MOV at
2796 * a time.
2797 */
2798 if (!region_contained_in(scan_inst->dst, scan_inst->size_written,
2799 inst->src[0], inst->size_read(0)))
2800 break;
2801
2802 /* SEND instructions can't have MRF as a destination. */
2803 if (scan_inst->mlen)
2804 break;
2805
2806 if (devinfo->gen == 6) {
2807 /* gen6 math instructions must have the destination be
2808 * GRF, so no compute-to-MRF for them.
2809 */
2810 if (scan_inst->is_math()) {
2811 break;
2812 }
2813 }
2814
2815 /* Clear the bits for any registers this instruction overwrites. */
2816 regs_left &= ~mask_relative_to(
2817 inst->src[0], scan_inst->dst, scan_inst->size_written);
2818 if (!regs_left)
2819 break;
2820 }
2821
2822 /* We don't handle control flow here. Most computation of
2823 * values that end up in MRFs happens shortly before the MRF
2824 * write anyway.
2825 */
2826 if (block->start() == scan_inst)
2827 break;
2828
2829 /* You can't read from an MRF, so if someone else reads our
2830 * MRF's source GRF that we wanted to rewrite, that stops us.
2831 */
2832 bool interfered = false;
2833 for (int i = 0; i < scan_inst->sources; i++) {
2834 if (regions_overlap(scan_inst->src[i], scan_inst->size_read(i),
2835 inst->src[0], inst->size_read(0))) {
2836 interfered = true;
2837 }
2838 }
2839 if (interfered)
2840 break;
2841
2842 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2843 inst->dst, inst->size_written)) {
2844 /* If somebody else writes our MRF here, we can't
2845 * compute-to-MRF before that.
2846 */
2847 break;
2848 }
2849
2850 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1 &&
2851 regions_overlap(fs_reg(MRF, scan_inst->base_mrf), scan_inst->mlen * REG_SIZE,
2852 inst->dst, inst->size_written)) {
2853 /* Found a SEND instruction, which means that there are
2854 * live values in MRFs from base_mrf to base_mrf +
2855 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2856 * above it.
2857 */
2858 break;
2859 }
2860 }
2861
2862 if (regs_left)
2863 continue;
2864
2865 /* Found all generating instructions of our MRF's source value, so it
2866 * should be safe to rewrite them to point to the MRF directly.
2867 */
2868 regs_left = (1 << regs_read(inst, 0)) - 1;
2869
2870 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2871 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2872 inst->src[0], inst->size_read(0))) {
2873 /* Clear the bits for any registers this instruction overwrites. */
2874 regs_left &= ~mask_relative_to(
2875 inst->src[0], scan_inst->dst, scan_inst->size_written);
2876
2877 const unsigned rel_offset = reg_offset(scan_inst->dst) -
2878 reg_offset(inst->src[0]);
2879
2880 if (inst->dst.nr & BRW_MRF_COMPR4) {
2881 /* Apply the same address transformation done by the hardware
2882 * for COMPR4 MRF writes.
2883 */
2884 assert(rel_offset < 2 * REG_SIZE);
2885 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE * 4;
2886
2887 /* Clear the COMPR4 bit if the generating instruction is not
2888 * compressed.
2889 */
2890 if (scan_inst->size_written < 2 * REG_SIZE)
2891 scan_inst->dst.nr &= ~BRW_MRF_COMPR4;
2892
2893 } else {
2894 /* Calculate the MRF number the result of this instruction is
2895 * ultimately written to.
2896 */
2897 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE;
2898 }
2899
2900 scan_inst->dst.file = MRF;
2901 scan_inst->dst.offset = inst->dst.offset + rel_offset % REG_SIZE;
2902 scan_inst->saturate |= inst->saturate;
2903 if (!regs_left)
2904 break;
2905 }
2906 }
2907
2908 assert(!regs_left);
2909 inst->remove(block);
2910 progress = true;
2911 }
2912
2913 if (progress)
2914 invalidate_live_intervals();
2915
2916 return progress;
2917 }
2918
2919 /**
2920 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2921 * flow. We could probably do better here with some form of divergence
2922 * analysis.
2923 */
2924 bool
2925 fs_visitor::eliminate_find_live_channel()
2926 {
2927 bool progress = false;
2928 unsigned depth = 0;
2929
2930 if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
2931 /* The optimization below assumes that channel zero is live on thread
2932 * dispatch, which may not be the case if the fixed function dispatches
2933 * threads sparsely.
2934 */
2935 return false;
2936 }
2937
2938 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2939 switch (inst->opcode) {
2940 case BRW_OPCODE_IF:
2941 case BRW_OPCODE_DO:
2942 depth++;
2943 break;
2944
2945 case BRW_OPCODE_ENDIF:
2946 case BRW_OPCODE_WHILE:
2947 depth--;
2948 break;
2949
2950 case FS_OPCODE_DISCARD_JUMP:
2951 /* This can potentially make control flow non-uniform until the end
2952 * of the program.
2953 */
2954 return progress;
2955
2956 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2957 if (depth == 0) {
2958 inst->opcode = BRW_OPCODE_MOV;
2959 inst->src[0] = brw_imm_ud(0u);
2960 inst->sources = 1;
2961 inst->force_writemask_all = true;
2962 progress = true;
2963 }
2964 break;
2965
2966 default:
2967 break;
2968 }
2969 }
2970
2971 return progress;
2972 }
2973
2974 /**
2975 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2976 * instructions to FS_OPCODE_REP_FB_WRITE.
2977 */
2978 void
2979 fs_visitor::emit_repclear_shader()
2980 {
2981 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2982 int base_mrf = 0;
2983 int color_mrf = base_mrf + 2;
2984 fs_inst *mov;
2985
2986 if (uniforms > 0) {
2987 mov = bld.exec_all().group(4, 0)
2988 .MOV(brw_message_reg(color_mrf),
2989 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
2990 } else {
2991 struct brw_reg reg =
2992 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
2993 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
2994 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2995
2996 mov = bld.exec_all().group(4, 0)
2997 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
2998 }
2999
3000 fs_inst *write;
3001 if (key->nr_color_regions == 1) {
3002 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3003 write->saturate = key->clamp_fragment_color;
3004 write->base_mrf = color_mrf;
3005 write->target = 0;
3006 write->header_size = 0;
3007 write->mlen = 1;
3008 } else {
3009 assume(key->nr_color_regions > 0);
3010 for (int i = 0; i < key->nr_color_regions; ++i) {
3011 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3012 write->saturate = key->clamp_fragment_color;
3013 write->base_mrf = base_mrf;
3014 write->target = i;
3015 write->header_size = 2;
3016 write->mlen = 3;
3017 }
3018 }
3019 write->eot = true;
3020
3021 calculate_cfg();
3022
3023 assign_constant_locations();
3024 assign_curb_setup();
3025
3026 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
3027 if (uniforms > 0) {
3028 assert(mov->src[0].file == FIXED_GRF);
3029 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
3030 }
3031 }
3032
3033 /**
3034 * Walks through basic blocks, looking for repeated MRF writes and
3035 * removing the later ones.
3036 */
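/* A small made-up example of what gets removed:
 *
 *    mov(8) m2<1>F vgrf7<8,8,1>F
 *    ... (nothing writes m2 or vgrf7, no control flow) ...
 *    mov(8) m2<1>F vgrf7<8,8,1>F   <- identical, deleted
 *
 * Any intervening write to m2 or vgrf7, or any control flow, clears the
 * tracking and keeps the second MOV.
 */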
3037 bool
3038 fs_visitor::remove_duplicate_mrf_writes()
3039 {
3040 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
3041 bool progress = false;
3042
3043 /* Need to update the MRF tracking for compressed instructions. */
3044 if (dispatch_width >= 16)
3045 return false;
3046
3047 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3048
3049 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3050 if (inst->is_control_flow()) {
3051 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3052 }
3053
3054 if (inst->opcode == BRW_OPCODE_MOV &&
3055 inst->dst.file == MRF) {
3056 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
3057 if (prev_inst && inst->equals(prev_inst)) {
3058 inst->remove(block);
3059 progress = true;
3060 continue;
3061 }
3062 }
3063
3064 /* Clear out the last-write records for MRFs that were overwritten. */
3065 if (inst->dst.file == MRF) {
3066 last_mrf_move[inst->dst.nr] = NULL;
3067 }
3068
3069 if (inst->mlen > 0 && inst->base_mrf != -1) {
3070 /* Found a SEND instruction, which will include two or fewer
3071 * implied MRF writes. We could do better here.
3072 */
3073 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3074 last_mrf_move[inst->base_mrf + i] = NULL;
3075 }
3076 }
3077
3078 /* Clear out any MRF move records whose sources got overwritten. */
3079 for (unsigned i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
3080 if (last_mrf_move[i] &&
3081 regions_overlap(inst->dst, inst->size_written,
3082 last_mrf_move[i]->src[0],
3083 last_mrf_move[i]->size_read(0))) {
3084 last_mrf_move[i] = NULL;
3085 }
3086 }
3087
3088 if (inst->opcode == BRW_OPCODE_MOV &&
3089 inst->dst.file == MRF &&
3090 inst->src[0].file != ARF &&
3091 !inst->is_partial_write()) {
3092 last_mrf_move[inst->dst.nr] = inst;
3093 }
3094 }
3095
3096 if (progress)
3097 invalidate_live_intervals();
3098
3099 return progress;
3100 }
3101
3102 /**
3103 * The rounding mode is specified for each conversion instruction, but
3104 * on the hardware it is a piece of state. So once it has been set, we
3105 * don't need to set it again for subsequent conversions.
3106 *
3107 * This is useful for vector/matrix conversions, as setting the
3108 * mode once is enough for the full vector/matrix.
3109 */
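/* E.g. (sketch): converting a vec4 emits four SHADER_OPCODE_RND_MODE
 * instructions with the same mode back to back within a block; the first
 * one is kept and the other three are removed, since the mode state still
 * holds the right value.
 */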
3110 bool
3111 fs_visitor::remove_extra_rounding_modes()
3112 {
3113 bool progress = false;
3114
3115 foreach_block (block, cfg) {
3116 brw_rnd_mode prev_mode = BRW_RND_MODE_UNSPECIFIED;
3117
3118 foreach_inst_in_block_safe (fs_inst, inst, block) {
3119 if (inst->opcode == SHADER_OPCODE_RND_MODE) {
3120 assert(inst->src[0].file == BRW_IMMEDIATE_VALUE);
3121 const brw_rnd_mode mode = (brw_rnd_mode) inst->src[0].d;
3122 if (mode == prev_mode) {
3123 inst->remove(block);
3124 progress = true;
3125 } else {
3126 prev_mode = mode;
3127 }
3128 }
3129 }
3130 }
3131
3132 if (progress)
3133 invalidate_live_intervals();
3134
3135 return progress;
3136 }
3137
3138 static void
3139 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3140 {
3141 /* Clear the flag for registers that actually got read (as expected). */
3142 for (int i = 0; i < inst->sources; i++) {
3143 int grf;
3144 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3145 grf = inst->src[i].nr;
3146 } else {
3147 continue;
3148 }
3149
3150 if (grf >= first_grf &&
3151 grf < first_grf + grf_len) {
3152 deps[grf - first_grf] = false;
3153 if (inst->exec_size == 16)
3154 deps[grf - first_grf + 1] = false;
3155 }
3156 }
3157 }
3158
3159 /**
3160 * Implements this workaround for the original 965:
3161 *
3162 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3163 * check for post destination dependencies on this instruction, software
3164 * must ensure that there is no destination hazard for the case of ‘write
3165 * followed by a posted write’ shown in the following example.
3166 *
3167 * 1. mov r3 0
3168 * 2. send r3.xy <rest of send instruction>
3169 * 3. mov r2 r3
3170 *
3171 * Due to no post-destination dependency check on the ‘send’, the above
3172 * code sequence could have two instructions (1 and 2) in flight at the
3173 * same time that both consider ‘r3’ as the target of their final writes.
3174 */
3175 void
3176 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3177 fs_inst *inst)
3178 {
3179 int write_len = regs_written(inst);
3180 int first_write_grf = inst->dst.nr;
3181 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3182 assert(write_len < (int)sizeof(needs_dep) - 1);
3183
3184 memset(needs_dep, false, sizeof(needs_dep));
3185 memset(needs_dep, true, write_len);
3186
3187 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3188
3189 /* Walk backwards looking for writes to registers we're writing which
3190 * aren't read since being written. If we hit the start of the program,
3191 * we assume that there are no outstanding dependencies on entry to the
3192 * program.
3193 */
3194 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3195 /* If we hit control flow, assume that there *are* outstanding
3196 * dependencies, and force their cleanup before our instruction.
3197 */
3198 if (block->start() == scan_inst && block->num != 0) {
3199 for (int i = 0; i < write_len; i++) {
3200 if (needs_dep[i])
3201 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3202 first_write_grf + i);
3203 }
3204 return;
3205 }
3206
3207 /* We insert our reads as late as possible on the assumption that any
3208 * instruction but a MOV that might have left us an outstanding
3209 * dependency has more latency than a MOV.
3210 */
3211 if (scan_inst->dst.file == VGRF) {
3212 for (unsigned i = 0; i < regs_written(scan_inst); i++) {
3213 int reg = scan_inst->dst.nr + i;
3214
3215 if (reg >= first_write_grf &&
3216 reg < first_write_grf + write_len &&
3217 needs_dep[reg - first_write_grf]) {
3218 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3219 needs_dep[reg - first_write_grf] = false;
3220 if (scan_inst->exec_size == 16)
3221 needs_dep[reg - first_write_grf + 1] = false;
3222 }
3223 }
3224 }
3225
3226 /* Clear the flag for registers that actually got read (as expected). */
3227 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3228
3229 /* Continue the loop only if we haven't resolved all the dependencies */
3230 int i;
3231 for (i = 0; i < write_len; i++) {
3232 if (needs_dep[i])
3233 break;
3234 }
3235 if (i == write_len)
3236 return;
3237 }
3238 }
3239
3240 /**
3241 * Implements this workaround for the original 965:
3242 *
3243 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3244 * used as a destination register until after it has been sourced by an
3245 * instruction with a different destination register.
3246 */
3247 void
3248 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3249 {
3250 int write_len = regs_written(inst);
3251 int first_write_grf = inst->dst.nr;
3252 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3253 assert(write_len < (int)sizeof(needs_dep) - 1);
3254
3255 memset(needs_dep, false, sizeof(needs_dep));
3256 memset(needs_dep, true, write_len);
3257 /* Walk forwards looking for writes to registers we're writing which aren't
3258 * read before being written.
3259 */
3260 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3261 /* If we hit control flow, force resolve all remaining dependencies. */
3262 if (block->end() == scan_inst && block->num != cfg->num_blocks - 1) {
3263 for (int i = 0; i < write_len; i++) {
3264 if (needs_dep[i])
3265 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3266 first_write_grf + i);
3267 }
3268 return;
3269 }
3270
3271 /* Clear the flag for registers that actually got read (as expected). */
3272 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3273
3274 /* We insert our reads as late as possible since they're reading the
3275 * result of a SEND, which has massive latency.
3276 */
3277 if (scan_inst->dst.file == VGRF &&
3278 scan_inst->dst.nr >= first_write_grf &&
3279 scan_inst->dst.nr < first_write_grf + write_len &&
3280 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3281 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3282 scan_inst->dst.nr);
3283 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3284 }
3285
3286 /* Continue the loop only if we haven't resolved all the dependencies */
3287 int i;
3288 for (i = 0; i < write_len; i++) {
3289 if (needs_dep[i])
3290 break;
3291 }
3292 if (i == write_len)
3293 return;
3294 }
3295 }
3296
3297 void
3298 fs_visitor::insert_gen4_send_dependency_workarounds()
3299 {
3300 if (devinfo->gen != 4 || devinfo->is_g4x)
3301 return;
3302
3303 bool progress = false;
3304
3305 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3306 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3307 insert_gen4_pre_send_dependency_workarounds(block, inst);
3308 insert_gen4_post_send_dependency_workarounds(block, inst);
3309 progress = true;
3310 }
3311 }
3312
3313 if (progress)
3314 invalidate_live_intervals();
3315 }
3316
3317 /**
3318 * Turns the generic expression-style uniform pull constant load instruction
3319 * into a hardware-specific series of instructions for loading a pull
3320 * constant.
3321 *
3322 * The expression style allows the CSE pass before this to optimize out
3323 * repeated loads from the same offset, and gives the pre-register-allocation
3324 * scheduling full flexibility, while the conversion to native instructions
3325 * allows the post-register-allocation scheduler the best information
3326 * possible.
3327 *
3328 * Note that execution masking for setting up pull constant loads is special:
3329 * the channels that need to be written are unrelated to the current execution
3330 * mask, since a later instruction will use one of the result channels as a
3331 * source operand for all 8 or 16 of its channels.
3332 */
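/* On Gen7+, the rough shape of the lowering (register numbers invented):
 *
 *    uniform_pull_constant_load(8) vgrf5:UD surf_idx offset_B
 *
 * becomes
 *
 *    mov(8)  vgrf6:UD   g0:UD         { exec_all }
 *    mov(1)  vgrf6.2:UD offset_B/16   { exec_all }
 *    uniform_pull_constant_load_gen7(8) vgrf5:UD surf_idx vgrf6
 *
 * i.e. a message header derived from g0 with the constant-buffer offset
 * (in owords) written into dword 2 of the header.
 */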
3333 void
3334 fs_visitor::lower_uniform_pull_constant_loads()
3335 {
3336 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3337 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3338 continue;
3339
3340 if (devinfo->gen >= 7) {
3341 const fs_builder ubld = fs_builder(this, block, inst).exec_all();
3342 const fs_reg payload = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD);
3343
3344 ubld.group(8, 0).MOV(payload,
3345 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3346 ubld.group(1, 0).MOV(component(payload, 2),
3347 brw_imm_ud(inst->src[1].ud / 16));
3348
3349 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3350 inst->src[1] = payload;
3351 inst->header_size = 1;
3352 inst->mlen = 1;
3353
3354 invalidate_live_intervals();
3355 } else {
3356 /* Before register allocation, we didn't tell the scheduler about the
3357 * MRF we use. We know it's safe to use this MRF because nothing
3358 * else does except for register spill/unspill, which generates and
3359 * uses its MRF within a single IR instruction.
3360 */
3361 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3362 inst->mlen = 1;
3363 }
3364 }
3365 }
3366
3367 bool
3368 fs_visitor::lower_load_payload()
3369 {
3370 bool progress = false;
3371
3372 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3373 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3374 continue;
3375
3376 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3377 assert(inst->saturate == false);
3378 fs_reg dst = inst->dst;
3379
3380 /* Get rid of COMPR4. We'll add it back in if we need it */
3381 if (dst.file == MRF)
3382 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3383
3384 const fs_builder ibld(this, block, inst);
3385 const fs_builder hbld = ibld.exec_all().group(8, 0);
3386
3387 for (uint8_t i = 0; i < inst->header_size; i++) {
3388 if (inst->src[i].file != BAD_FILE) {
3389 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3390 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3391 hbld.MOV(mov_dst, mov_src);
3392 }
3393 dst = offset(dst, hbld, 1);
3394 }
3395
3396 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3397 inst->exec_size > 8) {
3398 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3399 * a straightforward copy. Instead, the result of the
3400 * LOAD_PAYLOAD is treated as interleaved and the first four
3401 * non-header sources are unpacked as:
3402 *
3403 * m + 0: r0
3404 * m + 1: g0
3405 * m + 2: b0
3406 * m + 3: a0
3407 * m + 4: r1
3408 * m + 5: g1
3409 * m + 6: b1
3410 * m + 7: a1
3411 *
3412 * This is used for gen <= 5 fb writes.
3413 */
3414 assert(inst->exec_size == 16);
3415 assert(inst->header_size + 4 <= inst->sources);
3416 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3417 if (inst->src[i].file != BAD_FILE) {
3418 if (devinfo->has_compr4) {
3419 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3420 compr4_dst.nr |= BRW_MRF_COMPR4;
3421 ibld.MOV(compr4_dst, inst->src[i]);
3422 } else {
3423 /* Platform doesn't have COMPR4. We have to fake it */
3424 fs_reg mov_dst = retype(dst, inst->src[i].type);
3425 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3426 mov_dst.nr += 4;
3427 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3428 }
3429 }
3430
3431 dst.nr++;
3432 }
3433
3434 /* The loop above only ever incremented us through the first set
3435 * of 4 registers. However, thanks to the magic of COMPR4, we
3436 * actually wrote to the first 8 registers, so we need to take
3437 * that into account now.
3438 */
3439 dst.nr += 4;
3440
3441 /* The COMPR4 code took care of the first 4 sources. We'll let
3442 * the regular path handle any remaining sources. Yes, we are
3443 * modifying the instruction but we're about to delete it so
3444 * this really doesn't hurt anything.
3445 */
3446 inst->header_size += 4;
3447 }
3448
3449 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3450 if (inst->src[i].file != BAD_FILE)
3451 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3452 dst = offset(dst, ibld, 1);
3453 }
3454
3455 inst->remove(block);
3456 progress = true;
3457 }
3458
3459 if (progress)
3460 invalidate_live_intervals();
3461
3462 return progress;
3463 }
3464
3465 bool
3466 fs_visitor::lower_integer_multiplication()
3467 {
3468 bool progress = false;
3469
3470 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3471 const fs_builder ibld(this, block, inst);
3472
3473 if (inst->opcode == BRW_OPCODE_MUL) {
3474 if (inst->dst.is_accumulator() ||
3475 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3476 inst->dst.type != BRW_REGISTER_TYPE_UD))
3477 continue;
3478
3479 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3480 * operation directly, but CHV/BXT cannot.
3481 */
3482 if (devinfo->gen >= 8 &&
3483 !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo))
3484 continue;
3485
3486 if (inst->src[1].file == IMM &&
3487 inst->src[1].ud < (1 << 16)) {
3488 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3489 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3490 * src1 are used.
3491 *
3492 * If multiplying by an immediate value that fits in 16-bits, do a
3493 * single MUL instruction with that value in the proper location.
3494 */
3495 if (devinfo->gen < 7) {
3496 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
3497 inst->dst.type);
3498 ibld.MOV(imm, inst->src[1]);
3499 ibld.MUL(inst->dst, imm, inst->src[0]);
3500 } else {
3501 const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
3502 ibld.MUL(inst->dst, inst->src[0],
3503 ud ? brw_imm_uw(inst->src[1].ud)
3504 : brw_imm_w(inst->src[1].d));
3505 }
3506 } else {
3507 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3508 * do 32-bit integer multiplication in one instruction, but instead
3509 * must do a sequence (which actually calculates a 64-bit result):
3510 *
3511 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3512 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3513 * mov(8) g2<1>D acc0<8,8,1>D
3514 *
3515 * But on Gen > 6, the ability to use second accumulator register
3516 * (acc1) for non-float data types was removed, preventing a simple
3517 * implementation in SIMD16. A 16-channel result can be calculated by
3518 * executing the three instructions twice in SIMD8, once with quarter
3519 * control of 1Q for the first eight channels and again with 2Q for
3520 * the second eight channels.
3521 *
3522 * Which accumulator register is implicitly accessed (by AccWrEnable
3523 * for instance) is determined by the quarter control. Unfortunately
3524 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3525 * implicit accumulator access by an instruction with 2Q will access
3526 * acc1 regardless of whether the data type is usable in acc1.
3527 *
3528 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3529 * integer data types.
3530 *
3531 * Since we only want the low 32-bits of the result, we can do two
3532 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3533 * adjust the high result and add them (like the mach is doing):
3534 *
3535 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3536 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3537 * shl(8) g9<1>D g8<8,8,1>D 16D
3538 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3539 *
3540 * We avoid the shl instruction by realizing that we only want to add
3541 * the low 16-bits of the "high" result to the high 16-bits of the
3542 * "low" result and using proper regioning on the add:
3543 *
3544 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3545 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3546 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3547 *
3548 * Since it does not use the (single) accumulator register, we can
3549 * schedule multi-component multiplications much better.
3550 */
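/* Quick numeric check of the decomposition above (made-up operands):
 * with src0 = 0x00010002 and src1 = 0x00030004, the full product is
 * 0x3000A0008, so the low 32 bits are 0x000A0008. The sequence gives
 * low = 0x00040008 (src0 * 0x0004), high = 0x00030006 (src0 * 0x0003),
 * and adding the low word of high (0x0006) into the high word of low
 * (0x0004) yields exactly 0x000A0008.
 */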
3551
3552 bool needs_mov = false;
3553 fs_reg orig_dst = inst->dst;
3554 fs_reg low = inst->dst;
3555 if (orig_dst.is_null() || orig_dst.file == MRF ||
3556 regions_overlap(inst->dst, inst->size_written,
3557 inst->src[0], inst->size_read(0)) ||
3558 regions_overlap(inst->dst, inst->size_written,
3559 inst->src[1], inst->size_read(1))) {
3560 needs_mov = true;
3561 low.nr = alloc.allocate(regs_written(inst));
3562 low.offset = low.offset % REG_SIZE;
3563 }
3564
3565 fs_reg high = inst->dst;
3566 high.nr = alloc.allocate(regs_written(inst));
3567 high.offset = high.offset % REG_SIZE;
3568
3569 if (devinfo->gen >= 7) {
3570 if (inst->src[1].file == IMM) {
3571 ibld.MUL(low, inst->src[0],
3572 brw_imm_uw(inst->src[1].ud & 0xffff));
3573 ibld.MUL(high, inst->src[0],
3574 brw_imm_uw(inst->src[1].ud >> 16));
3575 } else {
3576 ibld.MUL(low, inst->src[0],
3577 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
3578 ibld.MUL(high, inst->src[0],
3579 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
3580 }
3581 } else {
3582 ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
3583 inst->src[1]);
3584 ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
3585 inst->src[1]);
3586 }
3587
3588 ibld.ADD(subscript(low, BRW_REGISTER_TYPE_UW, 1),
3589 subscript(low, BRW_REGISTER_TYPE_UW, 1),
3590 subscript(high, BRW_REGISTER_TYPE_UW, 0));
3591
3592 if (needs_mov || inst->conditional_mod) {
3593 set_condmod(inst->conditional_mod,
3594 ibld.MOV(orig_dst, low));
3595 }
3596 }
3597
3598 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3599 /* Should have been lowered to 8-wide. */
3600 assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
3601 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3602 inst->dst.type);
3603 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3604 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3605
3606 if (devinfo->gen >= 8) {
3607          /* Until Gen8, integer multiplies read 32 bits from one source and
3608           * 16 bits from the other, relying on the MACH instruction to
3609           * generate the high bits of the result.
3610 *
3611 * On Gen8, the multiply instruction does a full 32x32-bit
3612 * multiply, but in order to do a 64-bit multiply we can simulate
3613 * the previous behavior and then use a MACH instruction.
3614 *
3615 * FINISHME: Don't use source modifiers on src1.
3616 */
3617 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3618 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3619 mul->src[1].type = BRW_REGISTER_TYPE_UW;
3620 mul->src[1].stride *= 2;
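         /* Note that retyping src1 to UW with twice the stride makes the MUL
          * read only the low 16 bits of each 32-bit channel of src1, which is
          * exactly the pre-Gen8 operand behavior that the following MACH
          * expects.
          */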
3621
3622 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3623 inst->group > 0) {
3624 /* Among other things the quarter control bits influence which
3625 * accumulator register is used by the hardware for instructions
3626 * that access the accumulator implicitly (e.g. MACH). A
3627 * second-half instruction would normally map to acc1, which
3628 * doesn't exist on Gen7 and up (the hardware does emulate it for
3629 * floating-point instructions *only* by taking advantage of the
3630 * extra precision of acc0 not normally used for floating point
3631 * arithmetic).
3632 *
3633 * HSW and up are careful enough not to try to access an
3634 * accumulator register that doesn't exist, but on earlier Gen7
3635 * hardware we need to make sure that the quarter control bits are
3636 * zero to avoid non-deterministic behaviour and emit an extra MOV
3637 * to get the result masked correctly according to the current
3638 * channel enables.
3639 */
3640 mach->group = 0;
3641 mach->force_writemask_all = true;
3642 mach->dst = ibld.vgrf(inst->dst.type);
3643 ibld.MOV(inst->dst, mach->dst);
3644 }
3645 } else {
3646 continue;
3647 }
3648
3649 inst->remove(block);
3650 progress = true;
3651 }
3652
3653 if (progress)
3654 invalidate_live_intervals();
3655
3656 return progress;
3657 }
3658
3659 bool
3660 fs_visitor::lower_minmax()
3661 {
3662 assert(devinfo->gen < 6);
3663
3664 bool progress = false;
3665
3666 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3667 const fs_builder ibld(this, block, inst);
3668
3669 if (inst->opcode == BRW_OPCODE_SEL &&
3670 inst->predicate == BRW_PREDICATE_NONE) {
3671 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
3672 * the original SEL.L/GE instruction
3673 */
3674 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
3675 inst->conditional_mod);
3676 inst->predicate = BRW_PREDICATE_NORMAL;
3677 inst->conditional_mod = BRW_CONDITIONAL_NONE;
3678
3679 progress = true;
3680 }
3681 }
3682
3683 if (progress)
3684 invalidate_live_intervals();
3685
3686 return progress;
3687 }
3688
3689 static void
3690 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3691 fs_reg *dst, fs_reg color, unsigned components)
3692 {
3693 if (key->clamp_fragment_color) {
3694 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3695 assert(color.type == BRW_REGISTER_TYPE_F);
3696
3697 for (unsigned i = 0; i < components; i++)
3698 set_saturate(true,
3699 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3700
3701 color = tmp;
3702 }
3703
3704 for (unsigned i = 0; i < components; i++)
3705 dst[i] = offset(color, bld, i);
3706 }
3707
3708 static void
3709 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3710 const struct brw_wm_prog_data *prog_data,
3711 const brw_wm_prog_key *key,
3712 const fs_visitor::thread_payload &payload)
3713 {
3714 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
3715 const gen_device_info *devinfo = bld.shader->devinfo;
3716 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
3717 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
3718 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
3719 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
3720 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
3721 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
3722 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
3723 const unsigned components =
3724 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
3725
3726 /* We can potentially have a message length of up to 15, so we have to set
3727 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3728 */
3729 fs_reg sources[15];
3730 int header_size = 2, payload_header_size;
3731 unsigned length = 0;
3732
3733 /* From the Sandy Bridge PRM, volume 4, page 198:
3734 *
3735 * "Dispatched Pixel Enables. One bit per pixel indicating
3736 * which pixels were originally enabled when the thread was
3737 * dispatched. This field is only required for the end-of-
3738 * thread message and on all dual-source messages."
3739 */
3740 if (devinfo->gen >= 6 &&
3741 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3742 color1.file == BAD_FILE &&
3743 key->nr_color_regions == 1) {
3744 header_size = 0;
3745 }
3746
3747 if (header_size != 0) {
3748 assert(header_size == 2);
3749 /* Allocate 2 registers for a header */
3750 length += 2;
3751 }
3752
3753 if (payload.aa_dest_stencil_reg) {
3754 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
3755 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3756 .MOV(sources[length],
3757 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3758 length++;
3759 }
3760
3761 if (sample_mask.file != BAD_FILE) {
3762 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
3763 BRW_REGISTER_TYPE_UD);
3764
3765 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3766       * relevant. Since it's unsigned single words, one vgrf is always
3767       * 16-wide, but only the lower or upper 8 channels will be used by the
3768       * hardware when doing a SIMD8 write, depending on whether we have
3769       * selected the subspans for the first or second half respectively.
3770 */
3771 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3772 sample_mask.type = BRW_REGISTER_TYPE_UW;
3773 sample_mask.stride *= 2;
3774
3775 bld.exec_all().annotate("FB write oMask")
3776 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
3777 inst->group),
3778 sample_mask);
3779 length++;
3780 }
3781
3782 payload_header_size = length;
3783
3784 if (src0_alpha.file != BAD_FILE) {
3785 /* FIXME: This is being passed at the wrong location in the payload and
3786 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3787 * It's supposed to be immediately before oMask but there seems to be no
3788 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3789 * requires header sources to form a contiguous segment at the beginning
3790 * of the message and src0_alpha has per-channel semantics.
3791 */
3792 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3793 length++;
3794 } else if (key->replicate_alpha && inst->target != 0) {
3795       /* Handle the case when the fragment shader doesn't write to draw
3796        * buffer zero. There's no need to call setup_color_payload() for
3797        * src0_alpha because the alpha value will be undefined.
3798 */
3799 length++;
3800 }
3801
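   /* Each color attachment always occupies four slots in the payload,
    * regardless of how many components were actually written.
    */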
3802 setup_color_payload(bld, key, &sources[length], color0, components);
3803 length += 4;
3804
3805 if (color1.file != BAD_FILE) {
3806 setup_color_payload(bld, key, &sources[length], color1, components);
3807 length += 4;
3808 }
3809
3810 if (src_depth.file != BAD_FILE) {
3811 sources[length] = src_depth;
3812 length++;
3813 }
3814
3815 if (dst_depth.file != BAD_FILE) {
3816 sources[length] = dst_depth;
3817 length++;
3818 }
3819
3820 if (src_stencil.file != BAD_FILE) {
3821 assert(devinfo->gen >= 9);
3822 assert(bld.dispatch_width() != 16);
3823
3824 /* XXX: src_stencil is only available on gen9+. dst_depth is never
3825 * available on gen9+. As such it's impossible to have both enabled at the
3826 * same time and therefore length cannot overrun the array.
3827 */
3828 assert(length < 15);
3829
3830 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3831 bld.exec_all().annotate("FB write OS")
3832 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
3833 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
3834 length++;
3835 }
3836
3837 fs_inst *load;
3838 if (devinfo->gen >= 7) {
3839 /* Send from the GRF */
3840 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
3841 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3842 payload.nr = bld.shader->alloc.allocate(regs_written(load));
3843 load->dst = payload;
3844
3845 inst->src[0] = payload;
3846 inst->resize_sources(1);
3847 } else {
3848 /* Send from the MRF */
3849 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3850 sources, length, payload_header_size);
3851
3852 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3853 * will do this for us if we just give it a COMPR4 destination.
3854 */
3855 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3856 load->dst.nr |= BRW_MRF_COMPR4;
3857
3858 inst->resize_sources(0);
3859 inst->base_mrf = 1;
3860 }
3861
3862 inst->opcode = FS_OPCODE_FB_WRITE;
3863 inst->mlen = regs_written(load);
3864 inst->header_size = header_size;
3865 }
3866
3867 static void
3868 lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
3869 {
3870 const fs_builder &ubld = bld.exec_all();
3871 const unsigned length = 2;
3872 const fs_reg header = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD, length);
3873
3874 ubld.group(16, 0)
3875 .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3876
3877 inst->resize_sources(1);
3878 inst->src[0] = header;
3879 inst->opcode = FS_OPCODE_FB_READ;
3880 inst->mlen = length;
3881 inst->header_size = length;
3882 }
3883
3884 static void
3885 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3886 const fs_reg &coordinate,
3887 const fs_reg &shadow_c,
3888 const fs_reg &lod, const fs_reg &lod2,
3889 const fs_reg &surface,
3890 const fs_reg &sampler,
3891 unsigned coord_components,
3892 unsigned grad_components)
3893 {
3894 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3895 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3896 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3897 fs_reg msg_end = msg_begin;
3898
3899 /* g0 header. */
3900 msg_end = offset(msg_end, bld.group(8, 0), 1);
3901
3902 for (unsigned i = 0; i < coord_components; i++)
3903 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3904 offset(coordinate, bld, i));
3905
3906 msg_end = offset(msg_end, bld, coord_components);
3907
3908 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
3909 * require all three components to be present and zero if they are unused.
3910 */
3911 if (coord_components > 0 &&
3912 (has_lod || shadow_c.file != BAD_FILE ||
3913 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3914 for (unsigned i = coord_components; i < 3; i++)
3915 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
3916
3917 msg_end = offset(msg_end, bld, 3 - coord_components);
3918 }
3919
3920 if (op == SHADER_OPCODE_TXD) {
3921 /* TXD unsupported in SIMD16 mode. */
3922 assert(bld.dispatch_width() == 8);
3923
3924 /* the slots for u and v are always present, but r is optional */
3925 if (coord_components < 2)
3926 msg_end = offset(msg_end, bld, 2 - coord_components);
3927
3928 /* P = u, v, r
3929 * dPdx = dudx, dvdx, drdx
3930 * dPdy = dudy, dvdy, drdy
3931 *
3932 * 1-arg: Does not exist.
3933 *
3934 * 2-arg: dudx dvdx dudy dvdy
3935 * dPdx.x dPdx.y dPdy.x dPdy.y
3936 * m4 m5 m6 m7
3937 *
3938 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3939 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3940 * m5 m6 m7 m8 m9 m10
3941 */
3942 for (unsigned i = 0; i < grad_components; i++)
3943 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3944
3945 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3946
3947 for (unsigned i = 0; i < grad_components; i++)
3948 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3949
3950 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3951 }
3952
3953 if (has_lod) {
3954 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *Without*
3955 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
3956 */
3957 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3958 bld.dispatch_width() == 16);
3959
3960 const brw_reg_type type =
3961 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3962 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3963 bld.MOV(retype(msg_end, type), lod);
3964 msg_end = offset(msg_end, bld, 1);
3965 }
3966
3967 if (shadow_c.file != BAD_FILE) {
3968 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3969 /* There's no plain shadow compare message, so we use shadow
3970 * compare with a bias of 0.0.
3971 */
3972 bld.MOV(msg_end, brw_imm_f(0.0f));
3973 msg_end = offset(msg_end, bld, 1);
3974 }
3975
3976 bld.MOV(msg_end, shadow_c);
3977 msg_end = offset(msg_end, bld, 1);
3978 }
3979
3980 inst->opcode = op;
3981 inst->src[0] = reg_undef;
3982 inst->src[1] = surface;
3983 inst->src[2] = sampler;
3984 inst->resize_sources(3);
3985 inst->base_mrf = msg_begin.nr;
3986 inst->mlen = msg_end.nr - msg_begin.nr;
3987 inst->header_size = 1;
3988 }
3989
3990 static void
3991 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
3992 const fs_reg &coordinate,
3993 const fs_reg &shadow_c,
3994 const fs_reg &lod, const fs_reg &lod2,
3995 const fs_reg &sample_index,
3996 const fs_reg &surface,
3997 const fs_reg &sampler,
3998 unsigned coord_components,
3999 unsigned grad_components)
4000 {
4001 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
4002 fs_reg msg_coords = message;
4003 unsigned header_size = 0;
4004
4005 if (inst->offset != 0) {
4006 /* The offsets set up by the visitor are in the m1 header, so we can't
4007 * go headerless.
4008 */
4009 header_size = 1;
4010 message.nr--;
4011 }
4012
4013 for (unsigned i = 0; i < coord_components; i++)
4014 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
4015 offset(coordinate, bld, i));
4016
4017 fs_reg msg_end = offset(msg_coords, bld, coord_components);
4018 fs_reg msg_lod = offset(msg_coords, bld, 4);
4019
4020 if (shadow_c.file != BAD_FILE) {
4021 fs_reg msg_shadow = msg_lod;
4022 bld.MOV(msg_shadow, shadow_c);
4023 msg_lod = offset(msg_shadow, bld, 1);
4024 msg_end = msg_lod;
4025 }
4026
4027 switch (op) {
4028 case SHADER_OPCODE_TXL:
4029 case FS_OPCODE_TXB:
4030 bld.MOV(msg_lod, lod);
4031 msg_end = offset(msg_lod, bld, 1);
4032 break;
4033 case SHADER_OPCODE_TXD:
4034 /**
4035 * P = u, v, r
4036 * dPdx = dudx, dvdx, drdx
4037 * dPdy = dudy, dvdy, drdy
4038 *
4039 * Load up these values:
4040 * - dudx dudy dvdx dvdy drdx drdy
4041 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
4042 */
4043 msg_end = msg_lod;
4044 for (unsigned i = 0; i < grad_components; i++) {
4045 bld.MOV(msg_end, offset(lod, bld, i));
4046 msg_end = offset(msg_end, bld, 1);
4047
4048 bld.MOV(msg_end, offset(lod2, bld, i));
4049 msg_end = offset(msg_end, bld, 1);
4050 }
4051 break;
4052 case SHADER_OPCODE_TXS:
4053 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
4054 bld.MOV(msg_lod, lod);
4055 msg_end = offset(msg_lod, bld, 1);
4056 break;
4057 case SHADER_OPCODE_TXF:
4058 msg_lod = offset(msg_coords, bld, 3);
4059 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
4060 msg_end = offset(msg_lod, bld, 1);
4061 break;
4062 case SHADER_OPCODE_TXF_CMS:
4063 msg_lod = offset(msg_coords, bld, 3);
4064 /* lod */
4065 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
4066 /* sample index */
4067 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
4068 msg_end = offset(msg_lod, bld, 2);
4069 break;
4070 default:
4071 break;
4072 }
4073
4074 inst->opcode = op;
4075 inst->src[0] = reg_undef;
4076 inst->src[1] = surface;
4077 inst->src[2] = sampler;
4078 inst->resize_sources(3);
4079 inst->base_mrf = message.nr;
4080 inst->mlen = msg_end.nr - message.nr;
4081 inst->header_size = header_size;
4082
4083 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4084 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4085 }
4086
4087 static bool
4088 is_high_sampler(const struct gen_device_info *devinfo, const fs_reg &sampler)
4089 {
4090 if (devinfo->gen < 8 && !devinfo->is_haswell)
4091 return false;
4092
4093 return sampler.file != IMM || sampler.ud >= 16;
4094 }
4095
4096 static void
4097 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
4098 const fs_reg &coordinate,
4099 const fs_reg &shadow_c,
4100 fs_reg lod, const fs_reg &lod2,
4101 const fs_reg &sample_index,
4102 const fs_reg &mcs,
4103 const fs_reg &surface,
4104 const fs_reg &sampler,
4105 const fs_reg &tg4_offset,
4106 unsigned coord_components,
4107 unsigned grad_components)
4108 {
4109 const gen_device_info *devinfo = bld.shader->devinfo;
4110 unsigned reg_width = bld.dispatch_width() / 8;
4111 unsigned header_size = 0, length = 0;
4112 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
4113 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
4114 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
4115
4116 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
4117 inst->offset != 0 || inst->eot ||
4118 op == SHADER_OPCODE_SAMPLEINFO ||
4119 is_high_sampler(devinfo, sampler)) {
4120 /* For general texture offsets (no txf workaround), we need a header to
4121 * put them in. Note that we're only reserving space for it in the
4122 * message payload as it will be initialized implicitly by the
4123 * generator.
4124 *
4125 * TG4 needs to place its channel select in the header, for interaction
4126 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
4127 * larger sampler numbers we need to offset the Sampler State Pointer in
4128 * the header.
4129 */
4130 header_size = 1;
4131 sources[0] = fs_reg();
4132 length++;
4133
4134 /* If we're requesting fewer than four channels worth of response,
4135 * and we have an explicit header, we need to set up the sampler
4136 * writemask. It's reversed from normal: 1 means "don't write".
4137 */
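      /* For example, a lowered SIMD8 message with only two response
       * registers (two channels) yields mask = ~0x3 & 0xf = 0xc, disabling
       * writes of the last two channels.
       */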
4138 if (!inst->eot && regs_written(inst) != 4 * reg_width) {
4139 assert(regs_written(inst) % reg_width == 0);
4140 unsigned mask = ~((1 << (regs_written(inst) / reg_width)) - 1) & 0xf;
4141 inst->offset |= mask << 12;
4142 }
4143 }
4144
4145 if (shadow_c.file != BAD_FILE) {
4146 bld.MOV(sources[length], shadow_c);
4147 length++;
4148 }
4149
4150 bool coordinate_done = false;
4151
4152 /* Set up the LOD info */
4153 switch (op) {
4154 case FS_OPCODE_TXB:
4155 case SHADER_OPCODE_TXL:
4156 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4157 op = SHADER_OPCODE_TXL_LZ;
4158 break;
4159 }
4160 bld.MOV(sources[length], lod);
4161 length++;
4162 break;
4163 case SHADER_OPCODE_TXD:
4164 /* TXD should have been lowered in SIMD16 mode. */
4165 assert(bld.dispatch_width() == 8);
4166
4167 /* Load dPdx and the coordinate together:
4168 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4169 */
4170 for (unsigned i = 0; i < coord_components; i++) {
4171 bld.MOV(sources[length++], offset(coordinate, bld, i));
4172
4173 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4174 * only derivatives for (u, v, r).
4175 */
4176 if (i < grad_components) {
4177 bld.MOV(sources[length++], offset(lod, bld, i));
4178 bld.MOV(sources[length++], offset(lod2, bld, i));
4179 }
4180 }
4181
4182 coordinate_done = true;
4183 break;
4184 case SHADER_OPCODE_TXS:
4185 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4186 length++;
4187 break;
4188 case SHADER_OPCODE_TXF:
4189 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4190        * On Gen9 they are u, v, lod, r.
4191 */
4192 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4193
4194 if (devinfo->gen >= 9) {
4195 if (coord_components >= 2) {
4196 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
4197 offset(coordinate, bld, 1));
4198 } else {
4199 sources[length] = brw_imm_d(0);
4200 }
4201 length++;
4202 }
4203
4204 if (devinfo->gen >= 9 && lod.is_zero()) {
4205 op = SHADER_OPCODE_TXF_LZ;
4206 } else {
4207 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
4208 length++;
4209 }
4210
4211 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
4212 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4213 offset(coordinate, bld, i));
4214
4215 coordinate_done = true;
4216 break;
4217
4218 case SHADER_OPCODE_TXF_CMS:
4219 case SHADER_OPCODE_TXF_CMS_W:
4220 case SHADER_OPCODE_TXF_UMS:
4221 case SHADER_OPCODE_TXF_MCS:
4222 if (op == SHADER_OPCODE_TXF_UMS ||
4223 op == SHADER_OPCODE_TXF_CMS ||
4224 op == SHADER_OPCODE_TXF_CMS_W) {
4225 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
4226 length++;
4227 }
4228
4229 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
4230 /* Data from the multisample control surface. */
4231 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
4232 length++;
4233
4234 /* On Gen9+ we'll use ld2dms_w instead which has two registers for
4235 * the MCS data.
4236 */
4237 if (op == SHADER_OPCODE_TXF_CMS_W) {
4238 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
4239 mcs.file == IMM ?
4240 mcs :
4241 offset(mcs, bld, 1));
4242 length++;
4243 }
4244 }
4245
4246 /* There is no offsetting for this message; just copy in the integer
4247 * texture coordinates.
4248 */
4249 for (unsigned i = 0; i < coord_components; i++)
4250 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4251 offset(coordinate, bld, i));
4252
4253 coordinate_done = true;
4254 break;
4255 case SHADER_OPCODE_TG4_OFFSET:
4256 /* More crazy intermixing */
4257 for (unsigned i = 0; i < 2; i++) /* u, v */
4258 bld.MOV(sources[length++], offset(coordinate, bld, i));
4259
4260 for (unsigned i = 0; i < 2; i++) /* offu, offv */
4261 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4262 offset(tg4_offset, bld, i));
4263
4264 if (coord_components == 3) /* r if present */
4265 bld.MOV(sources[length++], offset(coordinate, bld, 2));
4266
4267 coordinate_done = true;
4268 break;
4269 default:
4270 break;
4271 }
4272
4273 /* Set up the coordinate (except for cases where it was done above) */
4274 if (!coordinate_done) {
4275 for (unsigned i = 0; i < coord_components; i++)
4276 bld.MOV(sources[length++], offset(coordinate, bld, i));
4277 }
4278
4279 int mlen;
4280 if (reg_width == 2)
4281 mlen = length * reg_width - header_size;
4282 else
4283 mlen = length * reg_width;
4284
4285 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
4286 BRW_REGISTER_TYPE_F);
4287 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
4288
4289 /* Generate the SEND. */
4290 inst->opcode = op;
4291 inst->src[0] = src_payload;
4292 inst->src[1] = surface;
4293 inst->src[2] = sampler;
4294 inst->resize_sources(3);
4295 inst->mlen = mlen;
4296 inst->header_size = header_size;
4297
4298 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4299 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4300 }
4301
4302 static void
4303 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
4304 {
4305 const gen_device_info *devinfo = bld.shader->devinfo;
4306 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
4307 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4308 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
4309 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
4310 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
4311 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
4312 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
4313 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
4314 const fs_reg &tg4_offset = inst->src[TEX_LOGICAL_SRC_TG4_OFFSET];
4315 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
4316 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4317 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
4318 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
4319
4320 if (devinfo->gen >= 7) {
4321 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
4322 shadow_c, lod, lod2, sample_index,
4323 mcs, surface, sampler, tg4_offset,
4324 coord_components, grad_components);
4325 } else if (devinfo->gen >= 5) {
4326 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
4327 shadow_c, lod, lod2, sample_index,
4328 surface, sampler,
4329 coord_components, grad_components);
4330 } else {
4331 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
4332 shadow_c, lod, lod2,
4333 surface, sampler,
4334 coord_components, grad_components);
4335 }
4336 }
4337
4338 /**
4339 * Initialize the header present in some typed and untyped surface
4340 * messages.
4341 */
4342 static fs_reg
4343 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
4344 {
4345 fs_builder ubld = bld.exec_all().group(8, 0);
4346 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4347 ubld.MOV(dst, brw_imm_d(0));
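   /* The sample mask occupies the last dword (component 7) of the
    * single-register header; the rest is zero-initialized above.
    */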
4348 ubld.group(1, 0).MOV(component(dst, 7), sample_mask);
4349 return dst;
4350 }
4351
4352 static void
4353 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
4354 const fs_reg &sample_mask)
4355 {
4356 /* Get the logical send arguments. */
4357 const fs_reg &addr = inst->src[0];
4358 const fs_reg &src = inst->src[1];
4359 const fs_reg &surface = inst->src[2];
4360 const UNUSED fs_reg &dims = inst->src[3];
4361 const fs_reg &arg = inst->src[4];
4362
4363 /* Calculate the total number of components of the payload. */
4364 const unsigned addr_sz = inst->components_read(0);
4365 const unsigned src_sz = inst->components_read(1);
4366 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
4367 const unsigned sz = header_sz + addr_sz + src_sz;
4368
4369 /* Allocate space for the payload. */
4370 fs_reg *const components = new fs_reg[sz];
4371 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
4372 unsigned n = 0;
4373
4374 /* Construct the payload. */
4375 if (header_sz)
4376 components[n++] = emit_surface_header(bld, sample_mask);
4377
4378 for (unsigned i = 0; i < addr_sz; i++)
4379 components[n++] = offset(addr, bld, i);
4380
4381 for (unsigned i = 0; i < src_sz; i++)
4382 components[n++] = offset(src, bld, i);
4383
4384 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
4385
4386 /* Update the original instruction. */
4387 inst->opcode = op;
4388 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
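   /* For example, a SIMD16 untyped surface write with one address component
    * and four data components plus a header gives mlen = 1 + (1 + 4) * 2 = 11.
    */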
4389 inst->header_size = header_sz;
4390
4391 inst->src[0] = payload;
4392 inst->src[1] = surface;
4393 inst->src[2] = arg;
4394 inst->resize_sources(3);
4395
4396 delete[] components;
4397 }
4398
4399 static void
4400 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
4401 {
4402 const gen_device_info *devinfo = bld.shader->devinfo;
4403
4404 if (devinfo->gen >= 7) {
4405 /* We are switching the instruction from an ALU-like instruction to a
4406 * send-from-grf instruction. Since sends can't handle strides or
4407 * source modifiers, we have to make a copy of the offset source.
4408 */
4409 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4410 bld.MOV(tmp, inst->src[1]);
4411 inst->src[1] = tmp;
4412
4413 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
4414
4415 } else {
4416 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
4417 BRW_REGISTER_TYPE_UD);
4418
4419 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
4420
4421 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
4422 inst->resize_sources(1);
4423 inst->base_mrf = payload.nr;
4424 inst->header_size = 1;
4425 inst->mlen = 1 + inst->exec_size / 8;
4426 }
4427 }
4428
4429 static void
4430 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
4431 {
4432 assert(bld.shader->devinfo->gen < 6);
4433
4434 inst->base_mrf = 2;
4435 inst->mlen = inst->sources * inst->exec_size / 8;
4436
4437 if (inst->sources > 1) {
4438 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
4439 * "Message Payload":
4440 *
4441 * "Operand0[7]. For the INT DIV functions, this operand is the
4442 * denominator."
4443 * ...
4444 * "Operand1[7]. For the INT DIV functions, this operand is the
4445 * numerator."
4446 */
4447 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
4448 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
4449 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
4450
4451 inst->resize_sources(1);
4452 inst->src[0] = src0;
4453
4454 assert(inst->exec_size == 8);
4455 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
4456 }
4457 }
4458
4459 bool
4460 fs_visitor::lower_logical_sends()
4461 {
4462 bool progress = false;
4463
4464 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4465 const fs_builder ibld(this, block, inst);
4466
4467 switch (inst->opcode) {
4468 case FS_OPCODE_FB_WRITE_LOGICAL:
4469 assert(stage == MESA_SHADER_FRAGMENT);
4470 lower_fb_write_logical_send(ibld, inst,
4471 brw_wm_prog_data(prog_data),
4472 (const brw_wm_prog_key *)key,
4473 payload);
4474 break;
4475
4476 case FS_OPCODE_FB_READ_LOGICAL:
4477 lower_fb_read_logical_send(ibld, inst);
4478 break;
4479
4480 case SHADER_OPCODE_TEX_LOGICAL:
4481 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4482 break;
4483
4484 case SHADER_OPCODE_TXD_LOGICAL:
4485 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4486 break;
4487
4488 case SHADER_OPCODE_TXF_LOGICAL:
4489 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4490 break;
4491
4492 case SHADER_OPCODE_TXL_LOGICAL:
4493 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4494 break;
4495
4496 case SHADER_OPCODE_TXS_LOGICAL:
4497 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4498 break;
4499
4500 case FS_OPCODE_TXB_LOGICAL:
4501 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4502 break;
4503
4504 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4505 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4506 break;
4507
4508 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
4509 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
4510 break;
4511
4512 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4513 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4514 break;
4515
4516 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4517 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4518 break;
4519
4520 case SHADER_OPCODE_LOD_LOGICAL:
4521 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4522 break;
4523
4524 case SHADER_OPCODE_TG4_LOGICAL:
4525 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4526 break;
4527
4528 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4529 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4530 break;
4531
4532 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4533 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
4534 break;
4535
4536 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4537 lower_surface_logical_send(ibld, inst,
4538 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4539 fs_reg());
4540 break;
4541
4542 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4543 lower_surface_logical_send(ibld, inst,
4544 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4545 ibld.sample_mask_reg());
4546 break;
4547
4548 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
4549 lower_surface_logical_send(ibld, inst,
4550 SHADER_OPCODE_BYTE_SCATTERED_WRITE,
4551 ibld.sample_mask_reg());
4552 break;
4553
4554 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4555 lower_surface_logical_send(ibld, inst,
4556 SHADER_OPCODE_UNTYPED_ATOMIC,
4557 ibld.sample_mask_reg());
4558 break;
4559
4560 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4561 lower_surface_logical_send(ibld, inst,
4562 SHADER_OPCODE_TYPED_SURFACE_READ,
4563 brw_imm_d(0xffff));
4564 break;
4565
4566 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4567 lower_surface_logical_send(ibld, inst,
4568 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4569 ibld.sample_mask_reg());
4570 break;
4571
4572 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4573 lower_surface_logical_send(ibld, inst,
4574 SHADER_OPCODE_TYPED_ATOMIC,
4575 ibld.sample_mask_reg());
4576 break;
4577
4578 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4579 lower_varying_pull_constant_logical_send(ibld, inst);
4580 break;
4581
4582 case SHADER_OPCODE_RCP:
4583 case SHADER_OPCODE_RSQ:
4584 case SHADER_OPCODE_SQRT:
4585 case SHADER_OPCODE_EXP2:
4586 case SHADER_OPCODE_LOG2:
4587 case SHADER_OPCODE_SIN:
4588 case SHADER_OPCODE_COS:
4589 case SHADER_OPCODE_POW:
4590 case SHADER_OPCODE_INT_QUOTIENT:
4591 case SHADER_OPCODE_INT_REMAINDER:
4592 /* The math opcodes are overloaded for the send-like and
4593 * expression-like instructions which seems kind of icky. Gen6+ has
4594 * a native (but rather quirky) MATH instruction so we don't need to
4595 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
4596 * logical instructions (which we can easily recognize because they
4597 * have mlen = 0) into send-like virtual instructions.
4598 */
4599 if (devinfo->gen < 6 && inst->mlen == 0) {
4600 lower_math_logical_send(ibld, inst);
4601 break;
4602
4603 } else {
4604 continue;
4605 }
4606
4607 default:
4608 continue;
4609 }
4610
4611 progress = true;
4612 }
4613
4614 if (progress)
4615 invalidate_live_intervals();
4616
4617 return progress;
4618 }
4619
4620 /**
4621 * Get the closest allowed SIMD width for instruction \p inst accounting for
4622 * some common regioning and execution control restrictions that apply to FPU
4623 * instructions. These restrictions don't necessarily have any relevance to
4624 * instructions not executed by the FPU pipeline like extended math, control
4625 * flow or send message instructions.
4626 *
4627 * For virtual opcodes it's really up to the instruction -- In some cases
4628 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
4629 * instructions) it may simplify virtual instruction lowering if we can
4630 * enforce FPU-like regioning restrictions already on the virtual instruction,
4631 * in other cases (e.g. virtual send-like instructions) this may be
4632 * excessively restrictive.
4633 */
4634 static unsigned
4635 get_fpu_lowered_simd_width(const struct gen_device_info *devinfo,
4636 const fs_inst *inst)
4637 {
4638 /* Maximum execution size representable in the instruction controls. */
4639 unsigned max_width = MIN2(32, inst->exec_size);
4640
4641 /* According to the PRMs:
4642 * "A. In Direct Addressing mode, a source cannot span more than 2
4643 * adjacent GRF registers.
4644 * B. A destination cannot span more than 2 adjacent GRF registers."
4645 *
4646 * Look for the source or destination with the largest register region
4647 * which is the one that is going to limit the overall execution size of
4648 * the instruction due to this rule.
4649 */
4650 unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
4651
4652 for (unsigned i = 0; i < inst->sources; i++)
4653 reg_count = MAX2(reg_count, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));
4654
4655 /* Calculate the maximum execution size of the instruction based on the
4656 * factor by which it goes over the hardware limit of 2 GRFs.
4657 */
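   /* For example, a SIMD16 instruction whose largest region spans four GRFs
    * is limited to 16 / DIV_ROUND_UP(4, 2) = SIMD8 here.
    */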
4658 if (reg_count > 2)
4659 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
4660
4661 /* According to the IVB PRMs:
4662 * "When destination spans two registers, the source MUST span two
4663 * registers. The exception to the above rule:
4664 *
4665 * - When source is scalar, the source registers are not incremented.
4666 * - When source is packed integer Word and destination is packed
4667 * integer DWord, the source register is not incremented but the
4668 * source sub register is incremented."
4669 *
4670 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
4671 * restrictions. The code below intentionally doesn't check whether the
4672 * destination type is integer because empirically the hardware doesn't
4673 * seem to care what the actual type is as long as it's dword-aligned.
4674 */
4675 if (devinfo->gen < 8) {
4676 for (unsigned i = 0; i < inst->sources; i++) {
4677 /* IVB implements DF scalars as <0;2,1> regions. */
4678 const bool is_scalar_exception = is_uniform(inst->src[i]) &&
4679 (devinfo->is_haswell || type_sz(inst->src[i].type) != 8);
4680 const bool is_packed_word_exception =
4681 type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
4682 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1;
4683
4684 if (inst->size_written > REG_SIZE &&
4685 inst->size_read(i) != 0 && inst->size_read(i) <= REG_SIZE &&
4686 !is_scalar_exception && !is_packed_word_exception) {
4687 const unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
4688 max_width = MIN2(max_width, inst->exec_size / reg_count);
4689 }
4690 }
4691 }
4692
4693 /* From the IVB PRMs:
4694 * "When an instruction is SIMD32, the low 16 bits of the execution mask
4695 * are applied for both halves of the SIMD32 instruction. If different
4696 * execution mask channels are required, split the instruction into two
4697 * SIMD16 instructions."
4698 *
4699 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
4700 * 32-wide control flow support in hardware and will behave similarly.
4701 */
4702 if (devinfo->gen < 8 && !inst->force_writemask_all)
4703 max_width = MIN2(max_width, 16);
4704
4705 /* From the IVB PRMs (applies to HSW too):
4706 * "Instructions with condition modifiers must not use SIMD32."
4707 *
4708 * From the BDW PRMs (applies to later hardware too):
4709 * "Ternary instruction with condition modifiers must not use SIMD32."
4710 */
4711 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
4712 max_width = MIN2(max_width, 16);
4713
4714 /* From the IVB PRMs (applies to other devices that don't have the
4715 * gen_device_info::supports_simd16_3src flag set):
4716 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
4717 * SIMD8 is not allowed for DF operations."
4718 */
4719 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
4720 max_width = MIN2(max_width, inst->exec_size / reg_count);
4721
4722 /* Pre-Gen8 EUs are hardwired to use the QtrCtrl+1 (where QtrCtrl is
4723 * the 8-bit quarter of the execution mask signals specified in the
4724 * instruction control fields) for the second compressed half of any
4725 * single-precision instruction (for double-precision instructions
4726 * it's hardwired to use NibCtrl+1, at least on HSW), which means that
4727 * the EU will apply the wrong execution controls for the second
4728 * sequential GRF write if the number of channels per GRF is not exactly
4729 * eight in single-precision mode (or four in double-float mode).
4730 *
4731 * In this situation we calculate the maximum size of the split
4732 * instructions so they only ever write to a single register.
4733 */
4734 if (devinfo->gen < 8 && inst->size_written > REG_SIZE &&
4735 !inst->force_writemask_all) {
4736 const unsigned channels_per_grf = inst->exec_size /
4737 DIV_ROUND_UP(inst->size_written, REG_SIZE);
4738 const unsigned exec_type_size = get_exec_type_size(inst);
4739 assert(exec_type_size);
4740
4741 /* The hardware shifts exactly 8 channels per compressed half of the
4742 * instruction in single-precision mode and exactly 4 in double-precision.
4743 */
4744 if (channels_per_grf != (exec_type_size == 8 ? 4 : 8))
4745 max_width = MIN2(max_width, channels_per_grf);
4746
4747 /* Lower all non-force_writemask_all DF instructions to SIMD4 on IVB/BYT
4748 * because HW applies the same channel enable signals to both halves of
4749 * the compressed instruction which will be just wrong under
4750 * non-uniform control flow.
4751 */
4752 if (devinfo->gen == 7 && !devinfo->is_haswell &&
4753 (exec_type_size == 8 || type_sz(inst->dst.type) == 8))
4754 max_width = MIN2(max_width, 4);
4755 }
4756
4757 /* Only power-of-two execution sizes are representable in the instruction
4758 * control fields.
4759 */
4760 return 1 << _mesa_logbase2(max_width);
4761 }
4762
4763 /**
4764 * Get the maximum allowed SIMD width for instruction \p inst accounting for
4765 * various payload size restrictions that apply to sampler message
4766 * instructions.
4767 *
4768 * This is only intended to provide a maximum theoretical bound for the
4769 * execution size of the message based on the number of argument components
4770 * alone, which in most cases will determine whether the SIMD8 or SIMD16
4771 * variant of the message can be used, though some messages may have
4772 * additional restrictions not accounted for here (e.g. pre-ILK hardware uses
4773 * the message length to determine the exact SIMD width and argument count,
4774 * which makes a number of sampler message combinations impossible to
4775 * represent).
4776 */
4777 static unsigned
4778 get_sampler_lowered_simd_width(const struct gen_device_info *devinfo,
4779 const fs_inst *inst)
4780 {
4781 /* Calculate the number of coordinate components that have to be present
4782 * assuming that additional arguments follow the texel coordinates in the
4783     * message payload. On IVB+ there is no need for padding; on ILK-SNB we
4784     * need to pad to four or three components depending on the message; and
4785     * pre-ILK we need to pad to at most three components.
4786 */
4787 const unsigned req_coord_components =
4788 (devinfo->gen >= 7 ||
4789 !inst->components_read(TEX_LOGICAL_SRC_COORDINATE)) ? 0 :
4790 (devinfo->gen >= 5 && inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
4791 inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL) ? 4 :
4792 3;
4793
4794 /* On Gen9+ the LOD argument is for free if we're able to use the LZ
4795 * variant of the TXL or TXF message.
4796 */
4797 const bool implicit_lod = devinfo->gen >= 9 &&
4798 (inst->opcode == SHADER_OPCODE_TXL ||
4799 inst->opcode == SHADER_OPCODE_TXF) &&
4800 inst->src[TEX_LOGICAL_SRC_LOD].is_zero();
4801
4802 /* Calculate the total number of argument components that need to be passed
4803 * to the sampler unit.
4804 */
4805 const unsigned num_payload_components =
4806 MAX2(inst->components_read(TEX_LOGICAL_SRC_COORDINATE),
4807 req_coord_components) +
4808 inst->components_read(TEX_LOGICAL_SRC_SHADOW_C) +
4809 (implicit_lod ? 0 : inst->components_read(TEX_LOGICAL_SRC_LOD)) +
4810 inst->components_read(TEX_LOGICAL_SRC_LOD2) +
4811 inst->components_read(TEX_LOGICAL_SRC_SAMPLE_INDEX) +
4812 (inst->opcode == SHADER_OPCODE_TG4_OFFSET_LOGICAL ?
4813 inst->components_read(TEX_LOGICAL_SRC_TG4_OFFSET) : 0) +
4814 inst->components_read(TEX_LOGICAL_SRC_MCS);
4815
4816 /* SIMD16 messages with more than five arguments exceed the maximum message
4817 * size supported by the sampler, regardless of whether a header is
4818 * provided or not.
4819 */
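   /* For example, a shadow-compared TXL with three coordinate components
    * needs 3 + 1 (shadow) + 1 (LOD) = 5 payload components, which still fits
    * in a SIMD16 message (5 * 2 = 10 payload registers, or 11 with a header);
    * anything larger falls back to SIMD8.
    */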
4820 return MIN2(inst->exec_size,
4821 num_payload_components > MAX_SAMPLER_MESSAGE_SIZE / 2 ? 8 : 16);
4822 }
4823
4824 /**
4825 * Get the closest native SIMD width supported by the hardware for instruction
4826 * \p inst. The instruction will be left untouched by
4827 * fs_visitor::lower_simd_width() if the returned value is equal to the
4828 * original execution size.
4829 */
4830 static unsigned
4831 get_lowered_simd_width(const struct gen_device_info *devinfo,
4832 const fs_inst *inst)
4833 {
4834 switch (inst->opcode) {
4835 case BRW_OPCODE_MOV:
4836 case BRW_OPCODE_SEL:
4837 case BRW_OPCODE_NOT:
4838 case BRW_OPCODE_AND:
4839 case BRW_OPCODE_OR:
4840 case BRW_OPCODE_XOR:
4841 case BRW_OPCODE_SHR:
4842 case BRW_OPCODE_SHL:
4843 case BRW_OPCODE_ASR:
4844 case BRW_OPCODE_CMPN:
4845 case BRW_OPCODE_CSEL:
4846 case BRW_OPCODE_F32TO16:
4847 case BRW_OPCODE_F16TO32:
4848 case BRW_OPCODE_BFREV:
4849 case BRW_OPCODE_BFE:
4850 case BRW_OPCODE_ADD:
4851 case BRW_OPCODE_MUL:
4852 case BRW_OPCODE_AVG:
4853 case BRW_OPCODE_FRC:
4854 case BRW_OPCODE_RNDU:
4855 case BRW_OPCODE_RNDD:
4856 case BRW_OPCODE_RNDE:
4857 case BRW_OPCODE_RNDZ:
4858 case BRW_OPCODE_LZD:
4859 case BRW_OPCODE_FBH:
4860 case BRW_OPCODE_FBL:
4861 case BRW_OPCODE_CBIT:
4862 case BRW_OPCODE_SAD2:
4863 case BRW_OPCODE_MAD:
4864 case BRW_OPCODE_LRP:
4865 case FS_OPCODE_PACK:
4866 return get_fpu_lowered_simd_width(devinfo, inst);
4867
4868 case BRW_OPCODE_CMP: {
4869 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
4870 * when the destination is a GRF the dependency-clear bit on the flag
4871 * register is cleared early.
4872 *
4873 * Suggested workarounds are to disable coissuing CMP instructions
4874 * or to split CMP(16) instructions into two CMP(8) instructions.
4875 *
4876 * We choose to split into CMP(8) instructions since disabling
4877 * coissuing would affect CMP instructions not otherwise affected by
4878 * the errata.
4879 */
4880 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
4881 !inst->dst.is_null() ? 8 : ~0);
4882 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
4883 }
4884 case BRW_OPCODE_BFI1:
4885 case BRW_OPCODE_BFI2:
4886 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
4887 * should
4888 * "Force BFI instructions to be executed always in SIMD8."
4889 */
4890 return MIN2(devinfo->is_haswell ? 8 : ~0u,
4891 get_fpu_lowered_simd_width(devinfo, inst));
4892
4893 case BRW_OPCODE_IF:
4894 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
4895 return inst->exec_size;
4896
4897 case SHADER_OPCODE_RCP:
4898 case SHADER_OPCODE_RSQ:
4899 case SHADER_OPCODE_SQRT:
4900 case SHADER_OPCODE_EXP2:
4901 case SHADER_OPCODE_LOG2:
4902 case SHADER_OPCODE_SIN:
4903 case SHADER_OPCODE_COS:
4904 /* Unary extended math instructions are limited to SIMD8 on Gen4 and
4905 * Gen6.
4906 */
4907 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4908 devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
4909 MIN2(8, inst->exec_size));
4910
4911 case SHADER_OPCODE_POW:
4912 /* SIMD16 is only allowed on Gen7+. */
4913 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4914 MIN2(8, inst->exec_size));
4915
4916 case SHADER_OPCODE_INT_QUOTIENT:
4917 case SHADER_OPCODE_INT_REMAINDER:
4918 /* Integer division is limited to SIMD8 on all generations. */
4919 return MIN2(8, inst->exec_size);
4920
4921 case FS_OPCODE_LINTERP:
4922 case FS_OPCODE_GET_BUFFER_SIZE:
4923 case FS_OPCODE_DDX_COARSE:
4924 case FS_OPCODE_DDX_FINE:
4925 case FS_OPCODE_DDY_COARSE:
4926 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
4927 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
4928 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
4929 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
4930 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
4931 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
4932 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
4933 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
4934 return MIN2(16, inst->exec_size);
4935
4936 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4937 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
4938 * message used to implement varying pull constant loads, so expand it
4939 * to SIMD16. An alternative with longer message payload length but
4940 * shorter return payload would be to use the SIMD8 sampler message that
4941 * takes (header, u, v, r) as parameters instead of (header, u).
4942 */
4943 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
4944
4945 case FS_OPCODE_DDY_FINE:
4946 /* The implementation of this virtual opcode may require emitting
4947 * compressed Align16 instructions, which are severely limited on some
4948 * generations.
4949 *
4950 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
4951 * Region Restrictions):
4952 *
4953 * "In Align16 access mode, SIMD16 is not allowed for DW operations
4954 * and SIMD8 is not allowed for DF operations."
4955 *
4956 * In this context, "DW operations" means "operations acting on 32-bit
4957 * values", so it includes operations on floats.
4958 *
4959 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
4960 * (Instruction Compression -> Rules and Restrictions):
4961 *
4962 * "A compressed instruction must be in Align1 access mode. Align16
4963 * mode instructions cannot be compressed."
4964 *
4965 * Similar text exists in the g45 PRM.
4966 *
4967 * Empirically, compressed align16 instructions using odd register
4968 * numbers don't appear to work on Sandybridge either.
4969 */
4970 return (devinfo->gen == 4 || devinfo->gen == 6 ||
4971 (devinfo->gen == 7 && !devinfo->is_haswell) ?
4972 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
4973
4974 case SHADER_OPCODE_MULH:
4975 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4976 * is 8-wide on Gen7+.
4977 */
4978 return (devinfo->gen >= 7 ? 8 :
4979 get_fpu_lowered_simd_width(devinfo, inst));
4980
4981 case FS_OPCODE_FB_WRITE_LOGICAL:
4982 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4983 * here.
4984 */
4985 assert(devinfo->gen != 6 ||
4986 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
4987 inst->exec_size == 8);
4988 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4989 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
4990 8 : MIN2(16, inst->exec_size));
4991
4992 case FS_OPCODE_FB_READ_LOGICAL:
4993 return MIN2(16, inst->exec_size);
4994
4995 case SHADER_OPCODE_TEX_LOGICAL:
4996 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4997 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4998 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4999 case SHADER_OPCODE_LOD_LOGICAL:
5000 case SHADER_OPCODE_TG4_LOGICAL:
5001 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
5002 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
5003 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
5004 return get_sampler_lowered_simd_width(devinfo, inst);
5005
5006 case SHADER_OPCODE_TXD_LOGICAL:
5007 /* TXD is unsupported in SIMD16 mode. */
5008 return 8;
5009
5010 case SHADER_OPCODE_TXL_LOGICAL:
5011 case FS_OPCODE_TXB_LOGICAL:
5012 /* Only one execution size is representable pre-ILK depending on whether
5013 * the shadow reference argument is present.
5014 */
5015 if (devinfo->gen == 4)
5016 return inst->src[TEX_LOGICAL_SRC_SHADOW_C].file == BAD_FILE ? 16 : 8;
5017 else
5018 return get_sampler_lowered_simd_width(devinfo, inst);
5019
5020 case SHADER_OPCODE_TXF_LOGICAL:
5021 case SHADER_OPCODE_TXS_LOGICAL:
5022 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
5023 * messages. Use SIMD16 instead.
5024 */
5025 if (devinfo->gen == 4)
5026 return 16;
5027 else
5028 return get_sampler_lowered_simd_width(devinfo, inst);
5029
5030 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5031 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5032 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5033 return 8;
5034
5035 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5036 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5037 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5038 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5039 return MIN2(16, inst->exec_size);
5040
5041 case SHADER_OPCODE_URB_READ_SIMD8:
5042 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
5043 case SHADER_OPCODE_URB_WRITE_SIMD8:
5044 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
5045 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
5046 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
5047 return MIN2(8, inst->exec_size);
5048
5049 case SHADER_OPCODE_MOV_INDIRECT: {
5050 /* From IVB and HSW PRMs:
5051 *
5052     *    "2. When the destination requires two registers and the sources are
5053     *     indirect, the sources must use 1x1 regioning mode."
5054 *
5055 * In case of DF instructions in HSW/IVB, the exec_size is limited by
5056 * the EU decompression logic not handling VxH indirect addressing
5057 * correctly.
5058 */
5059 const unsigned max_size = (devinfo->gen >= 8 ? 2 : 1) * REG_SIZE;
5060 /* Prior to Broadwell, we only have 8 address subregisters. */
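      /* For example, a dword destination with unit stride gives
       * max_size / 4 = 8 channels pre-Gen8 and 16 channels on Gen8+.
       */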
5061 return MIN3(devinfo->gen >= 8 ? 16 : 8,
5062 max_size / (inst->dst.stride * type_sz(inst->dst.type)),
5063 inst->exec_size);
5064 }
5065
5066 case SHADER_OPCODE_LOAD_PAYLOAD: {
5067 const unsigned reg_count =
5068 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
5069
5070 if (reg_count > 2) {
5071 /* Only LOAD_PAYLOAD instructions with per-channel destination region
5072 * can be easily lowered (which excludes headers and heterogeneous
5073 * types).
5074 */
5075 assert(!inst->header_size);
5076 for (unsigned i = 0; i < inst->sources; i++)
5077 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
5078 inst->src[i].file == BAD_FILE);
5079
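         /* E.g. a SIMD16 payload spanning four registers gets split into
          * 16 / DIV_ROUND_UP(4, 2) = SIMD8 halves.
          */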
5080 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
5081 } else {
5082 return inst->exec_size;
5083 }
5084 }
5085 default:
5086 return inst->exec_size;
5087 }
5088 }
5089
5090 /**
5091 * Return true if splitting out the group of channels of instruction \p inst
5092 * given by lbld.group() requires allocating a temporary for the i-th source
5093 * of the lowered instruction.
5094 */
5095 static inline bool
5096 needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
5097 {
5098 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
5099 (inst->components_read(i) == 1 &&
5100 lbld.dispatch_width() <= inst->exec_size)) ||
5101 (inst->flags_written() &
5102 flag_mask(inst->src[i], type_sz(inst->src[i].type)));
5103 }
5104
5105 /**
5106 * Extract the data that would be consumed by the channel group given by
5107 * lbld.group() from the i-th source region of instruction \p inst and return
5108 * it as result in packed form.
5109 */
5110 static fs_reg
5111 emit_unzip(const fs_builder &lbld, fs_inst *inst, unsigned i)
5112 {
5113 /* Specified channel group from the source region. */
5114 const fs_reg src = horiz_offset(inst->src[i], lbld.group());
5115
5116 if (needs_src_copy(lbld, inst, i)) {
5117 /* Builder of the right width to perform the copy avoiding uninitialized
5118 * data if the lowered execution size is greater than the original
5119 * execution size of the instruction.
5120 */
5121 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5122 inst->exec_size), 0);
5123 const fs_reg tmp = lbld.vgrf(inst->src[i].type, inst->components_read(i));
5124
5125 for (unsigned k = 0; k < inst->components_read(i); ++k)
5126 cbld.MOV(offset(tmp, lbld, k), offset(src, inst->exec_size, k));
5127
5128 return tmp;
5129
5130 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
5131 /* The source is invariant for all dispatch_width-wide groups of the
5132 * original region.
5133 */
5134 return inst->src[i];
5135
5136 } else {
5137 /* We can just point the lowered instruction at the right channel group
5138 * from the original region.
5139 */
5140 return src;
5141 }
5142 }
5143
5144 /**
5145 * Return true if splitting out the group of channels of instruction \p inst
5146 * given by lbld.group() requires allocating a temporary for the destination
5147 * of the lowered instruction and copying the data back to the original
5148 * destination region.
5149 */
5150 static inline bool
5151 needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
5152 {
5153 /* If the instruction writes more than one component we'll have to shuffle
5154 * the results of multiple lowered instructions in order to make sure that
5155 * they end up arranged correctly in the original destination region.
5156 */
5157 if (inst->size_written > inst->dst.component_size(inst->exec_size))
5158 return true;
5159
5160 /* If the lowered execution size is larger than the original the result of
5161 * the instruction won't fit in the original destination, so we'll have to
5162 * allocate a temporary in any case.
5163 */
5164 if (lbld.dispatch_width() > inst->exec_size)
5165 return true;
5166
5167 for (unsigned i = 0; i < inst->sources; i++) {
5168 /* If we already made a copy of the source for other reasons there won't
5169 * be any overlap with the destination.
5170 */
5171 if (needs_src_copy(lbld, inst, i))
5172 continue;
5173
5174 /* To keep the logic simple we emit a copy whenever the destination
5175 * region doesn't exactly match an overlapping source. A mismatch may
5176 * mean the source and destination aren't aligned group by group, in
5177 * which case one of the lowered instructions could overwrite data that
5178 * other lowered instructions still need to read from the same source.
5179 */
5180 if (regions_overlap(inst->dst, inst->size_written,
5181 inst->src[i], inst->size_read(i)) &&
5182 !inst->dst.equals(inst->src[i]))
5183 return true;
5184 }
5185
5186 return false;
5187 }
5188
5189 /**
5190 * Insert data from a packed temporary into the channel group given by
5191 * lbld.group() of the destination region of instruction \p inst and return
5192 * the temporary as result. Any copy instructions that are required for
5193 * unzipping the previous value (in the case of partial writes) will be
5194 * inserted using \p lbld_before and any copy instructions required for
5195 * zipping up the destination of \p inst will be inserted using \p lbld_after.
5196 */
5197 static fs_reg
5198 emit_zip(const fs_builder &lbld_before, const fs_builder &lbld_after,
5199 fs_inst *inst)
5200 {
5201 assert(lbld_before.dispatch_width() == lbld_after.dispatch_width());
5202 assert(lbld_before.group() == lbld_after.group());
5203
5204 /* Specified channel group from the destination region. */
5205 const fs_reg dst = horiz_offset(inst->dst, lbld_after.group());
5206 const unsigned dst_size = inst->size_written /
5207 inst->dst.component_size(inst->exec_size);
5208
5209 if (needs_dst_copy(lbld_after, inst)) {
5210 const fs_reg tmp = lbld_after.vgrf(inst->dst.type, dst_size);
5211
5212 if (inst->predicate) {
5213 /* Handle predication by copying the original contents of
5214 * the destination into the temporary before emitting the
5215 * lowered instruction.
5216 */
5217 const fs_builder gbld_before =
5218 lbld_before.group(MIN2(lbld_before.dispatch_width(),
5219 inst->exec_size), 0);
5220 for (unsigned k = 0; k < dst_size; ++k) {
5221 gbld_before.MOV(offset(tmp, lbld_before, k),
5222 offset(dst, inst->exec_size, k));
5223 }
5224 }
5225
5226 const fs_builder gbld_after =
5227 lbld_after.group(MIN2(lbld_after.dispatch_width(),
5228 inst->exec_size), 0);
5229 for (unsigned k = 0; k < dst_size; ++k) {
5230 /* Use a builder of the right width to perform the copy avoiding
5231 * uninitialized data if the lowered execution size is greater than
5232 * the original execution size of the instruction.
5233 */
5234 gbld_after.MOV(offset(dst, inst->exec_size, k),
5235 offset(tmp, lbld_after, k));
5236 }
5237
5238 return tmp;
5239
5240 } else {
5241 /* No need to allocate a temporary for the lowered instruction, just
5242 * take the right group of channels from the original region.
5243 */
5244 return dst;
5245 }
5246 }
5247
5248 bool
5249 fs_visitor::lower_simd_width()
5250 {
5251 bool progress = false;
5252
5253 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5254 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
5255
5256 if (lower_width != inst->exec_size) {
5257 /* Builder matching the original instruction. Since we may also need
5258 * to emit instructions wider than the original, set the execution size
5259 * of the builder to the larger of the two widths for now so that both
5260 * cases can be handled.
5261 */
5262 const unsigned max_width = MAX2(inst->exec_size, lower_width);
5263 const fs_builder ibld = bld.at(block, inst)
5264 .exec_all(inst->force_writemask_all)
5265 .group(max_width, inst->group / max_width);
5266
5267 /* Split the copies in chunks of the execution width of either the
5268 * original or the lowered instruction, whichever is lower.
5269 */
5270 const unsigned n = DIV_ROUND_UP(inst->exec_size, lower_width);
5271 const unsigned dst_size = inst->size_written /
5272 inst->dst.component_size(inst->exec_size);
5273
5274 assert(!inst->writes_accumulator && !inst->mlen);
5275
5276 /* Inserting the zip, unzip, and duplicated instructions in all of
5277 * the right spots is somewhat tricky. All of the unzip and any
5278 * instructions from the zip which unzip the destination prior to
5279 * writing need to happen before all of the per-group instructions
5280 * and the zip instructions need to happen after. In order to sort
5281 * this all out, we insert the unzip instructions before \p inst,
5282 * insert the per-group instructions after \p inst (i.e. before
5283 * inst->next), and insert the zip instructions before the
5284 * instruction after \p inst. Since we are inserting instructions
5285 * after \p inst, inst->next is a moving target and we need to save
5286 * it off here so that we insert the zip instructions in the right
5287 * place.
5288 */
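/* E.g. for n == 2 the instructions end up ordered roughly as follows:
*
*    unzip copies for group 0 and group 1
*    lowered copy for group 1
*    lowered copy for group 0
*    zip copies for group 0
*    zip copies for group 1
*    after_inst
*
* Note in particular that the lowered copies end up in reverse program
* order, which is why the i == 0 copy below is the one that keeps the
* EOT flag.
*/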
5289 exec_node *const after_inst = inst->next;
5290 for (unsigned i = 0; i < n; i++) {
5291 /* Emit a copy of the original instruction with the lowered width.
5292 * If the EOT flag was set throw it away except for the last
5293 * instruction to avoid killing the thread prematurely.
5294 */
5295 fs_inst split_inst = *inst;
5296 split_inst.exec_size = lower_width;
5297 split_inst.eot = inst->eot && i == 0;
5298
5299 /* Select the correct channel enables for the i-th group, then
5300 * transform the sources and destination and emit the lowered
5301 * instruction.
5302 */
5303 const fs_builder lbld = ibld.group(lower_width, i);
5304
5305 for (unsigned j = 0; j < inst->sources; j++)
5306 split_inst.src[j] = emit_unzip(lbld.at(block, inst), inst, j);
5307
5308 split_inst.dst = emit_zip(lbld.at(block, inst),
5309 lbld.at(block, after_inst), inst);
5310 split_inst.size_written =
5311 split_inst.dst.component_size(lower_width) * dst_size;
5312
5313 lbld.at(block, inst->next).emit(split_inst);
5314 }
5315
5316 inst->remove(block);
5317 progress = true;
5318 }
5319 }
5320
5321 if (progress)
5322 invalidate_live_intervals();
5323
5324 return progress;
5325 }
5326
5327 void
5328 fs_visitor::dump_instructions()
5329 {
5330 dump_instructions(NULL);
5331 }
5332
5333 void
5334 fs_visitor::dump_instructions(const char *name)
5335 {
5336 FILE *file = stderr;
5337 if (name && geteuid() != 0) {
5338 file = fopen(name, "w");
5339 if (!file)
5340 file = stderr;
5341 }
5342
5343 if (cfg) {
5344 calculate_register_pressure();
5345 int ip = 0, max_pressure = 0;
5346 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
5347 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
5348 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
5349 dump_instruction(inst, file);
5350 ip++;
5351 }
5352 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
5353 } else {
5354 int ip = 0;
5355 foreach_in_list(backend_instruction, inst, &instructions) {
5356 fprintf(file, "%4d: ", ip++);
5357 dump_instruction(inst, file);
5358 }
5359 }
5360
5361 if (file != stderr) {
5362 fclose(file);
5363 }
5364 }
5365
5366 void
5367 fs_visitor::dump_instruction(backend_instruction *be_inst)
5368 {
5369 dump_instruction(be_inst, stderr);
5370 }
5371
5372 void
5373 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
5374 {
5375 fs_inst *inst = (fs_inst *)be_inst;
5376
5377 if (inst->predicate) {
5378 fprintf(file, "(%cf0.%d) ",
5379 inst->predicate_inverse ? '-' : '+',
5380 inst->flag_subreg);
5381 }
5382
5383 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
5384 if (inst->saturate)
5385 fprintf(file, ".sat");
5386 if (inst->conditional_mod) {
5387 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
5388 if (!inst->predicate &&
5389 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
5390 inst->opcode != BRW_OPCODE_IF &&
5391 inst->opcode != BRW_OPCODE_WHILE))) {
5392 fprintf(file, ".f0.%d", inst->flag_subreg);
5393 }
5394 }
5395 fprintf(file, "(%d) ", inst->exec_size);
5396
5397 if (inst->mlen) {
5398 fprintf(file, "(mlen: %d) ", inst->mlen);
5399 }
5400
5401 if (inst->eot) {
5402 fprintf(file, "(EOT) ");
5403 }
5404
5405 switch (inst->dst.file) {
5406 case VGRF:
5407 fprintf(file, "vgrf%d", inst->dst.nr);
5408 break;
5409 case FIXED_GRF:
5410 fprintf(file, "g%d", inst->dst.nr);
5411 break;
5412 case MRF:
5413 fprintf(file, "m%d", inst->dst.nr);
5414 break;
5415 case BAD_FILE:
5416 fprintf(file, "(null)");
5417 break;
5418 case UNIFORM:
5419 fprintf(file, "***u%d***", inst->dst.nr);
5420 break;
5421 case ATTR:
5422 fprintf(file, "***attr%d***", inst->dst.nr);
5423 break;
5424 case ARF:
5425 switch (inst->dst.nr) {
5426 case BRW_ARF_NULL:
5427 fprintf(file, "null");
5428 break;
5429 case BRW_ARF_ADDRESS:
5430 fprintf(file, "a0.%d", inst->dst.subnr);
5431 break;
5432 case BRW_ARF_ACCUMULATOR:
5433 fprintf(file, "acc%d", inst->dst.subnr);
5434 break;
5435 case BRW_ARF_FLAG:
5436 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5437 break;
5438 default:
5439 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5440 break;
5441 }
5442 break;
5443 case IMM:
5444 unreachable("not reached");
5445 }
5446
5447 if (inst->dst.offset ||
5448 (inst->dst.file == VGRF &&
5449 alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
5450 const unsigned reg_size = (inst->dst.file == UNIFORM ? 4 : REG_SIZE);
5451 fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
5452 inst->dst.offset % reg_size);
5453 }
5454
5455 if (inst->dst.stride != 1)
5456 fprintf(file, "<%u>", inst->dst.stride);
5457 fprintf(file, ":%s, ", brw_reg_type_to_letters(inst->dst.type));
5458
5459 for (int i = 0; i < inst->sources; i++) {
5460 if (inst->src[i].negate)
5461 fprintf(file, "-");
5462 if (inst->src[i].abs)
5463 fprintf(file, "|");
5464 switch (inst->src[i].file) {
5465 case VGRF:
5466 fprintf(file, "vgrf%d", inst->src[i].nr);
5467 break;
5468 case FIXED_GRF:
5469 fprintf(file, "g%d", inst->src[i].nr);
5470 break;
5471 case MRF:
5472 fprintf(file, "***m%d***", inst->src[i].nr);
5473 break;
5474 case ATTR:
5475 fprintf(file, "attr%d", inst->src[i].nr);
5476 break;
5477 case UNIFORM:
5478 fprintf(file, "u%d", inst->src[i].nr);
5479 break;
5480 case BAD_FILE:
5481 fprintf(file, "(null)");
5482 break;
5483 case IMM:
5484 switch (inst->src[i].type) {
5485 case BRW_REGISTER_TYPE_F:
5486 fprintf(file, "%-gf", inst->src[i].f);
5487 break;
5488 case BRW_REGISTER_TYPE_DF:
5489 fprintf(file, "%fdf", inst->src[i].df);
5490 break;
5491 case BRW_REGISTER_TYPE_W:
5492 case BRW_REGISTER_TYPE_D:
5493 fprintf(file, "%dd", inst->src[i].d);
5494 break;
5495 case BRW_REGISTER_TYPE_UW:
5496 case BRW_REGISTER_TYPE_UD:
5497 fprintf(file, "%uu", inst->src[i].ud);
5498 break;
5499 case BRW_REGISTER_TYPE_VF:
5500 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
5501 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
5502 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
5503 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
5504 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
5505 break;
5506 default:
5507 fprintf(file, "???");
5508 break;
5509 }
5510 break;
5511 case ARF:
5512 switch (inst->src[i].nr) {
5513 case BRW_ARF_NULL:
5514 fprintf(file, "null");
5515 break;
5516 case BRW_ARF_ADDRESS:
5517 fprintf(file, "a0.%d", inst->src[i].subnr);
5518 break;
5519 case BRW_ARF_ACCUMULATOR:
5520 fprintf(file, "acc%d", inst->src[i].subnr);
5521 break;
5522 case BRW_ARF_FLAG:
5523 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5524 break;
5525 default:
5526 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5527 break;
5528 }
5529 break;
5530 }
5531
5532 if (inst->src[i].offset ||
5533 (inst->src[i].file == VGRF &&
5534 alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
5535 const unsigned reg_size = (inst->src[i].file == UNIFORM ? 4 : REG_SIZE);
5536 fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
5537 inst->src[i].offset % reg_size);
5538 }
5539
5540 if (inst->src[i].abs)
5541 fprintf(file, "|");
5542
5543 if (inst->src[i].file != IMM) {
5544 unsigned stride;
5545 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
5546 unsigned hstride = inst->src[i].hstride;
5547 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
5548 } else {
5549 stride = inst->src[i].stride;
5550 }
5551 if (stride != 1)
5552 fprintf(file, "<%u>", stride);
5553
5554 fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
5555 }
5556
5557 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
5558 fprintf(file, ", ");
5559 }
5560
5561 fprintf(file, " ");
5562
5563 if (inst->force_writemask_all)
5564 fprintf(file, "NoMask ");
5565
5566 if (inst->exec_size != dispatch_width)
5567 fprintf(file, "group%d ", inst->group);
5568
5569 fprintf(file, "\n");
5570 }
5571
5572 /**
5573 * Possibly returns an instruction that set up @param reg.
5574 *
5575 * Sometimes we want to take the result of some expression/variable
5576 * dereference tree and rewrite the instruction generating the result
5577 * of the tree. When processing the tree, we know that the
5578 * instructions generated are all writing temporaries that are dead
5579 * outside of this tree. So, if we have some instructions that write
5580 * a temporary, we're free to point that temp write somewhere else.
5581 *
5582 * Note that this doesn't guarantee that the returned instruction wrote
5583 * only reg -- it might be the size=4 destination of a texture instruction.
5584 */
5585 fs_inst *
5586 fs_visitor::get_instruction_generating_reg(fs_inst *start,
5587 fs_inst *end,
5588 const fs_reg &reg)
5589 {
5590 if (end == start ||
5591 end->is_partial_write() ||
5592 !reg.equals(end->dst)) {
5593 return NULL;
5594 } else {
5595 return end;
5596 }
5597 }
5598
5599 void
5600 fs_visitor::setup_fs_payload_gen6()
5601 {
5602 assert(stage == MESA_SHADER_FRAGMENT);
5603 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
5604
5605 assert(devinfo->gen >= 6);
5606
5607 /* R0-1: masks, pixel X/Y coordinates. */
5608 payload.num_regs = 2;
5609 /* R2: only for 32-pixel dispatch. */
5610
5611 /* R3-26: barycentric interpolation coordinates. These appear in the
5612 * same order that they appear in the brw_barycentric_mode
5613 * enum. Each set of coordinates occupies 2 registers if dispatch width
5614 * == 8 and 4 registers if dispatch width == 16. Coordinates only
5615 * appear if they were enabled using the "Barycentric Interpolation
5616 * Mode" bits in WM_STATE.
5617 */
5618 for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
5619 if (prog_data->barycentric_interp_modes & (1 << i)) {
5620 payload.barycentric_coord_reg[i] = payload.num_regs;
5621 payload.num_regs += 2;
5622 if (dispatch_width == 16) {
5623 payload.num_regs += 2;
5624 }
5625 }
5626 }
5627
5628 /* R27: interpolated depth if uses source depth */
5629 prog_data->uses_src_depth =
5630 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5631 if (prog_data->uses_src_depth) {
5632 payload.source_depth_reg = payload.num_regs;
5633 payload.num_regs++;
5634 if (dispatch_width == 16) {
5635 /* R28: interpolated depth if not SIMD8. */
5636 payload.num_regs++;
5637 }
5638 }
5639
5640 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
5641 prog_data->uses_src_w =
5642 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5643 if (prog_data->uses_src_w) {
5644 payload.source_w_reg = payload.num_regs;
5645 payload.num_regs++;
5646 if (dispatch_width == 16) {
5647 /* R30: interpolated W if not SIMD8. */
5648 payload.num_regs++;
5649 }
5650 }
5651
5652 /* R31: MSAA position offsets. */
5653 if (prog_data->persample_dispatch &&
5654 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
5655 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
5656 *
5657 * "MSDISPMODE_PERSAMPLE is required in order to select
5658 * POSOFFSET_SAMPLE"
5659 *
5660 * So we can only really get sample positions if we are doing real
5661 * per-sample dispatch. If we need gl_SamplePosition and we don't have
5662 * persample dispatch, we hard-code it to 0.5.
5663 */
5664 prog_data->uses_pos_offset = true;
5665 payload.sample_pos_reg = payload.num_regs;
5666 payload.num_regs++;
5667 }
5668
5669 /* R32: MSAA input coverage mask */
5670 prog_data->uses_sample_mask =
5671 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
5672 if (prog_data->uses_sample_mask) {
5673 assert(devinfo->gen >= 7);
5674 payload.sample_mask_in_reg = payload.num_regs;
5675 payload.num_regs++;
5676 if (dispatch_width == 16) {
5677 /* R33: input coverage mask if not SIMD8. */
5678 payload.num_regs++;
5679 }
5680 }
5681
5682 /* R34-: bary for 32-pixel. */
5683 /* R58-59: interp W for 32-pixel. */
5684
5685 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
5686 source_depth_to_render_target = true;
5687 }
5688 }
5689
5690 void
5691 fs_visitor::setup_vs_payload()
5692 {
5693 /* R0: thread header, R1: urb handles */
5694 payload.num_regs = 2;
5695 }
5696
5697 void
5698 fs_visitor::setup_gs_payload()
5699 {
5700 assert(stage == MESA_SHADER_GEOMETRY);
5701
5702 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
5703 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
5704
5705 /* R0: thread header, R1: output URB handles */
5706 payload.num_regs = 2;
5707
5708 if (gs_prog_data->include_primitive_id) {
5709 /* R2: Primitive ID 0..7 */
5710 payload.num_regs++;
5711 }
5712
5713 /* Always enable VUE handles so we can safely use pull model if needed.
5714 *
5715 * The push model for a GS uses a ton of register space even for trivial
5716 * scenarios with just a few inputs, so just make things easier and a bit
5717 * safer by always having pull model available.
5718 */
5719 gs_prog_data->base.include_vue_handles = true;
5720
5721 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
5722 payload.num_regs += nir->info.gs.vertices_in;
5723
5724 /* Use a maximum of 24 registers for push-model inputs. */
5725 const unsigned max_push_components = 24;
5726
5727 /* If pushing our inputs would take too many registers, reduce the URB read
5728 * length (which is in HWords, or 8 registers), and resort to pulling.
5729 *
5730 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
5731 * have to multiply by VerticesIn to obtain the total storage requirement.
5732 */
5733 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
5734 max_push_components) {
5735 vue_prog_data->urb_read_length =
5736 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
5737 }
5738 }
5739
5740 void
5741 fs_visitor::setup_cs_payload()
5742 {
5743 assert(devinfo->gen >= 7);
5744 payload.num_regs = 1;
5745 }
5746
5747 void
5748 fs_visitor::calculate_register_pressure()
5749 {
5750 invalidate_live_intervals();
5751 calculate_live_intervals();
5752
5753 unsigned num_instructions = 0;
5754 foreach_block(block, cfg)
5755 num_instructions += block->instructions.length();
5756
5757 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
5758
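/* For each VGRF, add its size to every instruction IP inside its live
* range, so regs_live_at_ip[ip] ends up holding the number of registers
* live at instruction ip.
*/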
5759 for (unsigned reg = 0; reg < alloc.count; reg++) {
5760 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
5761 regs_live_at_ip[ip] += alloc.sizes[reg];
5762 }
5763 }
5764
5765 /**
5766 * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
5767 *
5768 * The needs_unlit_centroid_workaround ends up producing one of these per
5769 * channel of centroid input, so it's good to clean them up.
5770 *
5771 * An assumption here is that nothing ever modifies the dispatched pixels
5772 * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
5773 * dictates that anyway.
5774 */
5775 bool
5776 fs_visitor::opt_drop_redundant_mov_to_flags()
5777 {
5778 bool flag_mov_found[2] = {false};
5779 bool progress = false;
5780
5781 /* This pass only removes MOVs emitted for the unlit centroid workaround */
5782 if (!devinfo->needs_unlit_centroid_workaround)
5783 return false;
5784
5785 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5786 if (inst->is_control_flow()) {
5787 memset(flag_mov_found, 0, sizeof(flag_mov_found));
5788 } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
5789 if (!flag_mov_found[inst->flag_subreg]) {
5790 flag_mov_found[inst->flag_subreg] = true;
5791 } else {
5792 inst->remove(block);
5793 progress = true;
5794 }
5795 } else if (inst->flags_written()) {
5796 flag_mov_found[inst->flag_subreg] = false;
5797 }
5798 }
5799
5800 return progress;
5801 }
5802
5803 void
5804 fs_visitor::optimize()
5805 {
5806 /* Start by validating the shader we currently have. */
5807 validate();
5808
5809 /* bld is the common builder object pointing at the end of the program we
5810 * used to translate it into i965 IR. For the optimization and lowering
5811 * passes coming next, any code added after the end of the program without
5812 * having explicitly called fs_builder::at() clearly points at a mistake.
5813 * Ideally optimization passes wouldn't be part of the visitor so they
5814 * wouldn't have access to bld at all, but they do, so just in case some
5815 * pass forgets to ask for a location explicitly set it to NULL here to
5816 * make it trip. The dispatch width is initialized to a bogus value to
5817 * make sure that optimizations set the execution controls explicitly to
5818 * match the code they are manipulating instead of relying on the defaults.
5819 */
5820 bld = fs_builder(this, 64);
5821
5822 assign_constant_locations();
5823 lower_constant_loads();
5824
5825 validate();
5826
5827 split_virtual_grfs();
5828 validate();
5829
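/* OPT() runs a single pass, validates the IR afterwards, dumps the
* instructions when DEBUG_OPTIMIZER is set and the pass made progress, and
* accumulates the overall progress flag while still evaluating to the
* individual pass' result.
*/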
5830 #define OPT(pass, args...) ({ \
5831 pass_num++; \
5832 bool this_progress = pass(args); \
5833 \
5834 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
5835 char filename[64]; \
5836 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
5837 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
5838 \
5839 backend_shader::dump_instructions(filename); \
5840 } \
5841 \
5842 validate(); \
5843 \
5844 progress = progress || this_progress; \
5845 this_progress; \
5846 })
5847
5848 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
5849 char filename[64];
5850 snprintf(filename, 64, "%s%d-%s-00-00-start",
5851 stage_abbrev, dispatch_width, nir->info.name);
5852
5853 backend_shader::dump_instructions(filename);
5854 }
5855
5856 bool progress = false;
5857 int iteration = 0;
5858 int pass_num = 0;
5859
5860 OPT(opt_drop_redundant_mov_to_flags);
5861 OPT(remove_extra_rounding_modes);
5862
5863 do {
5864 progress = false;
5865 pass_num = 0;
5866 iteration++;
5867
5868 OPT(remove_duplicate_mrf_writes);
5869
5870 OPT(opt_algebraic);
5871 OPT(opt_cse);
5872 OPT(opt_copy_propagation);
5873 OPT(opt_predicated_break, this);
5874 OPT(opt_cmod_propagation);
5875 OPT(dead_code_eliminate);
5876 OPT(opt_peephole_sel);
5877 OPT(dead_control_flow_eliminate, this);
5878 OPT(opt_register_renaming);
5879 OPT(opt_saturate_propagation);
5880 OPT(register_coalesce);
5881 OPT(compute_to_mrf);
5882 OPT(eliminate_find_live_channel);
5883
5884 OPT(compact_virtual_grfs);
5885 } while (progress);
5886
5887 progress = false;
5888 pass_num = 0;
5889
5890 if (OPT(lower_pack)) {
5891 OPT(register_coalesce);
5892 OPT(dead_code_eliminate);
5893 }
5894
5895 OPT(lower_simd_width);
5896
5897 /* After SIMD lowering just in case we had to unroll the EOT send. */
5898 OPT(opt_sampler_eot);
5899
5900 OPT(lower_logical_sends);
5901
5902 if (progress) {
5903 OPT(opt_copy_propagation);
5904 /* Only run after logical send lowering because it's easier to implement
5905 * in terms of physical sends.
5906 */
5907 if (OPT(opt_zero_samples))
5908 OPT(opt_copy_propagation);
5909 /* Run after logical send lowering to give it a chance to CSE the
5910 * LOAD_PAYLOAD instructions created to construct the payloads of
5911 * e.g. texturing messages in cases where it wasn't possible to CSE the
5912 * whole logical instruction.
5913 */
5914 OPT(opt_cse);
5915 OPT(register_coalesce);
5916 OPT(compute_to_mrf);
5917 OPT(dead_code_eliminate);
5918 OPT(remove_duplicate_mrf_writes);
5919 OPT(opt_peephole_sel);
5920 }
5921
5922 OPT(opt_redundant_discard_jumps);
5923
5924 if (OPT(lower_load_payload)) {
5925 split_virtual_grfs();
5926 OPT(register_coalesce);
5927 OPT(compute_to_mrf);
5928 OPT(dead_code_eliminate);
5929 }
5930
5931 OPT(opt_combine_constants);
5932 OPT(lower_integer_multiplication);
5933
5934 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
5935 OPT(opt_cmod_propagation);
5936 OPT(opt_cse);
5937 OPT(opt_copy_propagation);
5938 OPT(dead_code_eliminate);
5939 }
5940
5941 if (OPT(lower_conversions)) {
5942 OPT(opt_copy_propagation);
5943 OPT(dead_code_eliminate);
5944 OPT(lower_simd_width);
5945 }
5946
5947 lower_uniform_pull_constant_loads();
5948
5949 validate();
5950 }
5951
5952 /**
5953 * Three-source instructions must have a GRF/MRF destination register.
5954 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
5955 */
5956 void
5957 fs_visitor::fixup_3src_null_dest()
5958 {
5959 bool progress = false;
5960
5961 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
5962 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
5963 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
5964 inst->dst.type);
5965 progress = true;
5966 }
5967 }
5968
5969 if (progress)
5970 invalidate_live_intervals();
5971 }
5972
5973 void
5974 fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
5975 {
5976 bool allocated_without_spills;
5977
5978 static const enum instruction_scheduler_mode pre_modes[] = {
5979 SCHEDULE_PRE,
5980 SCHEDULE_PRE_NON_LIFO,
5981 SCHEDULE_PRE_LIFO,
5982 };
5983
5984 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
5985
5986 /* Try each scheduling heuristic to see if it can successfully register
5987 * allocate without spilling. They should be ordered by decreasing
5988 * performance but increasing likelihood of allocating.
5989 */
5990 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
5991 schedule_instructions(pre_modes[i]);
5992
5993 if (0) {
5994 assign_regs_trivial();
5995 allocated_without_spills = true;
5996 } else {
5997 allocated_without_spills = assign_regs(false, spill_all);
5998 }
5999 if (allocated_without_spills)
6000 break;
6001 }
6002
6003 if (!allocated_without_spills) {
6004 if (!allow_spilling)
6005 fail("Failure to register allocate and spilling is not allowed.");
6006
6007 /* We assume that any spilling is worse than just dropping back to
6008 * SIMD8. There's probably actually some intermediate point where
6009 * SIMD16 with a couple of spills is still better.
6010 */
6011 if (dispatch_width > min_dispatch_width) {
6012 fail("Failure to register allocate. Reduce number of "
6013 "live scalar values to avoid this.");
6014 } else {
6015 compiler->shader_perf_log(log_data,
6016 "%s shader triggered register spilling. "
6017 "Try reducing the number of live scalar "
6018 "values to improve performance.\n",
6019 stage_name);
6020 }
6021
6022 /* Since we're out of heuristics, just go spill registers until we
6023 * get an allocation.
6024 */
6025 while (!assign_regs(true, spill_all)) {
6026 if (failed)
6027 break;
6028 }
6029 }
6030
6031 /* This must come after all optimization and register allocation, since
6032 * it inserts dead code that happens to have side effects, and it does
6033 * so based on the actual physical registers in use.
6034 */
6035 insert_gen4_send_dependency_workarounds();
6036
6037 if (failed)
6038 return;
6039
6040 schedule_instructions(SCHEDULE_POST);
6041
6042 if (last_scratch > 0) {
6043 MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
6044
6045 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
6046
6047 if (stage == MESA_SHADER_COMPUTE) {
6048 if (devinfo->is_haswell) {
6049 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
6050 * field documentation, Haswell supports a minimum of 2kB of
6051 * scratch space for compute shaders, unlike every other stage
6052 * and platform.
6053 */
6054 prog_data->total_scratch = MAX2(prog_data->total_scratch, 2048);
6055 } else if (devinfo->gen <= 7) {
6056 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
6057 * field documentation, platforms prior to Haswell measure scratch
6058 * size linearly with a range of [1kB, 12kB] and 1kB granularity.
6059 */
6060 prog_data->total_scratch = ALIGN(last_scratch, 1024);
6061 max_scratch_size = 12 * 1024;
6062 }
6063 }
6064
6065 /* We currently only support up to 2MB of scratch space. If we
6066 * need to support more eventually, the documentation suggests
6067 * that we could allocate a larger buffer, and partition it out
6068 * ourselves. We'd just have to undo the hardware's address
6069 * calculation by subtracting (FFTID * Per Thread Scratch Space)
6070 * and then add FFTID * (Larger Per Thread Scratch Space).
6071 *
6072 * See 3D-Media-GPGPU Engine > Media GPGPU Pipeline >
6073 * Thread Group Tracking > Local Memory/Scratch Space.
6074 */
6075 assert(prog_data->total_scratch < max_scratch_size);
6076 }
6077 }
6078
6079 bool
6080 fs_visitor::run_vs()
6081 {
6082 assert(stage == MESA_SHADER_VERTEX);
6083
6084 setup_vs_payload();
6085
6086 if (shader_time_index >= 0)
6087 emit_shader_time_begin();
6088
6089 emit_nir_code();
6090
6091 if (failed)
6092 return false;
6093
6094 compute_clip_distance();
6095
6096 emit_urb_writes();
6097
6098 if (shader_time_index >= 0)
6099 emit_shader_time_end();
6100
6101 calculate_cfg();
6102
6103 optimize();
6104
6105 assign_curb_setup();
6106 assign_vs_urb_setup();
6107
6108 fixup_3src_null_dest();
6109 allocate_registers(8, true);
6110
6111 return !failed;
6112 }
6113
6114 bool
6115 fs_visitor::run_tcs_single_patch()
6116 {
6117 assert(stage == MESA_SHADER_TESS_CTRL);
6118
6119 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
6120
6121 /* r1-r4 contain the ICP handles. */
6122 payload.num_regs = 5;
6123
6124 if (shader_time_index >= 0)
6125 emit_shader_time_begin();
6126
6127 /* Initialize gl_InvocationID */
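/* 0x76543210 is a packed UV immediate: each nibble supplies one channel's
* value, so the eight channels get their own index 0..7.
*/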
6128 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
6129 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
6130 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
6131 bld.MOV(channels_ud, channels_uw);
6132
6133 if (tcs_prog_data->instances == 1) {
6134 invocation_id = channels_ud;
6135 } else {
6136 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
6137
6138 /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
6139 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
6140 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
6141 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
6142 brw_imm_ud(INTEL_MASK(23, 17)));
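/* Shifting right by 17 - 3 both extracts the field starting at bit 17 and
* multiplies it by 8 in a single operation.
*/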
6143 bld.SHR(instance_times_8, t, brw_imm_ud(17 - 3));
6144
6145 bld.ADD(invocation_id, instance_times_8, channels_ud);
6146 }
6147
6148 /* Fix the dispatch mask */
6149 if (nir->info.tess.tcs_vertices_out % 8) {
6150 bld.CMP(bld.null_reg_ud(), invocation_id,
6151 brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
6152 bld.IF(BRW_PREDICATE_NORMAL);
6153 }
6154
6155 emit_nir_code();
6156
6157 if (nir->info.tess.tcs_vertices_out % 8) {
6158 bld.emit(BRW_OPCODE_ENDIF);
6159 }
6160
6161 /* Emit EOT write; set TR DS Cache bit */
6162 fs_reg srcs[3] = {
6163 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
6164 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
6165 fs_reg(brw_imm_ud(0)),
6166 };
6167 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
6168 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
6169
6170 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
6171 bld.null_reg_ud(), payload);
6172 inst->mlen = 3;
6173 inst->eot = true;
6174
6175 if (shader_time_index >= 0)
6176 emit_shader_time_end();
6177
6178 if (failed)
6179 return false;
6180
6181 calculate_cfg();
6182
6183 optimize();
6184
6185 assign_curb_setup();
6186 assign_tcs_single_patch_urb_setup();
6187
6188 fixup_3src_null_dest();
6189 allocate_registers(8, true);
6190
6191 return !failed;
6192 }
6193
6194 bool
6195 fs_visitor::run_tes()
6196 {
6197 assert(stage == MESA_SHADER_TESS_EVAL);
6198
6199 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
6200 payload.num_regs = 5;
6201
6202 if (shader_time_index >= 0)
6203 emit_shader_time_begin();
6204
6205 emit_nir_code();
6206
6207 if (failed)
6208 return false;
6209
6210 emit_urb_writes();
6211
6212 if (shader_time_index >= 0)
6213 emit_shader_time_end();
6214
6215 calculate_cfg();
6216
6217 optimize();
6218
6219 assign_curb_setup();
6220 assign_tes_urb_setup();
6221
6222 fixup_3src_null_dest();
6223 allocate_registers(8, true);
6224
6225 return !failed;
6226 }
6227
6228 bool
6229 fs_visitor::run_gs()
6230 {
6231 assert(stage == MESA_SHADER_GEOMETRY);
6232
6233 setup_gs_payload();
6234
6235 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
6236
6237 if (gs_compile->control_data_header_size_bits > 0) {
6238 /* Create a VGRF to store accumulated control data bits. */
6239 this->control_data_bits = vgrf(glsl_type::uint_type);
6240
6241 /* If we're outputting more than 32 control data bits, then EmitVertex()
6242 * will set control_data_bits to 0 after emitting the first vertex.
6243 * Otherwise, we need to initialize it to 0 here.
6244 */
6245 if (gs_compile->control_data_header_size_bits <= 32) {
6246 const fs_builder abld = bld.annotate("initialize control data bits");
6247 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
6248 }
6249 }
6250
6251 if (shader_time_index >= 0)
6252 emit_shader_time_begin();
6253
6254 emit_nir_code();
6255
6256 emit_gs_thread_end();
6257
6258 if (shader_time_index >= 0)
6259 emit_shader_time_end();
6260
6261 if (failed)
6262 return false;
6263
6264 calculate_cfg();
6265
6266 optimize();
6267
6268 assign_curb_setup();
6269 assign_gs_urb_setup();
6270
6271 fixup_3src_null_dest();
6272 allocate_registers(8, true);
6273
6274 return !failed;
6275 }
6276
6277 /* From the SKL PRM, Volume 16, Workarounds:
6278 *
6279 * 0877 3D Pixel Shader Hang possible when pixel shader dispatched with
6280 * only header phases (R0-R2)
6281 *
6282 * WA: Enable a non-header phase (e.g. push constant) when dispatch would
6283 * have been header only.
6284 *
6285 * Instead of enabling push constants one can alternatively enable one of the
6286 * inputs. Here one simply chooses "layer" which shouldn't impose much
6287 * overhead.
6288 */
6289 static void
6290 gen9_ps_header_only_workaround(struct brw_wm_prog_data *wm_prog_data)
6291 {
6292 if (wm_prog_data->num_varying_inputs)
6293 return;
6294
6295 if (wm_prog_data->base.curb_read_length)
6296 return;
6297
6298 wm_prog_data->urb_setup[VARYING_SLOT_LAYER] = 0;
6299 wm_prog_data->num_varying_inputs = 1;
6300 }
6301
6302 bool
6303 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
6304 {
6305 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
6306 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
6307
6308 assert(stage == MESA_SHADER_FRAGMENT);
6309
6310 if (devinfo->gen >= 6)
6311 setup_fs_payload_gen6();
6312 else
6313 setup_fs_payload_gen4();
6314
6315 if (0) {
6316 emit_dummy_fs();
6317 } else if (do_rep_send) {
6318 assert(dispatch_width == 16);
6319 emit_repclear_shader();
6320 } else {
6321 if (shader_time_index >= 0)
6322 emit_shader_time_begin();
6323
6324 calculate_urb_setup();
6325 if (nir->info.inputs_read > 0 ||
6326 (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
6327 if (devinfo->gen < 6)
6328 emit_interpolation_setup_gen4();
6329 else
6330 emit_interpolation_setup_gen6();
6331 }
6332
6333 /* We handle discards by keeping track of the still-live pixels in f0.1.
6334 * Initialize it with the dispatched pixels.
6335 */
6336 if (wm_prog_data->uses_kill) {
6337 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
6338 discard_init->flag_subreg = 1;
6339 }
6340
6341 /* Generate FS IR for main(). (the visitor only descends into
6342 * functions called "main").
6343 */
6344 emit_nir_code();
6345
6346 if (failed)
6347 return false;
6348
6349 if (wm_prog_data->uses_kill)
6350 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
6351
6352 if (wm_key->alpha_test_func)
6353 emit_alpha_test();
6354
6355 emit_fb_writes();
6356
6357 if (shader_time_index >= 0)
6358 emit_shader_time_end();
6359
6360 calculate_cfg();
6361
6362 optimize();
6363
6364 assign_curb_setup();
6365
6366 if (devinfo->gen >= 9)
6367 gen9_ps_header_only_workaround(wm_prog_data);
6368
6369 assign_urb_setup();
6370
6371 fixup_3src_null_dest();
6372 allocate_registers(8, allow_spilling);
6373
6374 if (failed)
6375 return false;
6376 }
6377
6378 return !failed;
6379 }
6380
6381 bool
6382 fs_visitor::run_cs(unsigned min_dispatch_width)
6383 {
6384 assert(stage == MESA_SHADER_COMPUTE);
6385 assert(dispatch_width >= min_dispatch_width);
6386
6387 setup_cs_payload();
6388
6389 if (shader_time_index >= 0)
6390 emit_shader_time_begin();
6391
6392 if (devinfo->is_haswell && prog_data->total_shared > 0) {
6393 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
6394 const fs_builder abld = bld.exec_all().group(1, 0);
6395 abld.MOV(retype(brw_sr0_reg(1), BRW_REGISTER_TYPE_UW),
6396 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
6397 }
6398
6399 emit_nir_code();
6400
6401 if (failed)
6402 return false;
6403
6404 emit_cs_terminate();
6405
6406 if (shader_time_index >= 0)
6407 emit_shader_time_end();
6408
6409 calculate_cfg();
6410
6411 optimize();
6412
6413 assign_curb_setup();
6414
6415 fixup_3src_null_dest();
6416 allocate_registers(min_dispatch_width, true);
6417
6418 if (failed)
6419 return false;
6420
6421 return !failed;
6422 }
6423
6424 /**
6425 * Return a bitfield where bit n is set if barycentric interpolation mode n
6426 * (see enum brw_barycentric_mode) is needed by the fragment shader.
6427 *
6428 * We examine the load_barycentric intrinsics rather than looking at input
6429 * variables so that we catch interpolateAtCentroid() messages too, which
6430 * also need the BRW_BARYCENTRIC_[NON]PERSPECTIVE_CENTROID mode set up.
6431 */
6432 static unsigned
6433 brw_compute_barycentric_interp_modes(const struct gen_device_info *devinfo,
6434 const nir_shader *shader)
6435 {
6436 unsigned barycentric_interp_modes = 0;
6437
6438 nir_foreach_function(f, shader) {
6439 if (!f->impl)
6440 continue;
6441
6442 nir_foreach_block(block, f->impl) {
6443 nir_foreach_instr(instr, block) {
6444 if (instr->type != nir_instr_type_intrinsic)
6445 continue;
6446
6447 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6448 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
6449 continue;
6450
6451 /* Ignore WPOS; it doesn't require interpolation. */
6452 if (nir_intrinsic_base(intrin) == VARYING_SLOT_POS)
6453 continue;
6454
6455 intrin = nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
6456 enum glsl_interp_mode interp = (enum glsl_interp_mode)
6457 nir_intrinsic_interp_mode(intrin);
6458 nir_intrinsic_op bary_op = intrin->intrinsic;
6459 enum brw_barycentric_mode bary =
6460 brw_barycentric_mode(interp, bary_op);
6461
6462 barycentric_interp_modes |= 1 << bary;
6463
6464 if (devinfo->needs_unlit_centroid_workaround &&
6465 bary_op == nir_intrinsic_load_barycentric_centroid)
6466 barycentric_interp_modes |= 1 << centroid_to_pixel(bary);
6467 }
6468 }
6469 }
6470
6471 return barycentric_interp_modes;
6472 }
6473
6474 static void
6475 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
6476 const nir_shader *shader)
6477 {
6478 prog_data->flat_inputs = 0;
6479
6480 nir_foreach_variable(var, &shader->inputs) {
6481 int input_index = prog_data->urb_setup[var->data.location];
6482
6483 if (input_index < 0)
6484 continue;
6485
6486 /* flat shading */
6487 if (var->data.interpolation == INTERP_MODE_FLAT)
6488 prog_data->flat_inputs |= (1 << input_index);
6489 }
6490 }
6491
6492 static uint8_t
6493 computed_depth_mode(const nir_shader *shader)
6494 {
6495 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
6496 switch (shader->info.fs.depth_layout) {
6497 case FRAG_DEPTH_LAYOUT_NONE:
6498 case FRAG_DEPTH_LAYOUT_ANY:
6499 return BRW_PSCDEPTH_ON;
6500 case FRAG_DEPTH_LAYOUT_GREATER:
6501 return BRW_PSCDEPTH_ON_GE;
6502 case FRAG_DEPTH_LAYOUT_LESS:
6503 return BRW_PSCDEPTH_ON_LE;
6504 case FRAG_DEPTH_LAYOUT_UNCHANGED:
6505 return BRW_PSCDEPTH_OFF;
6506 }
6507 }
6508 return BRW_PSCDEPTH_OFF;
6509 }
6510
6511 /**
6512 * Move load_interpolated_input with simple (payload-based) barycentric modes
6513 * to the top of the program so we don't emit multiple PLNs for the same input.
6514 *
6515 * This works around CSE not being able to handle non-dominating cases
6516 * such as:
6517 *
6518 * if (...) {
6519 * interpolate input
6520 * } else {
6521 * interpolate the same exact input
6522 * }
6523 *
6524 * This should be replaced by global value numbering someday.
6525 */
6526 static bool
6527 move_interpolation_to_top(nir_shader *nir)
6528 {
6529 bool progress = false;
6530
6531 nir_foreach_function(f, nir) {
6532 if (!f->impl)
6533 continue;
6534
6535 nir_block *top = nir_start_block(f->impl);
6536 exec_node *cursor_node = NULL;
6537
6538 nir_foreach_block(block, f->impl) {
6539 if (block == top)
6540 continue;
6541
6542 nir_foreach_instr_safe(instr, block) {
6543 if (instr->type != nir_instr_type_intrinsic)
6544 continue;
6545
6546 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6547 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
6548 continue;
6549 nir_intrinsic_instr *bary_intrinsic =
6550 nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
6551 nir_intrinsic_op op = bary_intrinsic->intrinsic;
6552
6553 /* Leave interpolateAtSample/Offset() where they are. */
6554 if (op == nir_intrinsic_load_barycentric_at_sample ||
6555 op == nir_intrinsic_load_barycentric_at_offset)
6556 continue;
6557
6558 nir_instr *move[3] = {
6559 &bary_intrinsic->instr,
6560 intrin->src[1].ssa->parent_instr,
6561 instr
6562 };
6563
6564 for (unsigned i = 0; i < ARRAY_SIZE(move); i++) {
6565 if (move[i]->block != top) {
6566 move[i]->block = top;
6567 exec_node_remove(&move[i]->node);
6568 if (cursor_node) {
6569 exec_node_insert_after(cursor_node, &move[i]->node);
6570 } else {
6571 exec_list_push_head(&top->instr_list, &move[i]->node);
6572 }
6573 cursor_node = &move[i]->node;
6574 progress = true;
6575 }
6576 }
6577 }
6578 }
6579 nir_metadata_preserve(f->impl, (nir_metadata)
6580 ((unsigned) nir_metadata_block_index |
6581 (unsigned) nir_metadata_dominance));
6582 }
6583
6584 return progress;
6585 }
6586
6587 /**
6588 * Demote per-sample barycentric intrinsics to centroid.
6589 *
6590 * Useful when rendering to a non-multisampled buffer.
6591 */
6592 static bool
6593 demote_sample_qualifiers(nir_shader *nir)
6594 {
6595 bool progress = false;
6596
6597 nir_foreach_function(f, nir) {
6598 if (!f->impl)
6599 continue;
6600
6601 nir_builder b;
6602 nir_builder_init(&b, f->impl);
6603
6604 nir_foreach_block(block, f->impl) {
6605 nir_foreach_instr_safe(instr, block) {
6606 if (instr->type != nir_instr_type_intrinsic)
6607 continue;
6608
6609 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6610 if (intrin->intrinsic != nir_intrinsic_load_barycentric_sample &&
6611 intrin->intrinsic != nir_intrinsic_load_barycentric_at_sample)
6612 continue;
6613
6614 b.cursor = nir_before_instr(instr);
6615 nir_ssa_def *centroid =
6616 nir_load_barycentric(&b, nir_intrinsic_load_barycentric_centroid,
6617 nir_intrinsic_interp_mode(intrin));
6618 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
6619 nir_src_for_ssa(centroid));
6620 nir_instr_remove(instr);
6621 progress = true;
6622 }
6623 }
6624
6625 nir_metadata_preserve(f->impl, (nir_metadata)
6626 ((unsigned) nir_metadata_block_index |
6627 (unsigned) nir_metadata_dominance));
6628 }
6629
6630 return progress;
6631 }
6632
6633 /**
6634 * Pre-gen6, the register file of the EUs was shared between threads,
6635 * and each thread used some subset allocated on a 16-register block
6636 * granularity. The unit states wanted these block counts.
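*
* E.g. a shader using 32 GRFs reports ALIGN(32, 16) / 16 - 1 = 1 block.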
6637 */
6638 static inline int
6639 brw_register_blocks(int reg_count)
6640 {
6641 return ALIGN(reg_count, 16) / 16 - 1;
6642 }
6643
6644 const unsigned *
6645 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
6646 void *mem_ctx,
6647 const struct brw_wm_prog_key *key,
6648 struct brw_wm_prog_data *prog_data,
6649 const nir_shader *src_shader,
6650 struct gl_program *prog,
6651 int shader_time_index8, int shader_time_index16,
6652 bool allow_spilling,
6653 bool use_rep_send, struct brw_vue_map *vue_map,
6654 char **error_str)
6655 {
6656 const struct gen_device_info *devinfo = compiler->devinfo;
6657
6658 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6659 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
6660 brw_nir_lower_fs_inputs(shader, devinfo, key);
6661 brw_nir_lower_fs_outputs(shader);
6662
6663 if (devinfo->gen < 6) {
6664 brw_setup_vue_interpolation(vue_map, shader, prog_data, devinfo);
6665 }
6666
6667 if (!key->multisample_fbo)
6668 NIR_PASS_V(shader, demote_sample_qualifiers);
6669 NIR_PASS_V(shader, move_interpolation_to_top);
6670 shader = brw_postprocess_nir(shader, compiler, true);
6671
6672 /* key->alpha_test_func means simulating alpha testing via discards,
6673 * so the shader definitely kills pixels.
6674 */
6675 prog_data->uses_kill = shader->info.fs.uses_discard ||
6676 key->alpha_test_func;
6677 prog_data->uses_omask = key->multisample_fbo &&
6678 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
6679 prog_data->computed_depth_mode = computed_depth_mode(shader);
6680 prog_data->computed_stencil =
6681 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
6682
6683 prog_data->persample_dispatch =
6684 key->multisample_fbo &&
6685 (key->persample_interp ||
6686 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
6687 SYSTEM_BIT_SAMPLE_POS)) ||
6688 shader->info.fs.uses_sample_qualifier ||
6689 shader->info.outputs_read);
6690
6691 prog_data->has_render_target_reads = shader->info.outputs_read != 0ull;
6692
6693 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
6694 prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
6695 prog_data->inner_coverage = shader->info.fs.inner_coverage;
6696
6697 prog_data->barycentric_interp_modes =
6698 brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
6699
6700 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
6701 uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
6702 unsigned simd8_grf_used = 0, simd16_grf_used = 0;
6703
6704 fs_visitor v8(compiler, log_data, mem_ctx, key,
6705 &prog_data->base, prog, shader, 8,
6706 shader_time_index8);
6707 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
6708 if (error_str)
6709 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
6710
6711 return NULL;
6712 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
6713 simd8_cfg = v8.cfg;
6714 simd8_grf_start = v8.payload.num_regs;
6715 simd8_grf_used = v8.grf_used;
6716 }
6717
6718 if (v8.max_dispatch_width >= 16 &&
6719 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
6720 /* Try a SIMD16 compile */
6721 fs_visitor v16(compiler, log_data, mem_ctx, key,
6722 &prog_data->base, prog, shader, 16,
6723 shader_time_index16);
6724 v16.import_uniforms(&v8);
6725 if (!v16.run_fs(allow_spilling, use_rep_send)) {
6726 compiler->shader_perf_log(log_data,
6727 "SIMD16 shader failed to compile: %s",
6728 v16.fail_msg);
6729 } else {
6730 simd16_cfg = v16.cfg;
6731 simd16_grf_start = v16.payload.num_regs;
6732 simd16_grf_used = v16.grf_used;
6733 }
6734 }
6735
6736 /* When the caller requests a repclear shader, they want SIMD16-only */
6737 if (use_rep_send)
6738 simd8_cfg = NULL;
6739
6740 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
6741 * at the top to select the shader. We've never implemented that.
6742 * Instead, we just give them exactly one shader and we pick the widest one
6743 * available.
6744 */
6745 if (compiler->devinfo->gen < 5 && simd16_cfg)
6746 simd8_cfg = NULL;
6747
6748 if (prog_data->persample_dispatch) {
6749 /* Starting with SandyBridge (where we first get MSAA), the different
6750 * pixel dispatch combinations are grouped into classifications A
6751 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
6752 * generations, the only configurations supporting persample dispatch
6753 * are those in which only one dispatch width is enabled.
6754 *
6755 * If computed depth is enabled, SNB only allows SIMD8 while IVB+
6756 * allow SIMD8 or SIMD16 so we choose SIMD16 if available.
6757 */
6758 if (compiler->devinfo->gen == 6 &&
6759 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) {
6760 simd16_cfg = NULL;
6761 } else if (simd16_cfg) {
6762 simd8_cfg = NULL;
6763 }
6764 }
6765
6766 /* We have to compute the flat inputs after the visitor is finished running
6767 * because it relies on prog_data->urb_setup which is computed in
6768 * fs_visitor::calculate_urb_setup().
6769 */
6770 brw_compute_flat_inputs(prog_data, shader);
6771
6772 fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
6773 v8.promoted_constants, v8.runtime_check_aads_emit,
6774 MESA_SHADER_FRAGMENT);
6775
6776 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
6777 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
6778 shader->info.label ?
6779 shader->info.label : "unnamed",
6780 shader->info.name));
6781 }
6782
6783 if (simd8_cfg) {
6784 prog_data->dispatch_8 = true;
6785 g.generate_code(simd8_cfg, 8);
6786 prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
6787 prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
6788
6789 if (simd16_cfg) {
6790 prog_data->dispatch_16 = true;
6791 prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
6792 prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
6793 prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
6794 }
6795 } else if (simd16_cfg) {
6796 prog_data->dispatch_16 = true;
6797 g.generate_code(simd16_cfg, 16);
6798 prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
6799 prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
6800 }
6801
6802 return g.get_assembly(&prog_data->base.program_size);
6803 }
6804
6805 fs_reg *
6806 fs_visitor::emit_cs_work_group_id_setup()
6807 {
6808 assert(stage == MESA_SHADER_COMPUTE);
6809
6810 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6811
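/* The X, Y and Z workgroup IDs arrive in the fixed R0 payload locations
* r0.1, r0.6 and r0.7; copy them into the uvec3 VGRF allocated above.
*/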
6812 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
6813 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
6814 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
6815
6816 bld.MOV(*reg, r0_1);
6817 bld.MOV(offset(*reg, bld, 1), r0_6);
6818 bld.MOV(offset(*reg, bld, 2), r0_7);
6819
6820 return reg;
6821 }
6822
6823 static void
6824 fill_push_const_block_info(struct brw_push_const_block *block, unsigned dwords)
6825 {
6826 block->dwords = dwords;
6827 block->regs = DIV_ROUND_UP(dwords, 8);
6828 block->size = block->regs * 32;
6829 }
6830
6831 static void
6832 cs_fill_push_const_info(const struct gen_device_info *devinfo,
6833 struct brw_cs_prog_data *cs_prog_data)
6834 {
6835 const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
6836 int subgroup_id_index = get_subgroup_id_param_index(prog_data);
6837 bool cross_thread_supported = devinfo->gen > 7 || devinfo->is_haswell;
6838
6839 /* The thread ID should be stored in the last param dword */
6840 assert(subgroup_id_index == -1 ||
6841 subgroup_id_index == (int)prog_data->nr_params - 1);
6842
6843 unsigned cross_thread_dwords, per_thread_dwords;
6844 if (!cross_thread_supported) {
6845 cross_thread_dwords = 0u;
6846 per_thread_dwords = prog_data->nr_params;
6847 } else if (subgroup_id_index >= 0) {
6848 /* Fill all but the last register with cross-thread payload */
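/* E.g. with nr_params == 10 and the subgroup ID at index 9, the first
* eight dwords are pushed cross-thread and the remaining two (including
* the subgroup ID) per-thread.
*/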
6849 cross_thread_dwords = 8 * (subgroup_id_index / 8);
6850 per_thread_dwords = prog_data->nr_params - cross_thread_dwords;
6851 assert(per_thread_dwords > 0 && per_thread_dwords <= 8);
6852 } else {
6853 /* Fill all data using cross-thread payload */
6854 cross_thread_dwords = prog_data->nr_params;
6855 per_thread_dwords = 0u;
6856 }
6857
6858 fill_push_const_block_info(&cs_prog_data->push.cross_thread, cross_thread_dwords);
6859 fill_push_const_block_info(&cs_prog_data->push.per_thread, per_thread_dwords);
6860
6861 unsigned total_dwords =
6862 (cs_prog_data->push.per_thread.size * cs_prog_data->threads +
6863 cs_prog_data->push.cross_thread.size) / 4;
6864 fill_push_const_block_info(&cs_prog_data->push.total, total_dwords);
6865
6866 assert(cs_prog_data->push.cross_thread.dwords % 8 == 0 ||
6867 cs_prog_data->push.per_thread.size == 0);
6868 assert(cs_prog_data->push.cross_thread.dwords +
6869 cs_prog_data->push.per_thread.dwords ==
6870 prog_data->nr_params);
6871 }
6872
6873 static void
6874 cs_set_simd_size(struct brw_cs_prog_data *cs_prog_data, unsigned size)
6875 {
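/* E.g. an 8x8x1 local workgroup (64 invocations) compiled at SIMD16 runs
* in (64 + 16 - 1) / 16 = 4 hardware threads.
*/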
6876 cs_prog_data->simd_size = size;
6877 unsigned group_size = cs_prog_data->local_size[0] *
6878 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
6879 cs_prog_data->threads = (group_size + size - 1) / size;
6880 }
6881
6882 static nir_shader *
6883 compile_cs_to_nir(const struct brw_compiler *compiler,
6884 void *mem_ctx,
6885 const struct brw_cs_prog_key *key,
6886 struct brw_cs_prog_data *prog_data,
6887 const nir_shader *src_shader,
6888 unsigned dispatch_width)
6889 {
6890 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6891 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
6892 brw_nir_lower_cs_intrinsics(shader, dispatch_width);
6893 return brw_postprocess_nir(shader, compiler, true);
6894 }
6895
6896 const unsigned *
6897 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
6898 void *mem_ctx,
6899 const struct brw_cs_prog_key *key,
6900 struct brw_cs_prog_data *prog_data,
6901 const nir_shader *src_shader,
6902 int shader_time_index,
6903 char **error_str)
6904 {
6905 prog_data->local_size[0] = src_shader->info.cs.local_size[0];
6906 prog_data->local_size[1] = src_shader->info.cs.local_size[1];
6907 prog_data->local_size[2] = src_shader->info.cs.local_size[2];
6908 unsigned local_workgroup_size =
6909 src_shader->info.cs.local_size[0] * src_shader->info.cs.local_size[1] *
6910 src_shader->info.cs.local_size[2];
6911
6912 unsigned min_dispatch_width =
6913 DIV_ROUND_UP(local_workgroup_size, compiler->devinfo->max_cs_threads);
6914 min_dispatch_width = MAX2(8, min_dispatch_width);
6915 min_dispatch_width = util_next_power_of_two(min_dispatch_width);
6916 assert(min_dispatch_width <= 32);
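/* E.g. a 1024-invocation workgroup on hardware with max_cs_threads == 64
* needs at least DIV_ROUND_UP(1024, 64) = 16 channels per thread, so the
* SIMD8 compile below is skipped.
*/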
6917
6918 fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
6919 cfg_t *cfg = NULL;
6920 const char *fail_msg = NULL;
6921 unsigned promoted_constants;
6922
6923 /* Now the main event: Visit the shader IR and generate our CS IR for it.
6924 */
6925 if (min_dispatch_width <= 8) {
6926 nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
6927 prog_data, src_shader, 8);
6928 v8 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
6929 NULL, /* Never used in core profile */
6930 nir8, 8, shader_time_index);
6931 if (!v8->run_cs(min_dispatch_width)) {
6932 fail_msg = v8->fail_msg;
6933 } else {
6934 /* We should always be able to do SIMD32 for compute shaders */
6935 assert(v8->max_dispatch_width >= 32);
6936
6937 cfg = v8->cfg;
6938 cs_set_simd_size(prog_data, 8);
6939 cs_fill_push_const_info(compiler->devinfo, prog_data);
6940 promoted_constants = v8->promoted_constants;
6941 }
6942 }
6943
6944 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
6945 !fail_msg && min_dispatch_width <= 16) {
6946 /* Try a SIMD16 compile */
6947 nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
6948 prog_data, src_shader, 16);
6949 v16 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
6950 NULL, /* Never used in core profile */
6951 nir16, 16, shader_time_index);
6952 if (v8)
6953 v16->import_uniforms(v8);
6954
6955 if (!v16->run_cs(min_dispatch_width)) {
6956 compiler->shader_perf_log(log_data,
6957 "SIMD16 shader failed to compile: %s",
6958 v16->fail_msg);
6959 if (!cfg) {
6960 fail_msg =
6961 "Couldn't generate SIMD16 program and not "
6962 "enough threads for SIMD8";
6963 }
6964 } else {
6965 /* We should always be able to do SIMD32 for compute shaders */
6966 assert(v16->max_dispatch_width >= 32);
6967
6968 cfg = v16->cfg;
6969 cs_set_simd_size(prog_data, 16);
6970 cs_fill_push_const_info(compiler->devinfo, prog_data);
6971 promoted_constants = v16->promoted_constants;
6972 }
6973 }
6974
6975 /* We should always be able to do SIMD32 for compute shaders */
6976 assert(!v16 || v16->max_dispatch_width >= 32);
6977
6978 if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
6979 /* Try a SIMD32 compile */
6980 nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
6981 prog_data, src_shader, 32);
6982 v32 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
6983 NULL, /* Never used in core profile */
6984 nir32, 32, shader_time_index);
6985 if (v8)
6986 v32->import_uniforms(v8);
6987 else if (v16)
6988 v32->import_uniforms(v16);
6989
6990 if (!v32->run_cs(min_dispatch_width)) {
6991 compiler->shader_perf_log(log_data,
6992 "SIMD32 shader failed to compile: %s",
6993 v32->fail_msg);
6994 if (!cfg) {
6995 fail_msg =
6996 "Couldn't generate SIMD32 program and not "
6997 "enough threads for SIMD16";
6998 }
6999 } else {
7000 cfg = v32->cfg;
7001 cs_set_simd_size(prog_data, 32);
7002 cs_fill_push_const_info(compiler->devinfo, prog_data);
7003 promoted_constants = v32->promoted_constants;
7004 }
7005 }
7006
7007 const unsigned *ret = NULL;
7008 if (unlikely(cfg == NULL)) {
7009 assert(fail_msg);
7010 if (error_str)
7011 *error_str = ralloc_strdup(mem_ctx, fail_msg);
7012 } else {
7013 fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
7014 promoted_constants, false, MESA_SHADER_COMPUTE);
7015 if (INTEL_DEBUG & DEBUG_CS) {
7016 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
7017 src_shader->info.label ?
7018 src_shader->info.label : "unnamed",
7019 src_shader->info.name);
7020 g.enable_debug(name);
7021 }
7022
7023 g.generate_code(cfg, prog_data->simd_size);
7024
7025 ret = g.get_assembly(&prog_data->base.program_size);
7026 }
7027
7028 delete v8;
7029 delete v16;
7030 delete v32;
7031
7032 return ret;
7033 }
7034
7035 /**
7036 * Test the dispatch mask packing assumptions of
7037 * brw_stage_has_packed_dispatch(). Call this from e.g. the top of
7038 * fs_visitor::emit_nir_code() to cause a GPU hang if any shader invocation is
7039 * executed with an unexpected dispatch mask.
7040 */
7041 static UNUSED void
7042 brw_fs_test_dispatch_packing(const fs_builder &bld)
7043 {
7044 const gl_shader_stage stage = bld.shader->stage;
7045
7046 if (brw_stage_has_packed_dispatch(bld.shader->devinfo, stage,
7047 bld.shader->stage_prog_data)) {
7048 const fs_builder ubld = bld.exec_all().group(1, 0);
7049 const fs_reg tmp = component(bld.vgrf(BRW_REGISTER_TYPE_UD), 0);
7050 const fs_reg mask = (stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
7051 brw_dmask_reg());
7052
7053 ubld.ADD(tmp, mask, brw_imm_ud(1));
7054 ubld.AND(tmp, mask, tmp);
7055
7056 /* This will loop forever if the dispatch mask doesn't have the expected
7057 * form '2^n-1', in which case tmp will be non-zero.
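*
* E.g. a packed SIMD16 mask of 0xffff gives 0xffff & 0x10000 == 0 and the
* loop exits immediately, while a sparse mask such as 0x00f3 leaves
* tmp == 0xf0 and hangs the EU.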
7058 */
7059 bld.emit(BRW_OPCODE_DO);
7060 bld.CMP(bld.null_reg_ud(), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
7061 set_predicate(BRW_PREDICATE_NORMAL, bld.emit(BRW_OPCODE_WHILE));
7062 }
7063 }