/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs.cpp
 *
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */

#include "main/macros.h"
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_dead_control_flow.h"
#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "program/prog_parameter.h"

using namespace brw;

static unsigned get_lowered_simd_width(const struct gen_device_info *devinfo,
                                       const fs_inst *inst);

void
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
              const fs_reg *src, unsigned sources)
{
   memset(this, 0, sizeof(*this));

   this->src = new fs_reg[MAX2(sources, 3)];
   for (unsigned i = 0; i < sources; i++)
      this->src[i] = src[i];

   this->opcode = opcode;
   this->dst = dst;
   this->sources = sources;
   this->exec_size = exec_size;
   this->base_mrf = -1;

   assert(dst.file != IMM && dst.file != UNIFORM);

   assert(this->exec_size != 0);

   this->conditional_mod = BRW_CONDITIONAL_NONE;

   /* This will be the case for almost all instructions. */
   switch (dst.file) {
   case VGRF:
   case ARF:
   case FIXED_GRF:
   case MRF:
   case ATTR:
      this->size_written = dst.component_size(exec_size);
      break;
   case BAD_FILE:
      this->size_written = 0;
      break;
   case IMM:
   case UNIFORM:
      unreachable("Invalid destination register file");
   }

   this->writes_accumulator = false;
}

fs_inst::fs_inst()
{
   init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
{
   init(opcode, exec_size, reg_undef, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
{
   init(opcode, exec_size, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0)
{
   const fs_reg src[1] = { src0 };
   init(opcode, exec_size, dst, src, 1);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1)
{
   const fs_reg src[2] = { src0, src1 };
   init(opcode, exec_size, dst, src, 2);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
{
   const fs_reg src[3] = { src0, src1, src2 };
   init(opcode, exec_size, dst, src, 3);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
                 const fs_reg src[], unsigned sources)
{
   init(opcode, exec_width, dst, src, sources);
}

fs_inst::fs_inst(const fs_inst &that)
{
   memcpy(this, &that, sizeof(that));

   this->src = new fs_reg[MAX2(that.sources, 3)];

   for (unsigned i = 0; i < that.sources; i++)
      this->src[i] = that.src[i];
}

fs_inst::~fs_inst()
{
   delete[] this->src;
}

void
fs_inst::resize_sources(uint8_t num_sources)
{
   if (this->sources != num_sources) {
      fs_reg *src = new fs_reg[MAX2(num_sources, 3)];

      for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
         src[i] = this->src[i];

      delete[] this->src;
      this->src = src;
      this->sources = num_sources;
   }
}

void
fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
                                       const fs_reg &dst,
                                       const fs_reg &surf_index,
                                       const fs_reg &varying_offset,
                                       uint32_t const_offset)
{
   /* We have our constant surface use a pitch of 4 bytes, so our index can
    * be any component of a vector, and then we load 4 contiguous
    * components starting from that.
    *
    * We break down the const_offset to a portion added to the variable offset
    * and a portion done using fs_reg::offset, which means that if you have
    * GLSL using something like "uniform vec4 a[20]; gl_FragColor = a[i]",
    * we'll temporarily generate 4 vec4 loads from offset i * 4, and CSE can
    * later notice that those loads are all the same and eliminate the
    * redundant ones.
    */
   fs_reg vec4_offset = vgrf(glsl_type::uint_type);
   bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));

   /* The pull load message will load a vec4 (16 bytes).  If we are loading
    * a double, this means we are only loading 2 elements' worth of data.
    * We also want to use a 32-bit data type for the dst of the load operation
    * so other parts of the driver don't get confused about the size of the
    * result.
    */
   fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
   fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
                            vec4_result, surf_index, vec4_offset);
   inst->size_written = 4 * vec4_result.component_size(inst->exec_size);

   if (type_sz(dst.type) == 8) {
      shuffle_32bit_load_result_to_64bit_data(
         bld, retype(vec4_result, dst.type), vec4_result, 2);
   }

   vec4_result.type = dst.type;
   bld.MOV(dst, offset(vec4_result, bld,
                       (const_offset & 0xf) / type_sz(vec4_result.type)));
}

/**
 * A helper for MOV generation for fixing up broken hardware SEND dependency
 * handling.
 */
void
fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
{
   /* The caller always wants uncompressed to emit the minimal extra
    * dependencies, and to avoid having to deal with aligning its regs to 2.
    */
   const fs_builder ubld = bld.annotate("send dependency resolve")
                              .half(0);

   ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
}

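/**
 * Note that only the first three sources are compared here; the source
 * array is always allocated with at least three entries (see
 * fs_inst::init), so these accesses are safe even for instructions with
 * fewer sources.
 */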
bool
fs_inst::equals(fs_inst *inst) const
{
   return (opcode == inst->opcode &&
           dst.equals(inst->dst) &&
           src[0].equals(inst->src[0]) &&
           src[1].equals(inst->src[1]) &&
           src[2].equals(inst->src[2]) &&
           saturate == inst->saturate &&
           predicate == inst->predicate &&
           conditional_mod == inst->conditional_mod &&
           mlen == inst->mlen &&
           base_mrf == inst->base_mrf &&
           target == inst->target &&
           eot == inst->eot &&
           header_size == inst->header_size &&
           shadow_compare == inst->shadow_compare &&
           exec_size == inst->exec_size &&
           offset == inst->offset);
}

bool
fs_inst::is_send_from_grf() const
{
   switch (opcode) {
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
      return true;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return src[1].file == VGRF;
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_READ:
      return src[0].file == VGRF;
   default:
      if (is_tex())
         return src[0].file == VGRF;

      return false;
   }
}

/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * - SIMD16 compressed instructions with certain regioning (see below).
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
fs_inst::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      /* Multiple partial writes to the destination */
      return true;
   default:
      /* The SIMD16 compressed instruction
       *
       *   add(16)      g4<1>F      g4<8,8,1>F   g6<8,8,1>F
       *
       * is actually decoded in hardware as:
       *
       *   add(8)       g4<1>F      g4<8,8,1>F   g6<8,8,1>F
       *   add(8)       g5<1>F      g5<8,8,1>F   g7<8,8,1>F
       *
       * Which is safe.  However, if we have uniform accesses
       * happening, we get into trouble:
       *
       *   add(8)       g4<1>F      g4<0,1,0>F   g6<8,8,1>F
       *   add(8)       g5<1>F      g4<0,1,0>F   g7<8,8,1>F
       *
       * Now our destination for the first instruction overwrote the
       * second instruction's src0, and we get garbage for those 8
       * pixels.  There's a similar issue for the pre-gen6
       * pixel_x/pixel_y, which are registers of 16-bit values and thus
       * would get stomped by the first decode as well.
       */
      if (exec_size == 16) {
         for (int i = 0; i < sources; i++) {
            if (src[i].file == VGRF && (src[i].stride == 0 ||
                                        src[i].type == BRW_REGISTER_TYPE_UW ||
                                        src[i].type == BRW_REGISTER_TYPE_W ||
                                        src[i].type == BRW_REGISTER_TYPE_UB ||
                                        src[i].type == BRW_REGISTER_TYPE_B)) {
               return true;
            }
         }
      }
      return false;
   }
}

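/**
 * Returns true if this LOAD_PAYLOAD is simply a copy of one contiguous
 * VGRF: every source must read a consecutive region of the same register,
 * with header sources advancing a full register at a time and the
 * remaining sources advancing by the execution width.
 */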
bool
fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
{
   if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   fs_reg reg = this->src[0];
   if (reg.file != VGRF || reg.offset != 0 || reg.stride != 1)
      return false;

   if (grf_alloc.sizes[reg.nr] * REG_SIZE != this->size_written)
      return false;

   for (int i = 0; i < this->sources; i++) {
      reg.type = this->src[i].type;
      if (!this->src[i].equals(reg))
         return false;

      if (i < this->header_size) {
         reg.offset += REG_SIZE;
      } else {
         reg = horiz_offset(reg, this->exec_size);
      }
   }

   return true;
}

bool
fs_inst::can_do_source_mods(const struct gen_device_info *devinfo)
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

bool
fs_inst::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}

bool
fs_inst::has_side_effects() const
{
   return this->eot || backend_instruction::has_side_effects();
}

void
fs_reg::init()
{
   memset(this, 0, sizeof(*this));
   stride = 1;
}

/** Generic unset register constructor. */
fs_reg::fs_reg()
{
   init();
   this->file = BAD_FILE;
}

fs_reg::fs_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->stride = 1;
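   /* Scalar immediates are uniform across all channels (stride 0), while
    * the packed vector immediate types V, UV and VF keep stride 1.
    */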
   if (this->file == IMM &&
       (this->type != BRW_REGISTER_TYPE_V &&
        this->type != BRW_REGISTER_TYPE_UV &&
        this->type != BRW_REGISTER_TYPE_VF)) {
      this->stride = 0;
   }
}

bool
fs_reg::equals(const fs_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           stride == r.stride);
}

bool
fs_reg::is_contiguous() const
{
   return stride == 1;
}

unsigned
fs_reg::component_size(unsigned width) const
{
   const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
                            hstride == 0 ? 0 :
                            1 << (hstride - 1));
   return MAX2(width * stride, 1) * type_sz(type);
}

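/**
 * Returns the number of scalar (32-bit) components a GLSL type occupies,
 * counting 64-bit base types as two components each.
 */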
extern "C" int
type_size_scalar(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
      return type->components() * 2;
   case GLSL_TYPE_ARRAY:
      return type_size_scalar(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size_scalar(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_IMAGE:
      return BRW_IMAGE_PARAM_SIZE;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }

   return 0;
}

/**
 * Create a MOV to read the timestamp register.
 *
 * The caller is responsible for emitting the MOV.  The return value is
 * the destination of the MOV, with extra parameters set.
 */
fs_reg
fs_visitor::get_timestamp(const fs_builder &bld)
{
   assert(devinfo->gen >= 7);

   fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                          BRW_ARF_TIMESTAMP,
                                          0),
                             BRW_REGISTER_TYPE_UD));

   fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* We want to read the 3 fields we care about even if it's not enabled in
    * the dispatch.
    */
   bld.group(4, 0).exec_all().MOV(dst, ts);

   return dst;
}

void
fs_visitor::emit_shader_time_begin()
{
   /* We want only the low 32 bits of the timestamp.  Since it's running
    * at the GPU clock rate of ~1.2GHz, it will roll over every ~3 seconds,
    * which is plenty of time for our purposes.  It is identical across the
    * EUs, but since it's tracking GPU core speed it will increment at a
    * varying rate as render P-states change.
    */
   shader_start_time = component(
      get_timestamp(bld.annotate("shader time start")), 0);
}

void
fs_visitor::emit_shader_time_end()
{
   /* Insert our code just before the final SEND with EOT. */
   exec_node *end = this->instructions.get_tail();
   assert(end && ((fs_inst *) end)->eot);
   const fs_builder ibld = bld.annotate("shader time end")
                              .exec_all().at(NULL, end);
   const fs_reg timestamp = get_timestamp(ibld);

   /* We only use the low 32 bits of the timestamp - see
    * emit_shader_time_begin().
    *
    * We could also check if render P-states have changed (or anything
    * else that might disrupt timing) by setting smear to 2 and checking if
    * that field is != 0.
    */
   const fs_reg shader_end_time = component(timestamp, 0);

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   const fs_reg reset = component(timestamp, 2);
   set_condmod(BRW_CONDITIONAL_Z,
               ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
   ibld.IF(BRW_PREDICATE_NORMAL);

   fs_reg start = shader_start_time;
   start.negate = true;
   const fs_reg diff = component(fs_reg(VGRF, alloc.allocate(1),
                                        BRW_REGISTER_TYPE_UD),
                                 0);
   const fs_builder cbld = ibld.group(1, 0);
   cbld.group(1, 0).ADD(diff, start, shader_end_time);

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   cbld.ADD(diff, diff, brw_imm_ud(-2u));
   SHADER_TIME_ADD(cbld, 0, diff);
   SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ELSE);
   SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
                            int shader_time_subindex,
                            fs_reg value)
{
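   /* Each shader time record occupies three consecutive slots: subindex 0
    * accumulates cycles, subindex 1 counts valid samples, and subindex 2
    * counts samples discarded because a timestamp reset was detected (see
    * emit_shader_time_end).
    */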
   int index = shader_time_index * 3 + shader_time_subindex;
   struct brw_reg offset = brw_imm_d(index * BRW_SHADER_TIME_STRIDE);

   fs_reg payload;
   if (dispatch_width == 8)
      payload = vgrf(glsl_type::uvec2_type);
   else
      payload = vgrf(glsl_type::uint_type);

   bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
}

void
fs_visitor::vfail(const char *format, va_list va)
{
   char *msg;

   if (failed)
      return;

   failed = true;

   msg = ralloc_vasprintf(mem_ctx, format, va);
   msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);

   this->fail_msg = msg;

   if (debug_enabled) {
      fprintf(stderr, "%s", msg);
   }
}

void
fs_visitor::fail(const char *format, ...)
{
   va_list va;

   va_start(va, format);
   vfail(format, va);
   va_end(va);
}

/**
 * Mark this program as impossible to compile with dispatch width greater
 * than n.
 *
 * During the SIMD8 compile (which happens first), we can detect and flag
 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
 * SIMD16+ compile altogether.
 *
 * During a compile of dispatch width greater than n (if one happens anyway),
 * this just calls fail().
 */
void
fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
{
   if (dispatch_width > n) {
      fail("%s", msg);
   } else {
      max_dispatch_width = n;
      compiler->shader_perf_log(log_data,
                                "Shader dispatch width limited to SIMD%d: %s",
                                n, msg);
   }
}

/**
 * Returns true if the instruction has a flag that means it won't
 * update an entire destination register.
 *
 * For example, dead code elimination and live variable analysis want to know
 * when a write to a variable screens off any preceding values that were in
 * it.
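 *
 * A SIMD8 write to a W-typed destination, say, covers only 8 * 2 = 16 of
 * the register's 32 bytes, so it counts as partial here.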
 */
bool
fs_inst::is_partial_write() const
{
   return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
           (this->exec_size * type_sz(this->dst.type)) < 32 ||
           !this->dst.is_contiguous() ||
           this->dst.offset % REG_SIZE != 0);
}

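/**
 * Returns the number of vector components read from source \p i, based on
 * the opcode-specific meaning of each source.
 */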
unsigned
fs_inst::components_read(unsigned i) const
{
   /* Return zero if the source is not present. */
   if (src[i].file == BAD_FILE)
      return 0;

   switch (opcode) {
   case FS_OPCODE_LINTERP:
      if (i == 0)
         return 2;
      else
         return 1;

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:
      assert(i == 0);
      return 2;

   case FS_OPCODE_FB_WRITE_LOGICAL:
      assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
      /* First/second FB write color. */
      if (i < 2)
         return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
      else
         return 1;

   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
             src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
      /* Texture coordinates. */
      if (i == TEX_LOGICAL_SRC_COORDINATE)
         return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
      /* Texture derivatives. */
      else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
               opcode == SHADER_OPCODE_TXD_LOGICAL)
         return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
      /* Texture offset. */
      else if (i == TEX_LOGICAL_SRC_TG4_OFFSET)
         return 2;
      /* MCS */
      else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
         return 2;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      assert(src[3].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source (ignored for reads). */
      else if (i == 1)
         return 0;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source. */
      else if (i == 1)
         return src[4].ud;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      const unsigned op = src[4].ud;
      /* Surface coordinates. */
      if (i == 0)
         return src[3].ud;
      /* Surface operation source. */
      else if (i == 1 && op == BRW_AOP_CMPWR)
         return 2;
      else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
                          op == BRW_AOP_PREDEC))
         return 0;
      else
         return 1;
   }

   default:
      return 1;
   }
}

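/**
 * Returns the number of bytes read by this instruction for source \p arg,
 * taking the message length of send-like opcodes into account.
 */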
unsigned
fs_inst::size_read(int arg) const
{
   switch (opcode) {
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_READ:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      /* The payload is actually stored in src1 */
      if (arg == 1)
         return mlen * REG_SIZE;
      break;

   case FS_OPCODE_LINTERP:
      if (arg == 1)
         return 16;
      break;

   case SHADER_OPCODE_LOAD_PAYLOAD:
      if (arg < this->header_size)
         return REG_SIZE;
      break;

   case CS_OPCODE_CS_TERMINATE:
   case SHADER_OPCODE_BARRIER:
      return REG_SIZE;

   case SHADER_OPCODE_MOV_INDIRECT:
      if (arg == 0) {
         assert(src[2].file == IMM);
         return src[2].ud;
      }
      break;

   default:
      if (is_tex() && arg == 0 && src[0].file == VGRF)
         return mlen * REG_SIZE;
      break;
   }

   switch (src[arg].file) {
   case UNIFORM:
   case IMM:
      return components_read(arg) * type_sz(src[arg].type);
   case BAD_FILE:
   case ARF:
   case FIXED_GRF:
   case VGRF:
   case ATTR:
      return components_read(arg) * src[arg].component_size(exec_size);
   case MRF:
      unreachable("MRF registers are not allowed as sources");
   }
   return 0;
}

namespace {
   /* Return the subset of flag registers that an instruction could
    * potentially read or write based on the execution controls and flag
    * subregister number of the instruction.
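    *
    * For example, a SIMD16 instruction with group == 0 and flag_subreg == 1
    * spans bits 16..31 of the flag space, so this returns 0b1100: one mask
    * bit per 8-channel half of a flag subregister.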
    */
   unsigned
   flag_mask(const fs_inst *inst)
   {
      const unsigned start = inst->flag_subreg * 16 + inst->group;
      const unsigned end = start + inst->exec_size;
      return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
   }
}

unsigned
fs_inst::flags_read(const gen_device_info *devinfo) const
{
   /* XXX - This doesn't consider explicit uses of the flag register as source
    *       region.
    */
   if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
       predicate == BRW_PREDICATE_ALIGN1_ALLV) {
      /* The vertical predication modes combine corresponding bits from
       * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
       */
      const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
      return flag_mask(this) << shift | flag_mask(this);
   } else if (predicate) {
      return flag_mask(this);
   } else {
      return 0;
   }
}

unsigned
fs_inst::flags_written() const
{
   /* XXX - This doesn't consider explicit uses of the flag register as
    *       destination region.
    */
   if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
                            opcode != BRW_OPCODE_IF &&
                            opcode != BRW_OPCODE_WHILE)) ||
       opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
      return flag_mask(this);
   } else {
      return 0;
   }
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   if (inst->base_mrf == -1)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      return 2 * dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_SAMPLEINFO:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 1;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
      return inst->mlen;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->mlen;
   default:
      unreachable("not reached");
   }
}

fs_reg
fs_visitor::vgrf(const glsl_type *const type)
{
   int reg_width = dispatch_width / 8;
   return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
                 brw_type_for_base_type(type));
}

fs_reg::fs_reg(enum brw_reg_file file, int nr)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = BRW_REGISTER_TYPE_F;
   this->stride = (file == UNIFORM ? 0 : 1);
}

fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = type;
   this->stride = (file == UNIFORM ? 0 : 1);
}

/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
 * This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   this->push_constant_loc = v->push_constant_loc;
   this->pull_constant_loc = v->pull_constant_loc;
   this->uniforms = v->uniforms;
}

void
fs_visitor::emit_fragcoord_interpolation(fs_reg wpos)
{
   assert(stage == MESA_SHADER_FRAGMENT);

   /* gl_FragCoord.x */
   bld.MOV(wpos, this->pixel_x);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.y */
   bld.MOV(wpos, this->pixel_y);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.z */
   if (devinfo->gen >= 6) {
      bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
   } else {
      bld.emit(FS_OPCODE_LINTERP, wpos,
               this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
               interp_reg(VARYING_SLOT_POS, 2));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   bld.MOV(wpos, this->wpos_w);
}

enum brw_barycentric_mode
brw_barycentric_mode(enum glsl_interp_mode mode, nir_intrinsic_op op)
{
   /* Barycentric modes don't make sense for flat inputs. */
   assert(mode != INTERP_MODE_FLAT);

   unsigned bary;
   switch (op) {
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_at_offset:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_PIXEL;
      break;
   case nir_intrinsic_load_barycentric_centroid:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_CENTROID;
      break;
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_at_sample:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE;
      break;
   default:
      unreachable("invalid intrinsic");
   }

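   /* The three non-perspective modes follow their perspective counterparts
    * in the enum, in the same order, so an offset of 3 converts between
    * them.
    */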
   if (mode == INTERP_MODE_NOPERSPECTIVE)
      bary += 3;

   return (enum brw_barycentric_mode) bary;
}

/**
 * Turn one of the two CENTROID barycentric modes into PIXEL mode.
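 *
 * This relies on the enum layout placing each PIXEL mode immediately
 * before the corresponding CENTROID mode, so subtracting one converts the
 * perspective and non-perspective variants alike.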
 */
static enum brw_barycentric_mode
centroid_to_pixel(enum brw_barycentric_mode bary)
{
   assert(bary == BRW_BARYCENTRIC_PERSPECTIVE_CENTROID ||
          bary == BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);
   return (enum brw_barycentric_mode) ((unsigned) bary - 1);
}

fs_reg *
fs_visitor::emit_frontfacing_interpolation()
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing.  We want to create
       * a boolean result from this (~0/true or 0/false).
       *
       * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
       * this task in only one instruction:
       *    - a negation source modifier will flip the bit; and
       *    - a W -> D type conversion will sign extend the bit into the high
       *      word of the destination.
       *
       * An ASR 15 fills the low word of the destination.
       */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
      g0.negate = true;

      bld.ASR(*reg, g0, brw_imm_d(15));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing.  We want to create
       * a boolean result from this (1/true or 0/false).
       *
       * Like in the above case, since the bit is the MSB of g1.6:UD we can use
       * the negation source modifier to flip it.  Unfortunately the SHR
       * instruction only operates on UD (or D with an abs source modifier)
       * sources without negation.
       *
       * Instead, use ASR (which will give ~0/true or 0/false).
       */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
      g1_6.negate = true;

      bld.ASR(*reg, g1_6, brw_imm_d(31));
   }

   return reg;
}

void
fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   assert(dst.type == BRW_REGISTER_TYPE_F);

   if (wm_prog_data->persample_dispatch) {
      /* Convert int_sample_pos to floating point */
      bld.MOV(dst, int_sample_pos);
      /* Scale to the range [0, 1] */
      bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
   }
   else {
      /* From the ARB_sample_shading specification:
       *    "When rendering to a non-multisample buffer, or if multisample
       *     rasterization is disabled, gl_SamplePosition will always be
       *     (0.5, 0.5)."
       */
      bld.MOV(dst, brw_imm_f(0.5f));
   }
}

fs_reg *
fs_visitor::emit_samplepos_setup()
{
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample position");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
   fs_reg pos = *reg;
   fs_reg int_sample_x = vgrf(glsl_type::int_type);
   fs_reg int_sample_y = vgrf(glsl_type::int_type);

   /* WM will be run in MSDISPMODE_PERSAMPLE.  So, only one of SIMD8 or SIMD16
    * mode will be enabled.
    *
    * From the Ivy Bridge PRM, volume 2 part 1, page 344:
    *    R31.1:0         Position Offset X/Y for Slot[3:0]
    *    R31.3:2         Position Offset X/Y for Slot[7:4]
    *    .....
    *
    * The X, Y sample positions come in as bytes in thread payload.  So, read
    * the positions using vstride=16, width=8, hstride=2.
    */
   struct brw_reg sample_pos_reg =
      stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
                    BRW_REGISTER_TYPE_B), 16, 8, 2);

   if (dispatch_width == 8) {
      abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
   } else {
      abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
      abld.half(1).MOV(half(int_sample_x, 1),
                       fs_reg(suboffset(sample_pos_reg, 16)));
   }
   /* Compute gl_SamplePosition.x */
   compute_sample_position(pos, int_sample_x);
   pos = offset(pos, abld, 1);
   if (dispatch_width == 8) {
      abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
   } else {
      abld.half(0).MOV(half(int_sample_y, 0),
                       fs_reg(suboffset(sample_pos_reg, 1)));
      abld.half(1).MOV(half(int_sample_y, 1),
                       fs_reg(suboffset(sample_pos_reg, 17)));
   }
   /* Compute gl_SamplePosition.y */
   compute_sample_position(pos, int_sample_y);
   return reg;
}

fs_reg *
fs_visitor::emit_sampleid_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample id");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   if (!key->multisample_fbo) {
      /* As per GL_ARB_sample_shading specification:
       *    "When rendering to a non-multisample buffer, or if multisample
       *     rasterization is disabled, gl_SampleID will always be zero."
       */
      abld.MOV(*reg, brw_imm_d(0));
   } else if (devinfo->gen >= 8) {
      /* Sample ID comes in as 4-bit numbers in g1.0:
       *
       *    15:12 Slot 3 SampleID (only used in SIMD16)
       *     11:8 Slot 2 SampleID (only used in SIMD16)
       *      7:4 Slot 1 SampleID
       *      3:0 Slot 0 SampleID
       *
       * Each slot corresponds to four channels, so we want to replicate each
       * half-byte value to 4 channels in a row:
       *
       *    dst+0:    .7    .6    .5    .4    .3    .2    .1    .0
       *             7:4   7:4   7:4   7:4   3:0   3:0   3:0   3:0
       *
       *    dst+1:    .7    .6    .5    .4    .3    .2    .1    .0  (if SIMD16)
       *           15:12 15:12 15:12 15:12  11:8  11:8  11:8  11:8
       *
       * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
       * channels to read the first byte (7:0), and the second group of 8
       * channels to read the second byte (15:8).  Then, we shift right by
       * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
       * values into place.  Finally, we AND with 0xf to keep the low nibble.
       *
       *    shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
       *    and(16) dst<1>D tmp<8,8,1>W  0xf:W
       *
       * TODO: These payload bits exist on Gen7 too, but they appear to always
       *       be zero, so this code fails to work.  We should find out why.
       */
      fs_reg tmp(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);

      abld.SHR(tmp, fs_reg(stride(retype(brw_vec1_grf(1, 0),
                                         BRW_REGISTER_TYPE_B), 1, 8, 0)),
               brw_imm_v(0x44440000));
      abld.AND(*reg, tmp, brw_imm_w(0xf));
   } else {
      const fs_reg t1 = component(fs_reg(VGRF, alloc.allocate(1),
                                         BRW_REGISTER_TYPE_D), 0);
      const fs_reg t2(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);

      /* The PS will be run in MSDISPMODE_PERSAMPLE.  For example with
       * 8x multisampling, subspan 0 will represent sample N (where N
       * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
       * 7.  We can find the value of N by looking at R0.0 bits 7:6
       * ("Starting Sample Pair Index (SSPI)") and multiplying by two
       * (since samples are always delivered in pairs).  That is, we
       * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5.  Then
       * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
       * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
       * 2, 3, 3, 3, 3) in case of SIMD16.  We compute this sequence by
       * populating a temporary variable with the sequence (0, 1, 2, 3),
       * and then reading from it using vstride=1, width=4, hstride=0.
       * These computations hold good for 4x multisampling as well.
       *
       * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
       * the first four slots are sample 0 of subspan 0; the next four
       * are sample 1 of subspan 0; the third group is sample 0 of
       * subspan 1, and finally sample 1 of subspan 1.
       */

      /* SKL+ has an extra bit for the Starting Sample Pair Index to
       * accommodate 16x MSAA.
       */
      abld.exec_all().group(1, 0)
          .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
               brw_imm_ud(0xc0));
      abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));

      /* This works for both SIMD8 and SIMD16 */
      abld.exec_all().group(4, 0).MOV(t2, brw_imm_v(0x3210));

      /* This special instruction takes care of setting vstride=1,
       * width=4, hstride=0 of t2 during an ADD instruction.
       */
      abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
   }

   return reg;
}

fs_reg *
fs_visitor::emit_samplemaskin_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   assert(devinfo->gen >= 6);

   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                               BRW_REGISTER_TYPE_D));

   if (wm_prog_data->persample_dispatch) {
      /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
       * and a mask representing which sample is being processed by the
       * current shader invocation.
       *
       * From the OES_sample_variables specification:
       * "When per-sample shading is active due to the use of a fragment input
       * qualified by "sample" or due to the use of the gl_SampleID or
       * gl_SamplePosition variables, only the bit for the current sample is
       * set in gl_SampleMaskIn."
       */
      const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");

      if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
         nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();

      fs_reg one = vgrf(glsl_type::int_type);
      fs_reg enabled_mask = vgrf(glsl_type::int_type);
      abld.MOV(one, brw_imm_d(1));
      abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
      abld.AND(*reg, enabled_mask, coverage_mask);
   } else {
      /* In per-pixel mode, the coverage mask is sufficient. */
      *reg = coverage_mask;
   }
   return reg;
}

fs_reg
fs_visitor::resolve_source_modifiers(const fs_reg &src)
{
   if (!src.abs && !src.negate)
      return src;

   fs_reg temp = bld.vgrf(src.type);
   bld.MOV(temp, src);

   return temp;
}

void
fs_visitor::emit_discard_jump()
{
   assert(brw_wm_prog_data(this->prog_data)->uses_kill);

   /* For performance, after a discard, jump to the end of the
    * shader if all relevant channels have been discarded.
    */
   fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
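   /* Note: the discard mask is kept in f0.1 (flag subregister 1), which
    * leaves f0.0 free for ordinary conditional mods and predication.
    */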
   discard_jump->flag_subreg = 1;

   discard_jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
   discard_jump->predicate_inverse = true;
}

void
fs_visitor::emit_gs_thread_end()
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   if (gs_compile->control_data_header_size_bits > 0) {
      emit_gs_control_data_bits(this->final_gs_vertex_count);
   }

   const fs_builder abld = bld.annotate("thread end");
   fs_inst *inst;

   if (gs_prog_data->static_vertex_count != -1) {
      foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
         if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
            prev->eot = true;

            /* Delete now dead instructions. */
            foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
               if (dead == prev)
                  break;
               dead->remove();
            }
            return;
         } else if (prev->is_control_flow() || prev->has_side_effects()) {
            break;
         }
      }
      fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
      inst->mlen = 1;
   } else {
      fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
      sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
      sources[1] = this->final_gs_vertex_count;
      abld.LOAD_PAYLOAD(payload, sources, 2, 2);
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->mlen = 2;
   }
   inst->eot = true;
   inst->offset = 0;
}

void
fs_visitor::assign_curb_setup()
{
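   /* Push constants are uploaded in units of whole registers, each holding
    * eight 32-bit components.
    */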
   prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == UNIFORM) {
            int uniform_nr = inst->src[i].nr + inst->src[i].offset / 4;
            int constant_nr;
            if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
               constant_nr = push_constant_loc[uniform_nr];
            } else {
               /* Section 5.11 of the OpenGL 4.1 spec says:
                * "Out-of-bounds reads return undefined values, which include
                *  values from other variables of the active program or zero."
                * Just return the first push constant.
                */
               constant_nr = 0;
            }

            struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);
            brw_reg.abs = inst->src[i].abs;
            brw_reg.negate = inst->src[i].negate;

            assert(inst->src[i].stride == 0);
            inst->src[i] = byte_offset(
               retype(brw_reg, inst->src[i].type),
               inst->src[i].offset % 4);
         }
      }
   }

   /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
   this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
}

void
fs_visitor::calculate_urb_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   memset(prog_data->urb_setup, -1,
          sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (devinfo->gen >= 6) {
      if (_mesa_bitcount_64(nir->info.inputs_read &
                            BRW_FS_VARYING_INPUT_MASK) <= 16) {
         /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
          * first 16 varying inputs, so we can put them wherever we want.
          * Just put them in order.
          *
          * This is useful because it means that (a) inputs not used by the
          * fragment shader won't take up valuable register space, and (b) we
          * won't have to recompile the fragment shader if it gets paired with
          * a different vertex (or geometry) shader.
          */
         for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
            if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                BITFIELD64_BIT(i)) {
               prog_data->urb_setup[i] = urb_next++;
            }
         }
      } else {
         bool include_vue_header =
            nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

         /* We have enough input varyings that the SF/SBE pipeline stage can't
          * arbitrarily rearrange them to suit our whim; we have to put them
          * in an order that matches the output of the previous pipeline stage
          * (geometry or vertex shader).
          */
         struct brw_vue_map prev_stage_vue_map;
         brw_compute_vue_map(devinfo, &prev_stage_vue_map,
                             key->input_slots_valid,
                             nir->info.separate_shader);
         int first_slot =
            include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;

         assert(prev_stage_vue_map.num_slots <= first_slot + 32);
         for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
              slot++) {
            int varying = prev_stage_vue_map.slot_to_varying[slot];
            if (varying != BRW_VARYING_SLOT_PAD &&
                (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                 BITFIELD64_BIT(varying))) {
               prog_data->urb_setup[varying] = slot - first_slot;
            }
         }
         urb_next = prev_stage_vue_map.num_slots - first_slot;
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VARYING_SLOT_PSIZ)
            continue;

         if (key->input_slots_valid & BITFIELD64_BIT(i)) {
            /* The back color slot is skipped when the front color is
             * also written to.  In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader.  So the register number must always be
             * incremented, mapped or not.
             */
            if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
               prog_data->urb_setup[i] = urb_next;
            urb_next++;
         }
      }

      /*
       * It's an FS-only attribute, and we did interpolation for this
       * attribute in the SF thread.  So, count it here, too.
       *
       * See compile_sf_prog() for more info.
       */
      if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
         prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
   }

   prog_data->num_varying_inputs = urb_next;
}

void
fs_visitor::assign_urb_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   int urb_start = payload.num_regs + prog_data->base.curb_read_length;

   /* Offset all the urb_setup[] index by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[1].file == FIXED_GRF);
         inst->src[1].nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_GRF);
         inst->src[0].nr += urb_start;
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
}

void
fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
{
   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == ATTR) {
         int grf = payload.num_regs +
                   prog_data->curb_read_length +
                   inst->src[i].nr +
                   inst->src[i].offset / REG_SIZE;

         /* As explained at brw_reg_from_fs_reg, from the Haswell PRM:
          *
          *    VertStride must be used to cross GRF register boundaries. This
          *    rule implies that elements within a 'Width' cannot cross GRF
          *    boundaries.
          *
          * So, for registers that are large enough, we have to split the exec
          * size in two and trust the compression state to sort it out.
          */
         unsigned total_size = inst->exec_size *
                               inst->src[i].stride *
                               type_sz(inst->src[i].type);

         assert(total_size <= 2 * REG_SIZE);
         const unsigned exec_size =
            (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;

         unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
         struct brw_reg reg =
            stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
                               inst->src[i].offset % REG_SIZE),
                   exec_size * inst->src[i].stride,
                   width, inst->src[i].stride);
         reg.abs = inst->src[i].abs;
         reg.negate = inst->src[i].negate;

         inst->src[i] = reg;
      }
   }
}

void
fs_visitor::assign_vs_urb_setup()
{
   struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);

   assert(stage == MESA_SHADER_VERTEX);

   /* Each attribute is 4 regs. */
   this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;

   assert(vs_prog_data->base.urb_read_length <= 15);

   /* Rewrite all ATTR file references to the hw grf that they land in. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_tcs_single_patch_urb_setup()
{
   assert(stage == MESA_SHADER_TESS_CTRL);

   /* Rewrite all ATTR file references to HW_REGs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_tes_urb_setup()
{
   assert(stage == MESA_SHADER_TESS_EVAL);

   struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);

   first_non_payload_grf += 8 * vue_prog_data->urb_read_length;

   /* Rewrite all ATTR file references to HW_REGs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      convert_attr_sources_to_hw_regs(inst);
   }
}

void
fs_visitor::assign_gs_urb_setup()
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);

   first_non_payload_grf +=
      8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      /* Rewrite all ATTR file references to GRFs. */
      convert_attr_sources_to_hw_regs(inst);
   }
}


/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This is mostly duplicated with what brw_fs_vector_splitting does,
 * but that's really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization.  We can do it once here, safely.  This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs.  If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
void
fs_visitor::split_virtual_grfs()
{
   /* Compact the register file so we eliminate dead vgrfs.  This
    * only defines split points for live registers, so dead registers
    * that are too large would hit assertions later.
    */
   compact_virtual_grfs();

   int num_vars = this->alloc.count;

   /* Count the total number of registers */
   int reg_count = 0;
   int vgrf_to_reg[num_vars];
   for (int i = 0; i < num_vars; i++) {
      vgrf_to_reg[i] = reg_count;
      reg_count += alloc.sizes[i];
   }

   /* An array of "split points".  For each register slot, this indicates
    * if this slot can be separated from the previous slot.  Every time an
    * instruction uses multiple elements of a register (as a source or
    * destination), we mark the used slots as inseparable.  Then we go
    * through and split the registers into the smallest pieces we can.
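    *
    * For example, a four-register VGRF whose middle two registers are
    * always accessed together splits into three new VGRFs of sizes 1, 2
    * and 1.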
1685 */
1686 bool split_points[reg_count];
1687 memset(split_points, 0, sizeof(split_points));
1688
1689 /* Mark all used registers as fully splittable */
1690 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1691 if (inst->dst.file == VGRF) {
1692 int reg = vgrf_to_reg[inst->dst.nr];
1693 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1694 split_points[reg + j] = true;
1695 }
1696
1697 for (int i = 0; i < inst->sources; i++) {
1698 if (inst->src[i].file == VGRF) {
1699 int reg = vgrf_to_reg[inst->src[i].nr];
1700 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1701 split_points[reg + j] = true;
1702 }
1703 }
1704 }
1705
1706 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1707 if (inst->dst.file == VGRF) {
1708 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1709 for (unsigned j = 1; j < regs_written(inst); j++)
1710 split_points[reg + j] = false;
1711 }
1712 for (int i = 0; i < inst->sources; i++) {
1713 if (inst->src[i].file == VGRF) {
1714 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1715 for (unsigned j = 1; j < regs_read(inst, i); j++)
1716 split_points[reg + j] = false;
1717 }
1718 }
1719 }
1720
1721 int new_virtual_grf[reg_count];
1722 int new_reg_offset[reg_count];
1723
1724 int reg = 0;
1725 for (int i = 0; i < num_vars; i++) {
1726 /* The first one should always be 0 as a quick sanity check. */
1727 assert(split_points[reg] == false);
1728
1729 /* j = 0 case */
1730 new_reg_offset[reg] = 0;
1731 reg++;
1732 int offset = 1;
1733
1734 /* j > 0 case */
1735 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1736 /* If this is a split point, reset the offset to 0 and allocate a
1737 * new virtual GRF for the previous offset many registers
1738 */
1739 if (split_points[reg]) {
1740 assert(offset <= MAX_VGRF_SIZE);
1741 int grf = alloc.allocate(offset);
1742 for (int k = reg - offset; k < reg; k++)
1743 new_virtual_grf[k] = grf;
1744 offset = 0;
1745 }
1746 new_reg_offset[reg] = offset;
1747 offset++;
1748 reg++;
1749 }
1750
1751 /* The last one gets the original register number */
1752 assert(offset <= MAX_VGRF_SIZE);
1753 alloc.sizes[i] = offset;
1754 for (int k = reg - offset; k < reg; k++)
1755 new_virtual_grf[k] = i;
1756 }
1757 assert(reg == reg_count);
1758
1759 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1760 if (inst->dst.file == VGRF) {
1761 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1762 inst->dst.nr = new_virtual_grf[reg];
1763 inst->dst.offset = new_reg_offset[reg] * REG_SIZE +
1764 inst->dst.offset % REG_SIZE;
1765 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1766 }
1767 for (int i = 0; i < inst->sources; i++) {
1768 if (inst->src[i].file == VGRF) {
1769 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1770 inst->src[i].nr = new_virtual_grf[reg];
1771 inst->src[i].offset = new_reg_offset[reg] * REG_SIZE +
1772 inst->src[i].offset % REG_SIZE;
1773 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1774 }
1775 }
1776 }
1777 invalidate_live_intervals();
1778 }
1779
1780 /**
1781 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1782 *
1783 * During code generation, we create tons of temporary variables, many of
1784 * which get immediately killed and are never used again. Yet, in later
1785 * optimization and analysis passes, such as compute_live_intervals, we need
1786 * to loop over all the virtual GRFs. Compacting them can save a lot of
1787 * overhead.
1788 */
1789 bool
1790 fs_visitor::compact_virtual_grfs()
1791 {
1792 bool progress = false;
1793 int remap_table[this->alloc.count];
1794 memset(remap_table, -1, sizeof(remap_table));
1795
1796 /* Mark which virtual GRFs are used. */
1797 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1798 if (inst->dst.file == VGRF)
1799 remap_table[inst->dst.nr] = 0;
1800
1801 for (int i = 0; i < inst->sources; i++) {
1802 if (inst->src[i].file == VGRF)
1803 remap_table[inst->src[i].nr] = 0;
1804 }
1805 }
1806
1807 /* Compact the GRF arrays. */
1808 int new_index = 0;
1809 for (unsigned i = 0; i < this->alloc.count; i++) {
1810 if (remap_table[i] == -1) {
1811 /* We just found an unused register. This means that we are
1812 * actually going to compact something.
1813 */
1814 progress = true;
1815 } else {
1816 remap_table[i] = new_index;
1817 alloc.sizes[new_index] = alloc.sizes[i];
1818 invalidate_live_intervals();
1819 ++new_index;
1820 }
1821 }
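   /* For example (hypothetical): with three VGRFs of which only VGRF 1 is
    * dead, remap_table ends up as { 0, -1, 1 }: VGRF 2 slides down to
    * index 1 and the new alloc.count is 2.
    */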
1822
1823 this->alloc.count = new_index;
1824
1825 /* Patch all the instructions to use the newly renumbered registers */
1826 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1827 if (inst->dst.file == VGRF)
1828 inst->dst.nr = remap_table[inst->dst.nr];
1829
1830 for (int i = 0; i < inst->sources; i++) {
1831 if (inst->src[i].file == VGRF)
1832 inst->src[i].nr = remap_table[inst->src[i].nr];
1833 }
1834 }
1835
1836 /* Patch all the references to delta_xy, since they're used in register
1837 * allocation. If they're unused, switch them to BAD_FILE so we don't
1838 * think some random VGRF is delta_xy.
1839 */
1840 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
1841 if (delta_xy[i].file == VGRF) {
1842 if (remap_table[delta_xy[i].nr] != -1) {
1843 delta_xy[i].nr = remap_table[delta_xy[i].nr];
1844 } else {
1845 delta_xy[i].file = BAD_FILE;
1846 }
1847 }
1848 }
1849
1850 return progress;
1851 }
1852
1853 static void
1854 set_push_pull_constant_loc(unsigned uniform, int *chunk_start,
1855 unsigned *max_chunk_bitsize,
1856 bool contiguous, unsigned bitsize,
1857 const unsigned target_bitsize,
1858 int *push_constant_loc, int *pull_constant_loc,
1859 unsigned *num_push_constants,
1860 unsigned *num_pull_constants,
1861 const unsigned max_push_components,
1862 const unsigned max_chunk_size,
1863 struct brw_stage_prog_data *stage_prog_data)
1864 {
1865 /* This is the first live uniform in the chunk */
1866 if (*chunk_start < 0)
1867 *chunk_start = uniform;
1868
1869 /* Keep track of the maximum bit size access in contiguous uniforms */
1870 *max_chunk_bitsize = MAX2(*max_chunk_bitsize, bitsize);
1871
1872 /* If this element does not need to be contiguous with the next, we
1873    * split at this point, and everything between chunk_start and uniform
1874    * forms a single chunk.
1875 */
1876 if (!contiguous) {
1877 /* If bitsize doesn't match the target one, skip it */
1878 if (*max_chunk_bitsize != target_bitsize) {
1879 /* FIXME: right now we only support 32 and 64-bit accesses */
1880 assert(*max_chunk_bitsize == 4 || *max_chunk_bitsize == 8);
1881 *max_chunk_bitsize = 0;
1882 *chunk_start = -1;
1883 return;
1884 }
1885
1886 unsigned chunk_size = uniform - *chunk_start + 1;
1887
1888 /* Decide whether we should push or pull this parameter. In the
1889 * Vulkan driver, push constants are explicitly exposed via the API
1890 * so we push everything. In GL, we only push small arrays.
1891 */
1892 if (stage_prog_data->pull_param == NULL ||
1893 (*num_push_constants + chunk_size <= max_push_components &&
1894 chunk_size <= max_chunk_size)) {
1895 assert(*num_push_constants + chunk_size <= max_push_components);
1896 for (unsigned j = *chunk_start; j <= uniform; j++)
1897 push_constant_loc[j] = (*num_push_constants)++;
1898 } else {
1899 for (unsigned j = *chunk_start; j <= uniform; j++)
1900 pull_constant_loc[j] = (*num_pull_constants)++;
1901 }
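      /* For example (hypothetical numbers): a contiguous chunk of live
       * uniforms 4..7 gives chunk_size = 4; with num_push_constants = 60
       * and max_push_components = 128 the chunk fits, so uniforms 4..7
       * receive push locations 60..63.  A chunk exceeding either limit
       * (in GL) would receive pull locations from num_pull_constants
       * instead.
       */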
1902
1903 *max_chunk_bitsize = 0;
1904 *chunk_start = -1;
1905 }
1906 }
1907
1908 /**
1909 * Assign UNIFORM file registers to either push constants or pull constants.
1910 *
1911 * We allow a fragment shader to have more than the specified minimum
1912 * maximum number of fragment shader uniform components (64). If
1913  * there are too many, they'd fill up all of the register space.
1914 * So, this will push some of them out to the pull constant buffer and
1915 * update the program to load them.
1916 */
1917 void
1918 fs_visitor::assign_constant_locations()
1919 {
1920 /* Only the first compile gets to decide on locations. */
1921 if (dispatch_width != min_dispatch_width)
1922 return;
1923
1924 bool is_live[uniforms];
1925 memset(is_live, 0, sizeof(is_live));
1926 unsigned bitsize_access[uniforms];
1927 memset(bitsize_access, 0, sizeof(bitsize_access));
1928
1929 /* For each uniform slot, a value of true indicates that the given slot and
1930 * the next slot must remain contiguous. This is used to keep us from
1931 * splitting arrays apart.
1932 */
1933 bool contiguous[uniforms];
1934 memset(contiguous, 0, sizeof(contiguous));
1935
1936 int thread_local_id_index =
1937 (stage == MESA_SHADER_COMPUTE) ?
1938 brw_cs_prog_data(stage_prog_data)->thread_local_id_index : -1;
1939
1940 /* First, we walk through the instructions and do two things:
1941 *
1942 * 1) Figure out which uniforms are live.
1943 *
1944 * 2) Mark any indirectly used ranges of registers as contiguous.
1945 *
1946 * Note that we don't move constant-indexed accesses to arrays. No
1947 * testing has been done of the performance impact of this choice.
1948 */
1949 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
1950       for (int i = 0; i < inst->sources; i++) {
1951 if (inst->src[i].file != UNIFORM)
1952 continue;
1953
1954 int constant_nr = inst->src[i].nr + inst->src[i].offset / 4;
1955
1956 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
1957 assert(inst->src[2].ud % 4 == 0);
1958 unsigned last = constant_nr + (inst->src[2].ud / 4) - 1;
1959 assert(last < uniforms);
1960
1961 for (unsigned j = constant_nr; j < last; j++) {
1962 is_live[j] = true;
1963 contiguous[j] = true;
1964 bitsize_access[j] = MAX2(bitsize_access[j], type_sz(inst->src[i].type));
1965 }
1966 is_live[last] = true;
1967 bitsize_access[last] = MAX2(bitsize_access[last], type_sz(inst->src[i].type));
1968 } else {
1969 if (constant_nr >= 0 && constant_nr < (int) uniforms) {
1970 int regs_read = inst->components_read(i) *
1971 type_sz(inst->src[i].type) / 4;
1972 for (int j = 0; j < regs_read; j++) {
1973 is_live[constant_nr + j] = true;
1974 bitsize_access[constant_nr + j] =
1975 MAX2(bitsize_access[constant_nr + j], type_sz(inst->src[i].type));
1976 }
1977 }
1978 }
1979 }
1980 }
1981
1982 if (thread_local_id_index >= 0 && !is_live[thread_local_id_index])
1983 thread_local_id_index = -1;
1984
1985 /* Only allow 16 registers (128 uniform components) as push constants.
1986 *
1987 * Just demote the end of the list. We could probably do better
1988 * here, demoting things that are rarely used in the program first.
1989 *
1990 * If changing this value, note the limitation about total_regs in
1991 * brw_curbe.c.
1992 */
1993 unsigned int max_push_components = 16 * 8;
1994 if (thread_local_id_index >= 0)
1995 max_push_components--; /* Save a slot for the thread ID */
1996
1997 /* We push small arrays, but no bigger than 16 floats. This is big enough
1998 * for a vec4 but hopefully not large enough to push out other stuff. We
1999 * should probably use a better heuristic at some point.
2000 */
2001 const unsigned int max_chunk_size = 16;
2002
2003 unsigned int num_push_constants = 0;
2004 unsigned int num_pull_constants = 0;
2005
2006 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2007 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2008
2009 /* Default to -1 meaning no location */
2010 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2011 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2012
2013 int chunk_start = -1;
2014 unsigned max_chunk_bitsize = 0;
2015
2016 /* First push 64-bit uniforms to ensure they are properly aligned */
2017 const unsigned uniform_64_bit_size = type_sz(BRW_REGISTER_TYPE_DF);
2018 for (unsigned u = 0; u < uniforms; u++) {
2019 if (!is_live[u])
2020 continue;
2021
2022 set_push_pull_constant_loc(u, &chunk_start, &max_chunk_bitsize,
2023 contiguous[u], bitsize_access[u],
2024 uniform_64_bit_size,
2025 push_constant_loc, pull_constant_loc,
2026 &num_push_constants, &num_pull_constants,
2027 max_push_components, max_chunk_size,
2028 stage_prog_data);
2030    }
2031
2032 /* Then push the rest of uniforms */
2033 const unsigned uniform_32_bit_size = type_sz(BRW_REGISTER_TYPE_F);
2034 for (unsigned u = 0; u < uniforms; u++) {
2035 if (!is_live[u])
2036 continue;
2037
2038 /* Skip thread_local_id_index to put it in the last push register. */
2039 if (thread_local_id_index == (int)u)
2040 continue;
2041
2042 set_push_pull_constant_loc(u, &chunk_start, &max_chunk_bitsize,
2043 contiguous[u], bitsize_access[u],
2044 uniform_32_bit_size,
2045 push_constant_loc, pull_constant_loc,
2046 &num_push_constants, &num_pull_constants,
2047 max_push_components, max_chunk_size,
2048 stage_prog_data);
2049 }
2050
2051 /* Add the CS local thread ID uniform at the end of the push constants */
2052 if (thread_local_id_index >= 0)
2053 push_constant_loc[thread_local_id_index] = num_push_constants++;
2054
2055 /* As the uniforms are going to be reordered, take the data from a temporary
2056 * copy of the original param[].
2057 */
2058 gl_constant_value **param = ralloc_array(NULL, gl_constant_value*,
2059 stage_prog_data->nr_params);
2060 memcpy(param, stage_prog_data->param,
2061 sizeof(gl_constant_value*) * stage_prog_data->nr_params);
2062 stage_prog_data->nr_params = num_push_constants;
2063 stage_prog_data->nr_pull_params = num_pull_constants;
2064
2065 /* Up until now, the param[] array has been indexed by reg + offset
2066 * of UNIFORM registers. Move pull constants into pull_param[] and
2067 * condense param[] to only contain the uniforms we chose to push.
2068 *
2069     * NOTE: Because we are condensing the param[] array, we know that
2070 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2071 * having to make a copy.
2072 */
2073 int new_thread_local_id_index = -1;
2074 for (unsigned int i = 0; i < uniforms; i++) {
2075 const gl_constant_value *value = param[i];
2076
2077 if (pull_constant_loc[i] != -1) {
2078 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2079 } else if (push_constant_loc[i] != -1) {
2080 stage_prog_data->param[push_constant_loc[i]] = value;
2081 if (thread_local_id_index == (int)i)
2082 new_thread_local_id_index = push_constant_loc[i];
2083 }
2084 }
2085 ralloc_free(param);
2086
2087 if (stage == MESA_SHADER_COMPUTE)
2088 brw_cs_prog_data(stage_prog_data)->thread_local_id_index =
2089 new_thread_local_id_index;
2090 }
2091
2092 /**
2093 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2094 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2095 */
2096 void
2097 fs_visitor::lower_constant_loads()
2098 {
2099 const unsigned index = stage_prog_data->binding_table.pull_constants_start;
2100
2101 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2102       /* Set up the annotation tracking for newly generated instructions. */
2103 const fs_builder ibld(this, block, inst);
2104
2105 for (int i = 0; i < inst->sources; i++) {
2106 if (inst->src[i].file != UNIFORM)
2107 continue;
2108
2109 /* We'll handle this case later */
2110 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2111 continue;
2112
2113 unsigned location = inst->src[i].nr + inst->src[i].offset / 4;
2114 if (location >= uniforms)
2115 continue; /* Out of bounds access */
2116
2117 int pull_index = pull_constant_loc[location];
2118
2119 if (pull_index == -1)
2120 continue;
2121
2122 assert(inst->src[i].stride == 0);
2123
2124 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
2125 const fs_builder ubld = ibld.exec_all().group(block_sz / 4, 0);
2126 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
2127 const unsigned base = pull_index * 4;
2128
2129 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2130 dst, brw_imm_ud(index), brw_imm_ud(base & ~(block_sz - 1)));
2131
2132 /* Rewrite the instruction to use the temporary VGRF. */
2133 inst->src[i].file = VGRF;
2134 inst->src[i].nr = dst.nr;
2135 inst->src[i].offset = (base & (block_sz - 1)) +
2136 inst->src[i].offset % 4;
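         /* For example (hypothetical): pull_index = 20 gives base = 80, so
          * the load above fetches the 64-byte block at offset 64 and the
          * source is rewritten to read at byte 16 within the fetched block.
          */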
2137
2138 brw_mark_surface_used(prog_data, index);
2139 }
2140
2141 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2142 inst->src[0].file == UNIFORM) {
2143
2144 unsigned location = inst->src[0].nr + inst->src[0].offset / 4;
2145 if (location >= uniforms)
2146 continue; /* Out of bounds access */
2147
2148 int pull_index = pull_constant_loc[location];
2149
2150 if (pull_index == -1)
2151 continue;
2152
2153 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2154 brw_imm_ud(index),
2155 inst->src[1],
2156 pull_index * 4);
2157 inst->remove(block);
2158
2159 brw_mark_surface_used(prog_data, index);
2160 }
2161 }
2162 invalidate_live_intervals();
2163 }
2164
2165 bool
2166 fs_visitor::opt_algebraic()
2167 {
2168 bool progress = false;
2169
2170 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2171 switch (inst->opcode) {
2172 case BRW_OPCODE_MOV:
2173 if (inst->src[0].file != IMM)
2174 break;
2175
2176 if (inst->saturate) {
2177 if (inst->dst.type != inst->src[0].type)
2178 assert(!"unimplemented: saturate mixed types");
2179
2180 if (brw_saturate_immediate(inst->dst.type,
2181 &inst->src[0].as_brw_reg())) {
2182 inst->saturate = false;
2183 progress = true;
2184 }
2185 }
2186 break;
2187
2188 case BRW_OPCODE_MUL:
2189 if (inst->src[1].file != IMM)
2190 continue;
2191
2192 /* a * 1.0 = a */
2193 if (inst->src[1].is_one()) {
2194 inst->opcode = BRW_OPCODE_MOV;
2195 inst->src[1] = reg_undef;
2196 progress = true;
2197 break;
2198 }
2199
2200 /* a * -1.0 = -a */
2201 if (inst->src[1].is_negative_one()) {
2202 inst->opcode = BRW_OPCODE_MOV;
2203 inst->src[0].negate = !inst->src[0].negate;
2204 inst->src[1] = reg_undef;
2205 progress = true;
2206 break;
2207 }
2208
2209 /* a * 0.0 = 0.0 */
2210 if (inst->src[1].is_zero()) {
2211 inst->opcode = BRW_OPCODE_MOV;
2212 inst->src[0] = inst->src[1];
2213 inst->src[1] = reg_undef;
2214 progress = true;
2215 break;
2216 }
2217
2218 if (inst->src[0].file == IMM) {
2219 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2220 inst->opcode = BRW_OPCODE_MOV;
2221 inst->src[0].f *= inst->src[1].f;
2222 inst->src[1] = reg_undef;
2223 progress = true;
2224 break;
2225 }
2226 break;
2227 case BRW_OPCODE_ADD:
2228 if (inst->src[1].file != IMM)
2229 continue;
2230
2231 /* a + 0.0 = a */
2232 if (inst->src[1].is_zero()) {
2233 inst->opcode = BRW_OPCODE_MOV;
2234 inst->src[1] = reg_undef;
2235 progress = true;
2236 break;
2237 }
2238
2239 if (inst->src[0].file == IMM) {
2240 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2241 inst->opcode = BRW_OPCODE_MOV;
2242 inst->src[0].f += inst->src[1].f;
2243 inst->src[1] = reg_undef;
2244 progress = true;
2245 break;
2246 }
2247 break;
2248 case BRW_OPCODE_OR:
2249 if (inst->src[0].equals(inst->src[1])) {
2250 inst->opcode = BRW_OPCODE_MOV;
2251 inst->src[1] = reg_undef;
2252 progress = true;
2253 break;
2254 }
2255 break;
2256 case BRW_OPCODE_LRP:
2257 if (inst->src[1].equals(inst->src[2])) {
2258 inst->opcode = BRW_OPCODE_MOV;
2259 inst->src[0] = inst->src[1];
2260 inst->src[1] = reg_undef;
2261 inst->src[2] = reg_undef;
2262 progress = true;
2263 break;
2264 }
2265 break;
2266 case BRW_OPCODE_CMP:
2267 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2268 inst->src[0].abs &&
2269 inst->src[0].negate &&
2270 inst->src[1].is_zero()) {
2271 inst->src[0].abs = false;
2272 inst->src[0].negate = false;
2273 inst->conditional_mod = BRW_CONDITIONAL_Z;
2274 progress = true;
2275 break;
2276 }
2277 break;
2278 case BRW_OPCODE_SEL:
2279 if (inst->src[0].equals(inst->src[1])) {
2280 inst->opcode = BRW_OPCODE_MOV;
2281 inst->src[1] = reg_undef;
2282 inst->predicate = BRW_PREDICATE_NONE;
2283 inst->predicate_inverse = false;
2284 progress = true;
2285 } else if (inst->saturate && inst->src[1].file == IMM) {
2286 switch (inst->conditional_mod) {
2287 case BRW_CONDITIONAL_LE:
2288 case BRW_CONDITIONAL_L:
2289 switch (inst->src[1].type) {
2290 case BRW_REGISTER_TYPE_F:
2291 if (inst->src[1].f >= 1.0f) {
2292 inst->opcode = BRW_OPCODE_MOV;
2293 inst->src[1] = reg_undef;
2294 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2295 progress = true;
2296 }
2297 break;
2298 default:
2299 break;
2300 }
2301 break;
2302 case BRW_CONDITIONAL_GE:
2303 case BRW_CONDITIONAL_G:
2304 switch (inst->src[1].type) {
2305 case BRW_REGISTER_TYPE_F:
2306 if (inst->src[1].f <= 0.0f) {
2307 inst->opcode = BRW_OPCODE_MOV;
2308 inst->src[1] = reg_undef;
2309 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2310 progress = true;
2311 }
2312 break;
2313 default:
2314 break;
2315 }
2316 default:
2317 break;
2318 }
2319 }
2320 break;
2321 case BRW_OPCODE_MAD:
2322 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2323 inst->opcode = BRW_OPCODE_MOV;
2324 inst->src[1] = reg_undef;
2325 inst->src[2] = reg_undef;
2326 progress = true;
2327 } else if (inst->src[0].is_zero()) {
2328 inst->opcode = BRW_OPCODE_MUL;
2329 inst->src[0] = inst->src[2];
2330 inst->src[2] = reg_undef;
2331 progress = true;
2332 } else if (inst->src[1].is_one()) {
2333 inst->opcode = BRW_OPCODE_ADD;
2334 inst->src[1] = inst->src[2];
2335 inst->src[2] = reg_undef;
2336 progress = true;
2337 } else if (inst->src[2].is_one()) {
2338 inst->opcode = BRW_OPCODE_ADD;
2339 inst->src[2] = reg_undef;
2340 progress = true;
2341 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2342 inst->opcode = BRW_OPCODE_ADD;
2343 inst->src[1].f *= inst->src[2].f;
2344 inst->src[2] = reg_undef;
2345 progress = true;
2346 }
2347 break;
2348 case SHADER_OPCODE_BROADCAST:
2349 if (is_uniform(inst->src[0])) {
2350 inst->opcode = BRW_OPCODE_MOV;
2351 inst->sources = 1;
2352 inst->force_writemask_all = true;
2353 progress = true;
2354 } else if (inst->src[1].file == IMM) {
2355 inst->opcode = BRW_OPCODE_MOV;
2356 inst->src[0] = component(inst->src[0],
2357 inst->src[1].ud);
2358 inst->sources = 1;
2359 inst->force_writemask_all = true;
2360 progress = true;
2361 }
2362 break;
2363
2364 default:
2365 break;
2366 }
2367
2368       /* An immediate is only allowed as the second source, so swap it into src[1] if we made progress on a commutative opcode. */
2369 if (progress && inst->is_commutative()) {
2370 if (inst->src[0].file == IMM) {
2371 fs_reg tmp = inst->src[1];
2372 inst->src[1] = inst->src[0];
2373 inst->src[0] = tmp;
2374 }
2375 }
2376 }
2377 return progress;
2378 }
2379
2380 /**
2381 * Optimize sample messages that have constant zero values for the trailing
2382 * texture coordinates. We can just reduce the message length for these
2383 * instructions instead of reserving a register for it. Trailing parameters
2384 * that aren't sent default to zero anyway. This will cause the dead code
2385 * eliminator to remove the MOV instruction that would otherwise be emitted to
2386 * set up the zero value.
2387 */
2388 bool
2389 fs_visitor::opt_zero_samples()
2390 {
2391    /* Gen4 infers the texturing opcode from the message length, so we can't
2392     * change it.
2393 */
2394 if (devinfo->gen < 5)
2395 return false;
2396
2397 bool progress = false;
2398
2399 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2400 if (!inst->is_tex())
2401 continue;
2402
2403 fs_inst *load_payload = (fs_inst *) inst->prev;
2404
2405 if (load_payload->is_head_sentinel() ||
2406 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2407 continue;
2408
2409 /* We don't want to remove the message header or the first parameter.
2410 * Removing the first parameter is not allowed, see the Haswell PRM
2411 * volume 7, page 149:
2412 *
2413 * "Parameter 0 is required except for the sampleinfo message, which
2414 * has no parameter 0"
2415 */
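      /* For example (hypothetical): a SIMD8 message (exec_size / 8 == 1)
       * with header_size 1 and mlen 5 checks LOAD_PAYLOAD source
       * (5 - 1) / 1 + 1 - 1 = 4; if that trailing parameter is an
       * immediate zero, mlen drops to 4 and the loop retries with the
       * next trailing parameter.
       */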
2416 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2417 load_payload->src[(inst->mlen - inst->header_size) /
2418 (inst->exec_size / 8) +
2419 inst->header_size - 1].is_zero()) {
2420 inst->mlen -= inst->exec_size / 8;
2421 progress = true;
2422 }
2423 }
2424
2425 if (progress)
2426 invalidate_live_intervals();
2427
2428 return progress;
2429 }
2430
2431 /**
2432 * Optimize sample messages which are followed by the final RT write.
2433 *
2434  * CHV and Gen9+ can mark a texturing SEND instruction with EOT to have its
2435 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2436 * final texturing results copied to the framebuffer write payload and modify
2437 * them to write to the framebuffer directly.
2438 */
2439 bool
2440 fs_visitor::opt_sampler_eot()
2441 {
2442 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2443
2444 if (stage != MESA_SHADER_FRAGMENT)
2445 return false;
2446
2447 if (devinfo->gen != 9 && !devinfo->is_cherryview)
2448 return false;
2449
2450 /* FINISHME: It should be possible to implement this optimization when there
2451 * are multiple drawbuffers.
2452 */
2453 if (key->nr_color_regions != 1)
2454 return false;
2455
2456 /* Requires emitting a bunch of saturating MOV instructions during logical
2457 * send lowering to clamp the color payload, which the sampler unit isn't
2458 * going to do for us.
2459 */
2460 if (key->clamp_fragment_color)
2461 return false;
2462
2463 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2464 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2465 fs_inst *fb_write = (fs_inst *)block->end();
2466 assert(fb_write->eot);
2467 assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
2468
2469 /* There wasn't one; nothing to do. */
2470 if (unlikely(fb_write->prev->is_head_sentinel()))
2471 return false;
2472
2473 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2474
2475 /* 3D Sampler » Messages » Message Format
2476 *
2477 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2478 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2479 */
2480 if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
2481 tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
2482 tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
2483 tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
2484 tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
2485 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
2486 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
2487 tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
2488 return false;
2489
2490 /* XXX - This shouldn't be necessary. */
2491 if (tex_inst->prev->is_head_sentinel())
2492 return false;
2493
2494 /* Check that the FB write sources are fully initialized by the single
2495 * texturing instruction.
2496 */
2497 for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
2498 if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
2499 if (!fb_write->src[i].equals(tex_inst->dst) ||
2500 fb_write->size_read(i) != tex_inst->size_written)
2501 return false;
2502 } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
2503 if (fb_write->src[i].file != BAD_FILE)
2504 return false;
2505 }
2506 }
2507
2508 assert(!tex_inst->eot); /* We can't get here twice */
2509 assert((tex_inst->offset & (0xff << 24)) == 0);
2510
2511 const fs_builder ibld(this, block, tex_inst);
2512
2513 tex_inst->offset |= fb_write->target << 24;
2514 tex_inst->eot = true;
2515 tex_inst->dst = ibld.null_reg_ud();
2516 tex_inst->size_written = 0;
2517 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2518
2519 /* Marking EOT is sufficient, lower_logical_sends() will notice the EOT
2520 * flag and submit a header together with the sampler message as required
2521 * by the hardware.
2522 */
2523 invalidate_live_intervals();
2524 return true;
2525 }
2526
2527 bool
2528 fs_visitor::opt_register_renaming()
2529 {
2530 bool progress = false;
2531 int depth = 0;
2532
2533 int remap[alloc.count];
2534 memset(remap, -1, sizeof(int) * alloc.count);
2535
2536 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2537 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2538 depth++;
2539 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2540 inst->opcode == BRW_OPCODE_WHILE) {
2541 depth--;
2542 }
2543
2544 /* Rewrite instruction sources. */
2545 for (int i = 0; i < inst->sources; i++) {
2546 if (inst->src[i].file == VGRF &&
2547 remap[inst->src[i].nr] != -1 &&
2548 remap[inst->src[i].nr] != inst->src[i].nr) {
2549 inst->src[i].nr = remap[inst->src[i].nr];
2550 progress = true;
2551 }
2552 }
2553
2554 const int dst = inst->dst.nr;
2555
2556 if (depth == 0 &&
2557 inst->dst.file == VGRF &&
2558 alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
2559 !inst->is_partial_write()) {
2560 if (remap[dst] == -1) {
2561 remap[dst] = dst;
2562 } else {
2563 remap[dst] = alloc.allocate(regs_written(inst));
2564 inst->dst.nr = remap[dst];
2565 progress = true;
2566 }
2567 } else if (inst->dst.file == VGRF &&
2568 remap[dst] != -1 &&
2569 remap[dst] != dst) {
2570 inst->dst.nr = remap[dst];
2571 progress = true;
2572 }
2573 }
2574
2575 if (progress) {
2576 invalidate_live_intervals();
2577
2578 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2579 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
2580 delta_xy[i].nr = remap[delta_xy[i].nr];
2581 }
2582 }
2583 }
2584
2585 return progress;
2586 }
2587
2588 /**
2589 * Remove redundant or useless discard jumps.
2590 *
2591 * For example, we can eliminate jumps in the following sequence:
2592 *
2593 * discard-jump (redundant with the next jump)
2594 * discard-jump (useless; jumps to the next instruction)
2595 * placeholder-halt
2596 */
2597 bool
2598 fs_visitor::opt_redundant_discard_jumps()
2599 {
2600 bool progress = false;
2601
2602 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2603
2604 fs_inst *placeholder_halt = NULL;
2605 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2606 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2607 placeholder_halt = inst;
2608 break;
2609 }
2610 }
2611
2612 if (!placeholder_halt)
2613 return false;
2614
2615    /* Delete any discard jumps (HALTs) immediately before the placeholder halt. */
2616 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2617 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2618 prev = (fs_inst *) placeholder_halt->prev) {
2619 prev->remove(last_bblock);
2620 progress = true;
2621 }
2622
2623 if (progress)
2624 invalidate_live_intervals();
2625
2626 return progress;
2627 }
2628
2629 /**
2630  * Compute a GRF-granularity bitmask with one bit set for each GRF, counted
2631  * from \p r.offset, that overlaps the region starting at \p s.offset and
2632  * spanning \p ds bytes.
2633 */
2634 static inline unsigned
2635 mask_relative_to(const fs_reg &r, const fs_reg &s, unsigned ds)
2636 {
2637 const int rel_offset = reg_offset(s) - reg_offset(r);
2638 const int shift = rel_offset / REG_SIZE;
2639 const unsigned n = DIV_ROUND_UP(rel_offset % REG_SIZE + ds, REG_SIZE);
2640 assert(reg_space(r) == reg_space(s) &&
2641 shift >= 0 && shift < int(8 * sizeof(unsigned)));
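   /* For example: with s one full register past r (rel_offset = 32 bytes)
    * and ds = 40 bytes, shift = 1 and n = DIV_ROUND_UP(40, 32) = 2, giving
    * the mask 0b110: the region covers the second and third GRFs of r.
    */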
2642 return ((1 << n) - 1) << shift;
2643 }
2644
2645 bool
2646 fs_visitor::compute_to_mrf()
2647 {
2648 bool progress = false;
2649 int next_ip = 0;
2650
2651 /* No MRFs on Gen >= 7. */
2652 if (devinfo->gen >= 7)
2653 return false;
2654
2655 calculate_live_intervals();
2656
2657 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2658 int ip = next_ip;
2659 next_ip++;
2660
2661 if (inst->opcode != BRW_OPCODE_MOV ||
2662 inst->is_partial_write() ||
2663 inst->dst.file != MRF || inst->src[0].file != VGRF ||
2664 inst->dst.type != inst->src[0].type ||
2665 inst->src[0].abs || inst->src[0].negate ||
2666 !inst->src[0].is_contiguous() ||
2667 inst->src[0].offset % REG_SIZE != 0)
2668 continue;
2669
2670 /* Can't compute-to-MRF this GRF if someone else was going to
2671 * read it later.
2672 */
2673 if (this->virtual_grf_end[inst->src[0].nr] > ip)
2674 continue;
2675
2676       /* Found a move of a GRF to an MRF.  Let's see if we can rewrite the
2677        * instructions that computed the value of all GRFs of the source region.  The
2678 * regs_left bitset keeps track of the registers we haven't yet found a
2679 * generating instruction for.
2680 */
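      /* A minimal sketch with hypothetical registers: given
       *
       *    add(8)  g7<1>F  g1<8,8,1>F  g2<8,8,1>F
       *    mov(8)  m3<1>F  g7<8,8,1>F
       *
       * if g7 is not read afterwards, the ADD can be retargeted to write m3
       * directly and the MOV deleted.
       */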
2681 unsigned regs_left = (1 << regs_read(inst, 0)) - 1;
2682
2683 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2684 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2685 inst->src[0], inst->size_read(0))) {
2686          /* Found the last write to the register we want to turn
2687           * into a compute-to-MRF.
2688 */
2689
2690 /* If this one instruction didn't populate all the
2691 * channels, bail. We might be able to rewrite everything
2692 * that writes that reg, but it would require smarter
2693 * tracking.
2694 */
2695 if (scan_inst->is_partial_write())
2696 break;
2697
2698 /* Handling things not fully contained in the source of the copy
2699 * would need us to understand coalescing out more than one MOV at
2700 * a time.
2701 */
2702 if (!region_contained_in(scan_inst->dst, scan_inst->size_written,
2703 inst->src[0], inst->size_read(0)))
2704 break;
2705
2706          /* SEND instructions can't have an MRF as a destination. */
2707 if (scan_inst->mlen)
2708 break;
2709
2710 if (devinfo->gen == 6) {
2711             /* gen6 math instructions must have a GRF destination,
2712              * so no compute-to-MRF for them.
2713 */
2714 if (scan_inst->is_math()) {
2715 break;
2716 }
2717 }
2718
2719 /* Clear the bits for any registers this instruction overwrites. */
2720 regs_left &= ~mask_relative_to(
2721 inst->src[0], scan_inst->dst, scan_inst->size_written);
2722 if (!regs_left)
2723 break;
2724 }
2725
2726          /* We don't handle control flow here.  Most values that end up
2727           * in MRFs are computed shortly before the MRF write anyway.
2729 */
2730 if (block->start() == scan_inst)
2731 break;
2732
2733          /* You can't read from an MRF, so if anything else reads the source
2734           * GRF we wanted to rewrite into the MRF, that stops us.
2735 */
2736 bool interfered = false;
2737 for (int i = 0; i < scan_inst->sources; i++) {
2738 if (regions_overlap(scan_inst->src[i], scan_inst->size_read(i),
2739 inst->src[0], inst->size_read(0))) {
2740 interfered = true;
2741 }
2742 }
2743 if (interfered)
2744 break;
2745
2746 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2747 inst->dst, inst->size_written)) {
2748 /* If somebody else writes our MRF here, we can't
2749 * compute-to-MRF before that.
2750 */
2751 break;
2752 }
2753
2754 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1 &&
2755 regions_overlap(fs_reg(MRF, scan_inst->base_mrf), scan_inst->mlen * REG_SIZE,
2756 inst->dst, inst->size_written)) {
2757 /* Found a SEND instruction, which means that there are
2758 * live values in MRFs from base_mrf to base_mrf +
2759 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2760 * above it.
2761 */
2762 break;
2763 }
2764 }
2765
2766 if (regs_left)
2767 continue;
2768
2769 /* Found all generating instructions of our MRF's source value, so it
2770 * should be safe to rewrite them to point to the MRF directly.
2771 */
2772 regs_left = (1 << regs_read(inst, 0)) - 1;
2773
2774 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2775 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
2776 inst->src[0], inst->size_read(0))) {
2777 /* Clear the bits for any registers this instruction overwrites. */
2778 regs_left &= ~mask_relative_to(
2779 inst->src[0], scan_inst->dst, scan_inst->size_written);
2780
2781 const unsigned rel_offset = reg_offset(scan_inst->dst) -
2782 reg_offset(inst->src[0]);
2783
2784 if (inst->dst.nr & BRW_MRF_COMPR4) {
2785 /* Apply the same address transformation done by the hardware
2786 * for COMPR4 MRF writes.
2787 */
2788 assert(rel_offset < 2 * REG_SIZE);
2789 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE * 4;
2790
2791 /* Clear the COMPR4 bit if the generating instruction is not
2792 * compressed.
2793 */
2794 if (scan_inst->size_written < 2 * REG_SIZE)
2795 scan_inst->dst.nr &= ~BRW_MRF_COMPR4;
2796
2797 } else {
2798 /* Calculate the MRF number the result of this instruction is
2799 * ultimately written to.
2800 */
2801 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE;
2802 }
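            /* For example (hypothetical registers): with inst->dst.nr =
             * m2|COMPR4 and rel_offset = REG_SIZE, the generating
             * instruction is retargeted to m6|COMPR4 (2 + 1 * 4); if it
             * writes only a single register the COMPR4 bit is then
             * cleared, leaving a plain write to m6.
             */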
2803
2804 scan_inst->dst.file = MRF;
2805 scan_inst->dst.offset = inst->dst.offset + rel_offset % REG_SIZE;
2806 scan_inst->saturate |= inst->saturate;
2807 if (!regs_left)
2808 break;
2809 }
2810 }
2811
2812 assert(!regs_left);
2813 inst->remove(block);
2814 progress = true;
2815 }
2816
2817 if (progress)
2818 invalidate_live_intervals();
2819
2820 return progress;
2821 }
2822
2823 /**
2824 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2825 * flow. We could probably do better here with some form of divergence
2826 * analysis.
2827 */
2828 bool
2829 fs_visitor::eliminate_find_live_channel()
2830 {
2831 bool progress = false;
2832 unsigned depth = 0;
2833
2834 if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
2835 /* The optimization below assumes that channel zero is live on thread
2836 * dispatch, which may not be the case if the fixed function dispatches
2837 * threads sparsely.
2838 */
2839 return false;
2840 }
2841
2842 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2843 switch (inst->opcode) {
2844 case BRW_OPCODE_IF:
2845 case BRW_OPCODE_DO:
2846 depth++;
2847 break;
2848
2849 case BRW_OPCODE_ENDIF:
2850 case BRW_OPCODE_WHILE:
2851 depth--;
2852 break;
2853
2854 case FS_OPCODE_DISCARD_JUMP:
2855 /* This can potentially make control flow non-uniform until the end
2856 * of the program.
2857 */
2858 return progress;
2859
2860 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2861 if (depth == 0) {
2862 inst->opcode = BRW_OPCODE_MOV;
2863 inst->src[0] = brw_imm_ud(0u);
2864 inst->sources = 1;
2865 inst->force_writemask_all = true;
2866 progress = true;
2867 }
2868 break;
2869
2870 default:
2871 break;
2872 }
2873 }
2874
2875 return progress;
2876 }
2877
2878 /**
2879 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2880 * instructions to FS_OPCODE_REP_FB_WRITE.
2881 */
2882 void
2883 fs_visitor::emit_repclear_shader()
2884 {
2885 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2886 int base_mrf = 0;
2887 int color_mrf = base_mrf + 2;
2888 fs_inst *mov;
2889
2890 if (uniforms > 0) {
2891 mov = bld.exec_all().group(4, 0)
2892 .MOV(brw_message_reg(color_mrf),
2893 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
2894 } else {
2895 struct brw_reg reg =
2896 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
2897 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
2898 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2899
2900 mov = bld.exec_all().group(4, 0)
2901 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
2902 }
2903
2904 fs_inst *write;
2905 if (key->nr_color_regions == 1) {
2906 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2907 write->saturate = key->clamp_fragment_color;
2908 write->base_mrf = color_mrf;
2909 write->target = 0;
2910 write->header_size = 0;
2911 write->mlen = 1;
2912 } else {
2913 assume(key->nr_color_regions > 0);
2914 for (int i = 0; i < key->nr_color_regions; ++i) {
2915 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2916 write->saturate = key->clamp_fragment_color;
2917 write->base_mrf = base_mrf;
2918 write->target = i;
2919 write->header_size = 2;
2920 write->mlen = 3;
2921 }
2922 }
2923 write->eot = true;
2924
2925 calculate_cfg();
2926
2927 assign_constant_locations();
2928 assign_curb_setup();
2929
2930 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
2931 if (uniforms > 0) {
2932 assert(mov->src[0].file == FIXED_GRF);
2933 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
2934 }
2935 }
2936
2937 /**
2938 * Walks through basic blocks, looking for repeated MRF writes and
2939 * removing the later ones.
2940 */
2941 bool
2942 fs_visitor::remove_duplicate_mrf_writes()
2943 {
2944 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
2945 bool progress = false;
2946
2947 /* Need to update the MRF tracking for compressed instructions. */
2948 if (dispatch_width >= 16)
2949 return false;
2950
2951 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2952
2953 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2954 if (inst->is_control_flow()) {
2955 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2956 }
2957
2958 if (inst->opcode == BRW_OPCODE_MOV &&
2959 inst->dst.file == MRF) {
2960 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
2961 if (prev_inst && inst->equals(prev_inst)) {
2962 inst->remove(block);
2963 progress = true;
2964 continue;
2965 }
2966 }
2967
2968 /* Clear out the last-write records for MRFs that were overwritten. */
2969 if (inst->dst.file == MRF) {
2970 last_mrf_move[inst->dst.nr] = NULL;
2971 }
2972
2973 if (inst->mlen > 0 && inst->base_mrf != -1) {
2974 /* Found a SEND instruction, which will include two or fewer
2975 * implied MRF writes. We could do better here.
2976 */
2977 for (int i = 0; i < implied_mrf_writes(inst); i++) {
2978 last_mrf_move[inst->base_mrf + i] = NULL;
2979 }
2980 }
2981
2982 /* Clear out any MRF move records whose sources got overwritten. */
2983 for (unsigned i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
2984 if (last_mrf_move[i] &&
2985 regions_overlap(inst->dst, inst->size_written,
2986 last_mrf_move[i]->src[0],
2987 last_mrf_move[i]->size_read(0))) {
2988 last_mrf_move[i] = NULL;
2989 }
2990 }
2991
2992 if (inst->opcode == BRW_OPCODE_MOV &&
2993 inst->dst.file == MRF &&
2994 inst->src[0].file != ARF &&
2995 !inst->is_partial_write()) {
2996 last_mrf_move[inst->dst.nr] = inst;
2997 }
2998 }
2999
3000 if (progress)
3001 invalidate_live_intervals();
3002
3003 return progress;
3004 }
3005
3006 static void
3007 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3008 {
3009 /* Clear the flag for registers that actually got read (as expected). */
3010 for (int i = 0; i < inst->sources; i++) {
3011 int grf;
3012 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3013 grf = inst->src[i].nr;
3014 } else {
3015 continue;
3016 }
3017
3018 if (grf >= first_grf &&
3019 grf < first_grf + grf_len) {
3020 deps[grf - first_grf] = false;
3021 if (inst->exec_size == 16)
3022 deps[grf - first_grf + 1] = false;
3023 }
3024 }
3025 }
3026
3027 /**
3028 * Implements this workaround for the original 965:
3029 *
3030 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3031 * check for post destination dependencies on this instruction, software
3032 * must ensure that there is no destination hazard for the case of ‘write
3033 * followed by a posted write’ shown in the following example.
3034 *
3035 * 1. mov r3 0
3036 * 2. send r3.xy <rest of send instruction>
3037 * 3. mov r2 r3
3038 *
3039 * Due to no post-destination dependency check on the ‘send’, the above
3040 * code sequence could have two instructions (1 and 2) in flight at the
3041 * same time that both consider ‘r3’ as the target of their final writes.
3042 */
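/* The hazard is resolved by inserting an extra read of the register in
 * question, e.g. (a hypothetical resolve for the sequence above) a
 *
 *    mov null r3
 *
 * between 1 and 2, which forces the write from 1 to retire before the
 * send issues its own posted write to r3.
 */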
3043 void
3044 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3045 fs_inst *inst)
3046 {
3047 int write_len = regs_written(inst);
3048 int first_write_grf = inst->dst.nr;
3049 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3050 assert(write_len < (int)sizeof(needs_dep) - 1);
3051
3052 memset(needs_dep, false, sizeof(needs_dep));
3053 memset(needs_dep, true, write_len);
3054
3055 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3056
3057 /* Walk backwards looking for writes to registers we're writing which
3058 * aren't read since being written. If we hit the start of the program,
3059 * we assume that there are no outstanding dependencies on entry to the
3060 * program.
3061 */
3062 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3063 /* If we hit control flow, assume that there *are* outstanding
3064 * dependencies, and force their cleanup before our instruction.
3065 */
3066 if (block->start() == scan_inst && block->num != 0) {
3067 for (int i = 0; i < write_len; i++) {
3068 if (needs_dep[i])
3069 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3070 first_write_grf + i);
3071 }
3072 return;
3073 }
3074
3075       /* We insert our reads as late as possible, on the assumption that any
3076        * non-MOV instruction that might have left us an outstanding
3077        * dependency has more latency than a MOV.
3078 */
3079 if (scan_inst->dst.file == VGRF) {
3080 for (unsigned i = 0; i < regs_written(scan_inst); i++) {
3081 int reg = scan_inst->dst.nr + i;
3082
3083 if (reg >= first_write_grf &&
3084 reg < first_write_grf + write_len &&
3085 needs_dep[reg - first_write_grf]) {
3086 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3087 needs_dep[reg - first_write_grf] = false;
3088 if (scan_inst->exec_size == 16)
3089 needs_dep[reg - first_write_grf + 1] = false;
3090 }
3091 }
3092 }
3093
3094 /* Clear the flag for registers that actually got read (as expected). */
3095 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3096
3097 /* Continue the loop only if we haven't resolved all the dependencies */
3098 int i;
3099 for (i = 0; i < write_len; i++) {
3100 if (needs_dep[i])
3101 break;
3102 }
3103 if (i == write_len)
3104 return;
3105 }
3106 }
3107
3108 /**
3109 * Implements this workaround for the original 965:
3110 *
3111 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3112 * used as a destination register until after it has been sourced by an
3113 * instruction with a different destination register.
3114 */
3115 void
3116 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3117 {
3118 int write_len = regs_written(inst);
3119 int first_write_grf = inst->dst.nr;
3120 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3121 assert(write_len < (int)sizeof(needs_dep) - 1);
3122
3123 memset(needs_dep, false, sizeof(needs_dep));
3124 memset(needs_dep, true, write_len);
3125 /* Walk forwards looking for writes to registers we're writing which aren't
3126 * read before being written.
3127 */
3128 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3129 /* If we hit control flow, force resolve all remaining dependencies. */
3130 if (block->end() == scan_inst && block->num != cfg->num_blocks - 1) {
3131 for (int i = 0; i < write_len; i++) {
3132 if (needs_dep[i])
3133 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3134 first_write_grf + i);
3135 }
3136 return;
3137 }
3138
3139 /* Clear the flag for registers that actually got read (as expected). */
3140 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3141
3142 /* We insert our reads as late as possible since they're reading the
3143 * result of a SEND, which has massive latency.
3144 */
3145 if (scan_inst->dst.file == VGRF &&
3146 scan_inst->dst.nr >= first_write_grf &&
3147 scan_inst->dst.nr < first_write_grf + write_len &&
3148 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3149 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3150 scan_inst->dst.nr);
3151 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3152 }
3153
3154 /* Continue the loop only if we haven't resolved all the dependencies */
3155 int i;
3156 for (i = 0; i < write_len; i++) {
3157 if (needs_dep[i])
3158 break;
3159 }
3160 if (i == write_len)
3161 return;
3162 }
3163 }
3164
3165 void
3166 fs_visitor::insert_gen4_send_dependency_workarounds()
3167 {
3168 if (devinfo->gen != 4 || devinfo->is_g4x)
3169 return;
3170
3171 bool progress = false;
3172
3173 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3174 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3175 insert_gen4_pre_send_dependency_workarounds(block, inst);
3176 insert_gen4_post_send_dependency_workarounds(block, inst);
3177 progress = true;
3178 }
3179 }
3180
3181 if (progress)
3182 invalidate_live_intervals();
3183 }
3184
3185 /**
3186 * Turns the generic expression-style uniform pull constant load instruction
3187 * into a hardware-specific series of instructions for loading a pull
3188 * constant.
3189 *
3190 * The expression style allows the CSE pass before this to optimize out
3191 * repeated loads from the same offset, and gives the pre-register-allocation
3192 * scheduling full flexibility, while the conversion to native instructions
3193 * allows the post-register-allocation scheduler the best information
3194 * possible.
3195 *
3196 * Note that execution masking for setting up pull constant loads is special:
3197 * the channels that need to be written are unrelated to the current execution
3198 * mask, since a later instruction will use one of the result channels as a
3199 * source operand for all 8 or 16 of its channels.
3200 */
3201 void
3202 fs_visitor::lower_uniform_pull_constant_loads()
3203 {
3204 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3205 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3206 continue;
3207
3208 if (devinfo->gen >= 7) {
3209 const fs_builder ubld = fs_builder(this, block, inst).exec_all();
3210 const fs_reg payload = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD);
3211
3212 ubld.group(8, 0).MOV(payload,
3213 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3214 ubld.group(1, 0).MOV(component(payload, 2),
3215 brw_imm_ud(inst->src[1].ud / 16));
3216
3217 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3218 inst->src[1] = payload;
3219 inst->header_size = 1;
3220 inst->mlen = 1;
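         /* A sketch of the resulting message setup (hypothetical registers,
          * constant at byte offset 96):
          *
          *    mov(8)  g10<1>UD    g0<8,8,1>UD    { align1 WE_all }
          *    mov(1)  g10.2<1>UD  0x00000006UD   { align1 WE_all }
          *
          * i.e. the oword offset (96 / 16 = 6) lands in the third dword of
          * the header, and the generator later emits the actual send from it.
          */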
3221
3222 invalidate_live_intervals();
3223 } else {
3224 /* Before register allocation, we didn't tell the scheduler about the
3225 * MRF we use. We know it's safe to use this MRF because nothing
3226 * else does except for register spill/unspill, which generates and
3227 * uses its MRF within a single IR instruction.
3228 */
3229 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3230 inst->mlen = 1;
3231 }
3232 }
3233 }
3234
3235 bool
3236 fs_visitor::lower_load_payload()
3237 {
3238 bool progress = false;
3239
3240 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3241 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3242 continue;
3243
3244 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3245 assert(inst->saturate == false);
3246 fs_reg dst = inst->dst;
3247
3248 /* Get rid of COMPR4. We'll add it back in if we need it */
3249 if (dst.file == MRF)
3250 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3251
3252 const fs_builder ibld(this, block, inst);
3253 const fs_builder hbld = ibld.exec_all().group(8, 0);
3254
3255 for (uint8_t i = 0; i < inst->header_size; i++) {
3256 if (inst->src[i].file != BAD_FILE) {
3257 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3258 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3259 hbld.MOV(mov_dst, mov_src);
3260 }
3261 dst = offset(dst, hbld, 1);
3262 }
3263
3264 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3265 inst->exec_size > 8) {
3266 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3267 * a straightforward copy. Instead, the result of the
3268 * LOAD_PAYLOAD is treated as interleaved and the first four
3269 * non-header sources are unpacked as:
3270 *
3271 * m + 0: r0
3272 * m + 1: g0
3273 * m + 2: b0
3274 * m + 3: a0
3275 * m + 4: r1
3276 * m + 5: g1
3277 * m + 6: b1
3278 * m + 7: a1
3279 *
3280 * This is used for gen <= 5 fb writes.
3281 */
3282 assert(inst->exec_size == 16);
3283 assert(inst->header_size + 4 <= inst->sources);
3284 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3285 if (inst->src[i].file != BAD_FILE) {
3286 if (devinfo->has_compr4) {
3287 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3288 compr4_dst.nr |= BRW_MRF_COMPR4;
3289 ibld.MOV(compr4_dst, inst->src[i]);
3290 } else {
3291                /* The platform doesn't have COMPR4, so we have to fake it. */
3292 fs_reg mov_dst = retype(dst, inst->src[i].type);
3293 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3294 mov_dst.nr += 4;
3295 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3296 }
3297 }
3298
3299 dst.nr++;
3300 }
3301
3302 /* The loop above only ever incremented us through the first set
3303 * of 4 registers. However, thanks to the magic of COMPR4, we
3304 * actually wrote to the first 8 registers, so we need to take
3305 * that into account now.
3306 */
3307 dst.nr += 4;
3308
3309 /* The COMPR4 code took care of the first 4 sources. We'll let
3310 * the regular path handle any remaining sources. Yes, we are
3311 * modifying the instruction but we're about to delete it so
3312 * this really doesn't hurt anything.
3313 */
3314 inst->header_size += 4;
3315 }
3316
3317 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3318 if (inst->src[i].file != BAD_FILE)
3319 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3320 dst = offset(dst, ibld, 1);
3321 }
3322
3323 inst->remove(block);
3324 progress = true;
3325 }
3326
3327 if (progress)
3328 invalidate_live_intervals();
3329
3330 return progress;
3331 }
3332
3333 bool
3334 fs_visitor::lower_integer_multiplication()
3335 {
3336 bool progress = false;
3337
3338 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3339 const fs_builder ibld(this, block, inst);
3340
3341 if (inst->opcode == BRW_OPCODE_MUL) {
3342 if (inst->dst.is_accumulator() ||
3343 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3344 inst->dst.type != BRW_REGISTER_TYPE_UD))
3345 continue;
3346
3347 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3348 * operation directly, but CHV/BXT cannot.
3349 */
3350 if (devinfo->gen >= 8 &&
3351 !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo))
3352 continue;
3353
3354 if (inst->src[1].file == IMM &&
3355 inst->src[1].ud < (1 << 16)) {
3356 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3357 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3358 * src1 are used.
3359 *
3360 * If multiplying by an immediate value that fits in 16-bits, do a
3361 * single MUL instruction with that value in the proper location.
3362 */
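            /* For example (hypothetical registers): mul(8) g4<1>D g2<8,8,1>D 7D
             * becomes a single mul(8) g4<1>D g2<8,8,1>D 0x0007UW on Gen7+;
             * on older Gens the immediate is instead moved to a temporary
             * VGRF used as src0.
             */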
3363 if (devinfo->gen < 7) {
3364 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
3365 inst->dst.type);
3366 ibld.MOV(imm, inst->src[1]);
3367 ibld.MUL(inst->dst, imm, inst->src[0]);
3368 } else {
3369 const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
3370 ibld.MUL(inst->dst, inst->src[0],
3371 ud ? brw_imm_uw(inst->src[1].ud)
3372 : brw_imm_w(inst->src[1].d));
3373 }
3374 } else {
3375 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3376 * do 32-bit integer multiplication in one instruction, but instead
3377 * must do a sequence (which actually calculates a 64-bit result):
3378 *
3379 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3380 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3381 * mov(8) g2<1>D acc0<8,8,1>D
3382 *
3383           * But on Gen > 6, the ability to use the second accumulator register
3384 * (acc1) for non-float data types was removed, preventing a simple
3385 * implementation in SIMD16. A 16-channel result can be calculated by
3386 * executing the three instructions twice in SIMD8, once with quarter
3387 * control of 1Q for the first eight channels and again with 2Q for
3388 * the second eight channels.
3389 *
3390 * Which accumulator register is implicitly accessed (by AccWrEnable
3391 * for instance) is determined by the quarter control. Unfortunately
3392 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3393 * implicit accumulator access by an instruction with 2Q will access
3394 * acc1 regardless of whether the data type is usable in acc1.
3395 *
3396 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3397 * integer data types.
3398 *
3399 * Since we only want the low 32-bits of the result, we can do two
3400 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3401 * adjust the high result and add them (like the mach is doing):
3402 *
3403 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3404 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3405 * shl(8) g9<1>D g8<8,8,1>D 16D
3406 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3407 *
3408 * We avoid the shl instruction by realizing that we only want to add
3409 * the low 16-bits of the "high" result to the high 16-bits of the
3410 * "low" result and using proper regioning on the add:
3411 *
3412 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3413 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3414 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3415 *
3416 * Since it does not use the (single) accumulator register, we can
3417 * schedule multi-component multiplications much better.
3418 */
3419
3420 fs_reg orig_dst = inst->dst;
3421 if (orig_dst.is_null() || orig_dst.file == MRF) {
3422 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
3423 inst->dst.type);
3424 }
3425 fs_reg low = inst->dst;
3426 fs_reg high(VGRF, alloc.allocate(dispatch_width / 8),
3427 inst->dst.type);
3428
3429 if (devinfo->gen >= 7) {
3430 if (inst->src[1].file == IMM) {
3431 ibld.MUL(low, inst->src[0],
3432 brw_imm_uw(inst->src[1].ud & 0xffff));
3433 ibld.MUL(high, inst->src[0],
3434 brw_imm_uw(inst->src[1].ud >> 16));
3435 } else {
3436 ibld.MUL(low, inst->src[0],
3437 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
3438 ibld.MUL(high, inst->src[0],
3439 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
3440 }
3441 } else {
3442 ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
3443 inst->src[1]);
3444 ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
3445 inst->src[1]);
3446 }
3447
3448 ibld.ADD(subscript(inst->dst, BRW_REGISTER_TYPE_UW, 1),
3449 subscript(low, BRW_REGISTER_TYPE_UW, 1),
3450 subscript(high, BRW_REGISTER_TYPE_UW, 0));
3451
3452 if (inst->conditional_mod || orig_dst.file == MRF) {
3453 set_condmod(inst->conditional_mod,
3454 ibld.MOV(orig_dst, inst->dst));
3455 }
3456 }
3457
3458 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3459 /* Should have been lowered to 8-wide. */
3460 assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
3461 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3462 inst->dst.type);
3463 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3464 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3465
3466 if (devinfo->gen >= 8) {
3467          /* Until Gen8, integer multiplies read 32 bits from one source
3468           * and 16 bits from the other, relying on the MACH instruction
3469 * to generate the high bits of the result.
3470 *
3471 * On Gen8, the multiply instruction does a full 32x32-bit
3472 * multiply, but in order to do a 64-bit multiply we can simulate
3473 * the previous behavior and then use a MACH instruction.
3474 *
3475 * FINISHME: Don't use source modifiers on src1.
3476 */
3477 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3478 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3479 mul->src[1].type = BRW_REGISTER_TYPE_UW;
3480 mul->src[1].stride *= 2;
3481
3482 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3483 inst->group > 0) {
3484 /* Among other things the quarter control bits influence which
3485 * accumulator register is used by the hardware for instructions
3486 * that access the accumulator implicitly (e.g. MACH). A
3487 * second-half instruction would normally map to acc1, which
3488 * doesn't exist on Gen7 and up (the hardware does emulate it for
3489 * floating-point instructions *only* by taking advantage of the
3490 * extra precision of acc0 not normally used for floating point
3491 * arithmetic).
3492 *
3493 * HSW and up are careful enough not to try to access an
3494 * accumulator register that doesn't exist, but on earlier Gen7
3495 * hardware we need to make sure that the quarter control bits are
3496 * zero to avoid non-deterministic behaviour and emit an extra MOV
3497 * to get the result masked correctly according to the current
3498 * channel enables.
3499 */
3500 mach->group = 0;
3501 mach->force_writemask_all = true;
3502 mach->dst = ibld.vgrf(inst->dst.type);
3503 ibld.MOV(inst->dst, mach->dst);
3504 }
3505 } else {
3506 continue;
3507 }
3508
3509 inst->remove(block);
3510 progress = true;
3511 }
3512
3513 if (progress)
3514 invalidate_live_intervals();
3515
3516 return progress;
3517 }
3518
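/* Lower SEL with a conditional modifier (e.g. SEL.L, SEL.GE), which Gen4/5
 * hardware doesn't support, into a CMP followed by a predicated SEL.  A
 * minimal sketch with hypothetical registers:
 *
 *    sel.l(8)      g4<1>F  g2<8,8,1>F  g3<8,8,1>F
 *
 * becomes
 *
 *    cmp.l(8)      null<1>D  g2<8,8,1>F  g3<8,8,1>F
 *    (+f0) sel(8)  g4<1>F    g2<8,8,1>F  g3<8,8,1>F
 */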
3519 bool
3520 fs_visitor::lower_minmax()
3521 {
3522 assert(devinfo->gen < 6);
3523
3524 bool progress = false;
3525
3526 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3527 const fs_builder ibld(this, block, inst);
3528
3529 if (inst->opcode == BRW_OPCODE_SEL &&
3530 inst->predicate == BRW_PREDICATE_NONE) {
3531 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
3532 * the original SEL.L/GE instruction
3533 */
3534 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
3535 inst->conditional_mod);
3536 inst->predicate = BRW_PREDICATE_NORMAL;
3537 inst->conditional_mod = BRW_CONDITIONAL_NONE;
3538
3539 progress = true;
3540 }
3541 }
3542
3543 if (progress)
3544 invalidate_live_intervals();
3545
3546 return progress;
3547 }
3548
3549 static void
3550 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3551 fs_reg *dst, fs_reg color, unsigned components)
3552 {
3553 if (key->clamp_fragment_color) {
3554 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3555 assert(color.type == BRW_REGISTER_TYPE_F);
3556
3557 for (unsigned i = 0; i < components; i++)
3558 set_saturate(true,
3559 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3560
3561 color = tmp;
3562 }
3563
3564 for (unsigned i = 0; i < components; i++)
3565 dst[i] = offset(color, bld, i);
3566 }
3567
3568 static void
3569 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3570 const struct brw_wm_prog_data *prog_data,
3571 const brw_wm_prog_key *key,
3572 const fs_visitor::thread_payload &payload)
3573 {
3574 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
3575 const gen_device_info *devinfo = bld.shader->devinfo;
3576 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
3577 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
3578 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
3579 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
3580 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
3581 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
3582 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
3583 const unsigned components =
3584 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
3585
3586 /* We can potentially have a message length of up to 15, so we have to set
3587 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3588 */
3589 fs_reg sources[15];
3590 int header_size = 2, payload_header_size;
3591 unsigned length = 0;
3592
3593 /* From the Sandy Bridge PRM, volume 4, page 198:
3594 *
3595 * "Dispatched Pixel Enables. One bit per pixel indicating
3596 * which pixels were originally enabled when the thread was
3597 * dispatched. This field is only required for the end-of-
3598 * thread message and on all dual-source messages."
3599 */
3600 if (devinfo->gen >= 6 &&
3601 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3602 color1.file == BAD_FILE &&
3603 key->nr_color_regions == 1) {
3604 header_size = 0;
3605 }
3606
3607 if (header_size != 0) {
3608 assert(header_size == 2);
3609 /* Allocate 2 registers for a header */
3610 length += 2;
3611 }
3612
3613 if (payload.aa_dest_stencil_reg) {
3614 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
3615 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3616 .MOV(sources[length],
3617 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3618 length++;
3619 }
3620
3621 if (sample_mask.file != BAD_FILE) {
3622 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
3623 BRW_REGISTER_TYPE_UD);
3624
3625 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3626 * relevant. Since it's unsigned single words, one vgrf is always
3627 * 16-wide, but only the lower or upper 8 channels will be used by the
3628 * hardware when doing a SIMD8 write, depending on whether we have
3629 * selected the subspans for the first or second half respectively.
3630 */
3631 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3632 sample_mask.type = BRW_REGISTER_TYPE_UW;
3633 sample_mask.stride *= 2;
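/* As a concrete example: after the UD -> UW retype with doubled
 * stride, each 32-bit gl_SampleMask channel is read as its low word,
 * and a SIMD8 second-half write (inst->group == 8) lands in UW
 * channels 8..15 of the 16-wide destination register.
 */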
3634
3635 bld.exec_all().annotate("FB write oMask")
3636 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
3637 inst->group),
3638 sample_mask);
3639 length++;
3640 }
3641
3642 payload_header_size = length;
3643
3644 if (src0_alpha.file != BAD_FILE) {
3645 /* FIXME: This is being passed at the wrong location in the payload and
3646 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3647 * It's supposed to be immediately before oMask but there seems to be no
3648 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3649 * requires header sources to form a contiguous segment at the beginning
3650 * of the message and src0_alpha has per-channel semantics.
3651 */
3652 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3653 length++;
3654 } else if (key->replicate_alpha && inst->target != 0) {
3655 /* Handle the case where the fragment shader doesn't write to draw
3656 * buffer zero. There is no need to call setup_color_payload() for
3657 * src0_alpha because the alpha value will be undefined.
3658 */
3659 length++;
3660 }
3661
3662 setup_color_payload(bld, key, &sources[length], color0, components);
3663 length += 4;
3664
3665 if (color1.file != BAD_FILE) {
3666 setup_color_payload(bld, key, &sources[length], color1, components);
3667 length += 4;
3668 }
3669
3670 if (src_depth.file != BAD_FILE) {
3671 sources[length] = src_depth;
3672 length++;
3673 }
3674
3675 if (dst_depth.file != BAD_FILE) {
3676 sources[length] = dst_depth;
3677 length++;
3678 }
3679
3680 if (src_stencil.file != BAD_FILE) {
3681 assert(devinfo->gen >= 9);
3682 assert(bld.dispatch_width() != 16);
3683
3684 /* XXX: src_stencil is only available on gen9+. dst_depth is never
3685 * available on gen9+. As such it's impossible to have both enabled at the
3686 * same time and therefore length cannot overrun the array.
3687 */
3688 assert(length < 15);
3689
3690 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3691 bld.exec_all().annotate("FB write OS")
3692 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
3693 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
3694 length++;
3695 }
3696
3697 fs_inst *load;
3698 if (devinfo->gen >= 7) {
3699 /* Send from the GRF */
3700 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
3701 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3702 payload.nr = bld.shader->alloc.allocate(regs_written(load));
3703 load->dst = payload;
3704
3705 inst->src[0] = payload;
3706 inst->resize_sources(1);
3707 } else {
3708 /* Send from the MRF */
3709 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3710 sources, length, payload_header_size);
3711
3712 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3713 * will do this for us if we just give it a COMPR4 destination.
3714 */
3715 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3716 load->dst.nr |= BRW_MRF_COMPR4;
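/* Sketch of the COMPR4 layout (assuming base_mrf == 1, so the colors
 * start at m2): for each color component LOAD_PAYLOAD writes the
 * first SIMD8 half at m(2+i) and the second half at m(2+i+4), i.e.
 * R/G/B/A of subspans 0-1 in m2-m5 and of subspans 2-3 in m6-m9,
 * which is the layout the SIMD16 render target write expects.
 */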
3717
3718 inst->resize_sources(0);
3719 inst->base_mrf = 1;
3720 }
3721
3722 inst->opcode = FS_OPCODE_FB_WRITE;
3723 inst->mlen = regs_written(load);
3724 inst->header_size = header_size;
3725 }
3726
3727 static void
3728 lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
3729 {
3730 const fs_builder &ubld = bld.exec_all();
3731 const unsigned length = 2;
3732 const fs_reg header = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD, length);
3733
3734 ubld.group(16, 0)
3735 .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
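/* Note that the single force_writemask_all SIMD16 UD MOV above copies
 * two whole registers (g0 and g1) into the two-register header.
 */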
3736
3737 inst->resize_sources(1);
3738 inst->src[0] = header;
3739 inst->opcode = FS_OPCODE_FB_READ;
3740 inst->mlen = length;
3741 inst->header_size = length;
3742 }
3743
3744 static void
3745 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3746 const fs_reg &coordinate,
3747 const fs_reg &shadow_c,
3748 const fs_reg &lod, const fs_reg &lod2,
3749 const fs_reg &surface,
3750 const fs_reg &sampler,
3751 unsigned coord_components,
3752 unsigned grad_components)
3753 {
3754 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3755 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3756 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3757 fs_reg msg_end = msg_begin;
3758
3759 /* g0 header. */
3760 msg_end = offset(msg_end, bld.group(8, 0), 1);
3761
3762 for (unsigned i = 0; i < coord_components; i++)
3763 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3764 offset(coordinate, bld, i));
3765
3766 msg_end = offset(msg_end, bld, coord_components);
3767
3768 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
3769 * require all three coordinate components to be present, with any
3770 * unused components set to zero. */
3771 if (coord_components > 0 &&
3772 (has_lod || shadow_c.file != BAD_FILE ||
3773 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3774 for (unsigned i = coord_components; i < 3; i++)
3775 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
3776
3777 msg_end = offset(msg_end, bld, 3 - coord_components);
3778 }
3779
3780 if (op == SHADER_OPCODE_TXD) {
3781 /* TXD unsupported in SIMD16 mode. */
3782 assert(bld.dispatch_width() == 8);
3783
3784 /* the slots for u and v are always present, but r is optional */
3785 if (coord_components < 2)
3786 msg_end = offset(msg_end, bld, 2 - coord_components);
3787
3788 /* P = u, v, r
3789 * dPdx = dudx, dvdx, drdx
3790 * dPdy = dudy, dvdy, drdy
3791 *
3792 * 1-arg: Does not exist.
3793 *
3794 * 2-arg: dudx dvdx dudy dvdy
3795 * dPdx.x dPdx.y dPdy.x dPdy.y
3796 * m4 m5 m6 m7
3797 *
3798 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3799 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3800 * m5 m6 m7 m8 m9 m10
3801 */
3802 for (unsigned i = 0; i < grad_components; i++)
3803 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3804
3805 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3806
3807 for (unsigned i = 0; i < grad_components; i++)
3808 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3809
3810 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3811 }
3812
3813 if (has_lod) {
3814 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *Without*
3815 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
3816 */
3817 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3818 bld.dispatch_width() == 16);
3819
3820 const brw_reg_type type =
3821 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3822 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3823 bld.MOV(retype(msg_end, type), lod);
3824 msg_end = offset(msg_end, bld, 1);
3825 }
3826
3827 if (shadow_c.file != BAD_FILE) {
3828 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3829 /* There's no plain shadow compare message, so we use shadow
3830 * compare with a bias of 0.0.
3831 */
3832 bld.MOV(msg_end, brw_imm_f(0.0f));
3833 msg_end = offset(msg_end, bld, 1);
3834 }
3835
3836 bld.MOV(msg_end, shadow_c);
3837 msg_end = offset(msg_end, bld, 1);
3838 }
3839
3840 inst->opcode = op;
3841 inst->src[0] = reg_undef;
3842 inst->src[1] = surface;
3843 inst->src[2] = sampler;
3844 inst->resize_sources(3);
3845 inst->base_mrf = msg_begin.nr;
3846 inst->mlen = msg_end.nr - msg_begin.nr;
3847 inst->header_size = 1;
3848 }
3849
3850 static void
3851 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
3852 const fs_reg &coordinate,
3853 const fs_reg &shadow_c,
3854 const fs_reg &lod, const fs_reg &lod2,
3855 const fs_reg &sample_index,
3856 const fs_reg &surface,
3857 const fs_reg &sampler,
3858 unsigned coord_components,
3859 unsigned grad_components)
3860 {
3861 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
3862 fs_reg msg_coords = message;
3863 unsigned header_size = 0;
3864
3865 if (inst->offset != 0) {
3866 /* The offsets set up by the visitor are in the m1 header, so we can't
3867 * go headerless.
3868 */
3869 header_size = 1;
3870 message.nr--;
3871 }
3872
3873 for (unsigned i = 0; i < coord_components; i++)
3874 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
3875 offset(coordinate, bld, i));
3876
3877 fs_reg msg_end = offset(msg_coords, bld, coord_components);
3878 fs_reg msg_lod = offset(msg_coords, bld, 4);
3879
3880 if (shadow_c.file != BAD_FILE) {
3881 fs_reg msg_shadow = msg_lod;
3882 bld.MOV(msg_shadow, shadow_c);
3883 msg_lod = offset(msg_shadow, bld, 1);
3884 msg_end = msg_lod;
3885 }
3886
3887 switch (op) {
3888 case SHADER_OPCODE_TXL:
3889 case FS_OPCODE_TXB:
3890 bld.MOV(msg_lod, lod);
3891 msg_end = offset(msg_lod, bld, 1);
3892 break;
3893 case SHADER_OPCODE_TXD:
3894 /**
3895 * P = u, v, r
3896 * dPdx = dudx, dvdx, drdx
3897 * dPdy = dudy, dvdy, drdy
3898 *
3899 * Load up these values:
3900 * - dudx dudy dvdx dvdy drdx drdy
3901 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
3902 */
3903 msg_end = msg_lod;
3904 for (unsigned i = 0; i < grad_components; i++) {
3905 bld.MOV(msg_end, offset(lod, bld, i));
3906 msg_end = offset(msg_end, bld, 1);
3907
3908 bld.MOV(msg_end, offset(lod2, bld, i));
3909 msg_end = offset(msg_end, bld, 1);
3910 }
3911 break;
3912 case SHADER_OPCODE_TXS:
3913 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
3914 bld.MOV(msg_lod, lod);
3915 msg_end = offset(msg_lod, bld, 1);
3916 break;
3917 case SHADER_OPCODE_TXF:
3918 msg_lod = offset(msg_coords, bld, 3);
3919 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
3920 msg_end = offset(msg_lod, bld, 1);
3921 break;
3922 case SHADER_OPCODE_TXF_CMS:
3923 msg_lod = offset(msg_coords, bld, 3);
3924 /* lod */
3925 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
3926 /* sample index */
3927 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
3928 msg_end = offset(msg_lod, bld, 2);
3929 break;
3930 default:
3931 break;
3932 }
3933
3934 inst->opcode = op;
3935 inst->src[0] = reg_undef;
3936 inst->src[1] = surface;
3937 inst->src[2] = sampler;
3938 inst->resize_sources(3);
3939 inst->base_mrf = message.nr;
3940 inst->mlen = msg_end.nr - message.nr;
3941 inst->header_size = header_size;
3942
3943 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
3944 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
3945 }
3946
3947 static bool
3948 is_high_sampler(const struct gen_device_info *devinfo, const fs_reg &sampler)
3949 {
3950 if (devinfo->gen < 8 && !devinfo->is_haswell)
3951 return false;
3952
3953 return sampler.file != IMM || sampler.ud >= 16;
3954 }
3955
3956 static void
3957 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
3958 const fs_reg &coordinate,
3959 const fs_reg &shadow_c,
3960 fs_reg lod, const fs_reg &lod2,
3961 const fs_reg &sample_index,
3962 const fs_reg &mcs,
3963 const fs_reg &surface,
3964 const fs_reg &sampler,
3965 const fs_reg &tg4_offset,
3966 unsigned coord_components,
3967 unsigned grad_components)
3968 {
3969 const gen_device_info *devinfo = bld.shader->devinfo;
3970 unsigned reg_width = bld.dispatch_width() / 8;
3971 unsigned header_size = 0, length = 0;
3972 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
3973 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
3974 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
3975
3976 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
3977 inst->offset != 0 || inst->eot ||
3978 op == SHADER_OPCODE_SAMPLEINFO ||
3979 is_high_sampler(devinfo, sampler)) {
3980 /* For general texture offsets (no txf workaround), we need a header to
3981 * put them in. Note that we're only reserving space for it in the
3982 * message payload as it will be initialized implicitly by the
3983 * generator.
3984 *
3985 * TG4 needs to place its channel select in the header, for interaction
3986 * with ARB_texture_swizzle. The sampler index is only 4 bits, so for
3987 * larger sampler numbers we need to offset the Sampler State Pointer in
3988 * the header.
3989 */
3990 header_size = 1;
3991 sources[0] = fs_reg();
3992 length++;
3993
3994 /* If we're requesting fewer than four channels worth of response,
3995 * and we have an explicit header, we need to set up the sampler
3996 * writemask. It's reversed from normal: 1 means "don't write".
3997 */
3998 if (!inst->eot && regs_written(inst) != 4 * reg_width) {
3999 assert(regs_written(inst) % reg_width == 0);
4000 unsigned mask = ~((1 << (regs_written(inst) / reg_width)) - 1) & 0xf;
4001 inst->offset |= mask << 12;
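/* Worked example: in SIMD8 (reg_width == 1) a sampler result of
 * which only .xy is consumed has regs_written(inst) == 2, so
 * mask = ~0b0011 & 0xf = 0b1100 and the z/w channels are marked
 * "don't write" in the descriptor.
 */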
4002 }
4003 }
4004
4005 if (shadow_c.file != BAD_FILE) {
4006 bld.MOV(sources[length], shadow_c);
4007 length++;
4008 }
4009
4010 bool coordinate_done = false;
4011
4012 /* Set up the LOD info */
4013 switch (op) {
4014 case FS_OPCODE_TXB:
4015 case SHADER_OPCODE_TXL:
4016 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4017 op = SHADER_OPCODE_TXL_LZ;
4018 break;
4019 }
4020 bld.MOV(sources[length], lod);
4021 length++;
4022 break;
4023 case SHADER_OPCODE_TXD:
4024 /* TXD should have been lowered in SIMD16 mode. */
4025 assert(bld.dispatch_width() == 8);
4026
4027 /* Load dPdx and the coordinate together:
4028 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4029 */
4030 for (unsigned i = 0; i < coord_components; i++) {
4031 bld.MOV(sources[length++], offset(coordinate, bld, i));
4032
4033 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4034 * only derivatives for (u, v, r).
4035 */
4036 if (i < grad_components) {
4037 bld.MOV(sources[length++], offset(lod, bld, i));
4038 bld.MOV(sources[length++], offset(lod2, bld, i));
4039 }
4040 }
4041
4042 coordinate_done = true;
4043 break;
4044 case SHADER_OPCODE_TXS:
4045 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4046 length++;
4047 break;
4048 case SHADER_OPCODE_TXF:
4049 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4050 * On Gen9+ they are u, v, lod, r.
4051 */
4052 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4053
4054 if (devinfo->gen >= 9) {
4055 if (coord_components >= 2) {
4056 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
4057 offset(coordinate, bld, 1));
4058 } else {
4059 sources[length] = brw_imm_d(0);
4060 }
4061 length++;
4062 }
4063
4064 if (devinfo->gen >= 9 && lod.is_zero()) {
4065 op = SHADER_OPCODE_TXF_LZ;
4066 } else {
4067 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
4068 length++;
4069 }
4070
4071 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
4072 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4073 offset(coordinate, bld, i));
4074
4075 coordinate_done = true;
4076 break;
4077
4078 case SHADER_OPCODE_TXF_CMS:
4079 case SHADER_OPCODE_TXF_CMS_W:
4080 case SHADER_OPCODE_TXF_UMS:
4081 case SHADER_OPCODE_TXF_MCS:
4082 if (op == SHADER_OPCODE_TXF_UMS ||
4083 op == SHADER_OPCODE_TXF_CMS ||
4084 op == SHADER_OPCODE_TXF_CMS_W) {
4085 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
4086 length++;
4087 }
4088
4089 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
4090 /* Data from the multisample control surface. */
4091 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
4092 length++;
4093
4094 /* On Gen9+ we'll use ld2dms_w instead, which has two registers
4095 * for the MCS data.
4096 */
4097 if (op == SHADER_OPCODE_TXF_CMS_W) {
4098 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
4099 mcs.file == IMM ?
4100 mcs :
4101 offset(mcs, bld, 1));
4102 length++;
4103 }
4104 }
4105
4106 /* There is no offsetting for this message; just copy in the integer
4107 * texture coordinates.
4108 */
4109 for (unsigned i = 0; i < coord_components; i++)
4110 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4111 offset(coordinate, bld, i));
4112
4113 coordinate_done = true;
4114 break;
4115 case SHADER_OPCODE_TG4_OFFSET:
4116 /* More crazy intermixing */
4117 for (unsigned i = 0; i < 2; i++) /* u, v */
4118 bld.MOV(sources[length++], offset(coordinate, bld, i));
4119
4120 for (unsigned i = 0; i < 2; i++) /* offu, offv */
4121 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4122 offset(tg4_offset, bld, i));
4123
4124 if (coord_components == 3) /* r if present */
4125 bld.MOV(sources[length++], offset(coordinate, bld, 2));
4126
4127 coordinate_done = true;
4128 break;
4129 default:
4130 break;
4131 }
4132
4133 /* Set up the coordinate (except for cases where it was done above) */
4134 if (!coordinate_done) {
4135 for (unsigned i = 0; i < coord_components; i++)
4136 bld.MOV(sources[length++], offset(coordinate, bld, i));
4137 }
4138
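/* The header, when present, occupies a single register regardless of
 * SIMD width, while every other payload slot takes reg_width
 * registers; e.g. a SIMD16 message with a header and two coordinate
 * components has length == 3 but mlen == 3 * 2 - 1 == 5.
 */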
4139 int mlen;
4140 if (reg_width == 2)
4141 mlen = length * reg_width - header_size;
4142 else
4143 mlen = length * reg_width;
4144
4145 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
4146 BRW_REGISTER_TYPE_F);
4147 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
4148
4149 /* Generate the SEND. */
4150 inst->opcode = op;
4151 inst->src[0] = src_payload;
4152 inst->src[1] = surface;
4153 inst->src[2] = sampler;
4154 inst->resize_sources(3);
4155 inst->mlen = mlen;
4156 inst->header_size = header_size;
4157
4158 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4159 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4160 }
4161
4162 static void
4163 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
4164 {
4165 const gen_device_info *devinfo = bld.shader->devinfo;
4166 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
4167 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4168 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
4169 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
4170 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
4171 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
4172 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
4173 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
4174 const fs_reg &tg4_offset = inst->src[TEX_LOGICAL_SRC_TG4_OFFSET];
4175 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
4176 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4177 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
4178 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
4179
4180 if (devinfo->gen >= 7) {
4181 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
4182 shadow_c, lod, lod2, sample_index,
4183 mcs, surface, sampler, tg4_offset,
4184 coord_components, grad_components);
4185 } else if (devinfo->gen >= 5) {
4186 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
4187 shadow_c, lod, lod2, sample_index,
4188 surface, sampler,
4189 coord_components, grad_components);
4190 } else {
4191 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
4192 shadow_c, lod, lod2,
4193 surface, sampler,
4194 coord_components, grad_components);
4195 }
4196 }
4197
4198 /**
4199 * Initialize the header present in some typed and untyped surface
4200 * messages.
4201 */
4202 static fs_reg
4203 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
4204 {
4205 fs_builder ubld = bld.exec_all().group(8, 0);
4206 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4207 ubld.MOV(dst, brw_imm_d(0));
4208 ubld.MOV(component(dst, 7), sample_mask);
4209 return dst;
4210 }
4211
4212 static void
4213 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
4214 const fs_reg &sample_mask)
4215 {
4216 /* Get the logical send arguments. */
4217 const fs_reg &addr = inst->src[0];
4218 const fs_reg &src = inst->src[1];
4219 const fs_reg &surface = inst->src[2];
4220 const UNUSED fs_reg &dims = inst->src[3];
4221 const fs_reg &arg = inst->src[4];
4222
4223 /* Calculate the total number of components of the payload. */
4224 const unsigned addr_sz = inst->components_read(0);
4225 const unsigned src_sz = inst->components_read(1);
4226 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
4227 const unsigned sz = header_sz + addr_sz + src_sz;
4228
4229 /* Allocate space for the payload. */
4230 fs_reg *const components = new fs_reg[sz];
4231 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
4232 unsigned n = 0;
4233
4234 /* Construct the payload. */
4235 if (header_sz)
4236 components[n++] = emit_surface_header(bld, sample_mask);
4237
4238 for (unsigned i = 0; i < addr_sz; i++)
4239 components[n++] = offset(addr, bld, i);
4240
4241 for (unsigned i = 0; i < src_sz; i++)
4242 components[n++] = offset(src, bld, i);
4243
4244 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
4245
4246 /* Update the original instruction. */
4247 inst->opcode = op;
4248 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
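/* E.g. a SIMD16 untyped surface write of a vec4 to a single address
 * component with a sample-mask header gives
 * mlen = 1 + (1 + 4) * 16 / 8 = 11 registers.
 */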
4249 inst->header_size = header_sz;
4250
4251 inst->src[0] = payload;
4252 inst->src[1] = surface;
4253 inst->src[2] = arg;
4254 inst->resize_sources(3);
4255
4256 delete[] components;
4257 }
4258
4259 static void
4260 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
4261 {
4262 const gen_device_info *devinfo = bld.shader->devinfo;
4263
4264 if (devinfo->gen >= 7) {
4265 /* We are switching the instruction from an ALU-like instruction to a
4266 * send-from-grf instruction. Since sends can't handle strides or
4267 * source modifiers, we have to make a copy of the offset source.
4268 */
4269 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4270 bld.MOV(tmp, inst->src[1]);
4271 inst->src[1] = tmp;
4272
4273 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
4274
4275 } else {
4276 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
4277 BRW_REGISTER_TYPE_UD);
4278
4279 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
4280
4281 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
4282 inst->resize_sources(1);
4283 inst->base_mrf = payload.nr;
4284 inst->header_size = 1;
4285 inst->mlen = 1 + inst->exec_size / 8;
4286 }
4287 }
4288
4289 static void
4290 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
4291 {
4292 assert(bld.shader->devinfo->gen < 6);
4293
4294 inst->base_mrf = 2;
4295 inst->mlen = inst->sources * inst->exec_size / 8;
4296
4297 if (inst->sources > 1) {
4298 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
4299 * "Message Payload":
4300 *
4301 * "Operand0[7]. For the INT DIV functions, this operand is the
4302 * denominator."
4303 * ...
4304 * "Operand1[7]. For the INT DIV functions, this operand is the
4305 * numerator."
4306 */
4307 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
4308 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
4309 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
4310
4311 inst->resize_sources(1);
4312 inst->src[0] = src0;
4313
4314 assert(inst->exec_size == 8);
4315 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
4316 }
4317 }
4318
4319 bool
4320 fs_visitor::lower_logical_sends()
4321 {
4322 bool progress = false;
4323
4324 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4325 const fs_builder ibld(this, block, inst);
4326
4327 switch (inst->opcode) {
4328 case FS_OPCODE_FB_WRITE_LOGICAL:
4329 assert(stage == MESA_SHADER_FRAGMENT);
4330 lower_fb_write_logical_send(ibld, inst,
4331 brw_wm_prog_data(prog_data),
4332 (const brw_wm_prog_key *)key,
4333 payload);
4334 break;
4335
4336 case FS_OPCODE_FB_READ_LOGICAL:
4337 lower_fb_read_logical_send(ibld, inst);
4338 break;
4339
4340 case SHADER_OPCODE_TEX_LOGICAL:
4341 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4342 break;
4343
4344 case SHADER_OPCODE_TXD_LOGICAL:
4345 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4346 break;
4347
4348 case SHADER_OPCODE_TXF_LOGICAL:
4349 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4350 break;
4351
4352 case SHADER_OPCODE_TXL_LOGICAL:
4353 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4354 break;
4355
4356 case SHADER_OPCODE_TXS_LOGICAL:
4357 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4358 break;
4359
4360 case FS_OPCODE_TXB_LOGICAL:
4361 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4362 break;
4363
4364 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4365 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4366 break;
4367
4368 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
4369 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
4370 break;
4371
4372 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4373 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4374 break;
4375
4376 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4377 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4378 break;
4379
4380 case SHADER_OPCODE_LOD_LOGICAL:
4381 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4382 break;
4383
4384 case SHADER_OPCODE_TG4_LOGICAL:
4385 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4386 break;
4387
4388 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4389 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4390 break;
4391
4392 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4393 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
4394 break;
4395
4396 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4397 lower_surface_logical_send(ibld, inst,
4398 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4399 fs_reg());
4400 break;
4401
4402 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4403 lower_surface_logical_send(ibld, inst,
4404 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4405 ibld.sample_mask_reg());
4406 break;
4407
4408 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4409 lower_surface_logical_send(ibld, inst,
4410 SHADER_OPCODE_UNTYPED_ATOMIC,
4411 ibld.sample_mask_reg());
4412 break;
4413
4414 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4415 lower_surface_logical_send(ibld, inst,
4416 SHADER_OPCODE_TYPED_SURFACE_READ,
4417 brw_imm_d(0xffff));
4418 break;
4419
4420 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4421 lower_surface_logical_send(ibld, inst,
4422 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4423 ibld.sample_mask_reg());
4424 break;
4425
4426 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4427 lower_surface_logical_send(ibld, inst,
4428 SHADER_OPCODE_TYPED_ATOMIC,
4429 ibld.sample_mask_reg());
4430 break;
4431
4432 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4433 lower_varying_pull_constant_logical_send(ibld, inst);
4434 break;
4435
4436 case SHADER_OPCODE_RCP:
4437 case SHADER_OPCODE_RSQ:
4438 case SHADER_OPCODE_SQRT:
4439 case SHADER_OPCODE_EXP2:
4440 case SHADER_OPCODE_LOG2:
4441 case SHADER_OPCODE_SIN:
4442 case SHADER_OPCODE_COS:
4443 case SHADER_OPCODE_POW:
4444 case SHADER_OPCODE_INT_QUOTIENT:
4445 case SHADER_OPCODE_INT_REMAINDER:
4446 /* The math opcodes are overloaded for the send-like and
4447 * expression-like instructions, which seems kind of icky. Gen6+ has
4448 * a native (but rather quirky) MATH instruction so we don't need to
4449 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
4450 * logical instructions (which we can easily recognize because they
4451 * have mlen = 0) into send-like virtual instructions.
4452 */
4453 if (devinfo->gen < 6 && inst->mlen == 0) {
4454 lower_math_logical_send(ibld, inst);
4455 break;
4456
4457 } else {
4458 continue;
4459 }
4460
4461 default:
4462 continue;
4463 }
4464
4465 progress = true;
4466 }
4467
4468 if (progress)
4469 invalidate_live_intervals();
4470
4471 return progress;
4472 }
4473
4474 /**
4475 * Get the closest allowed SIMD width for instruction \p inst accounting for
4476 * some common regioning and execution control restrictions that apply to FPU
4477 * instructions. These restrictions don't necessarily have any relevance to
4478 * instructions not executed by the FPU pipeline like extended math, control
4479 * flow or send message instructions.
4480 *
4481 * For virtual opcodes it's really up to the instruction -- in some cases
4482 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
4483 * instructions) it may simplify virtual instruction lowering if we can
4484 * enforce FPU-like regioning restrictions already on the virtual instruction,
4485 * in other cases (e.g. virtual send-like instructions) this may be
4486 * excessively restrictive.
4487 */
4488 static unsigned
4489 get_fpu_lowered_simd_width(const struct gen_device_info *devinfo,
4490 const fs_inst *inst)
4491 {
4492 /* Maximum execution size representable in the instruction controls. */
4493 unsigned max_width = MIN2(32, inst->exec_size);
4494
4495 /* According to the PRMs:
4496 * "A. In Direct Addressing mode, a source cannot span more than 2
4497 * adjacent GRF registers.
4498 * B. A destination cannot span more than 2 adjacent GRF registers."
4499 *
4500 * Look for the source or destination with the largest register region
4501 * which is the one that is going to limit the overall execution size of
4502 * the instruction due to this rule.
4503 */
4504 unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
4505
4506 for (unsigned i = 0; i < inst->sources; i++)
4507 reg_count = MAX2(reg_count, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));
4508
4509 /* Calculate the maximum execution size of the instruction based on the
4510 * factor by which it goes over the hardware limit of 2 GRFs.
4511 */
4512 if (reg_count > 2)
4513 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
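/* E.g. a SIMD16 instruction whose largest region spans four GRFs is
 * limited to 16 / DIV_ROUND_UP(4, 2) = SIMD8, so each half touches
 * at most two GRFs.
 */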
4514
4515 /* According to the IVB PRMs:
4516 * "When destination spans two registers, the source MUST span two
4517 * registers. The exception to the above rule:
4518 *
4519 * - When source is scalar, the source registers are not incremented.
4520 * - When source is packed integer Word and destination is packed
4521 * integer DWord, the source register is not incremented but the
4522 * source sub register is incremented."
4523 *
4524 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
4525 * restrictions. The code below intentionally doesn't check whether the
4526 * destination type is integer because empirically the hardware doesn't
4527 * seem to care what the actual type is as long as it's dword-aligned.
4528 */
4529 if (devinfo->gen < 8) {
4530 for (unsigned i = 0; i < inst->sources; i++) {
4531 /* IVB implements DF scalars as <0;2,1> regions. */
4532 const bool is_scalar_exception = is_uniform(inst->src[i]) &&
4533 (devinfo->is_haswell || type_sz(inst->src[i].type) != 8);
4534 const bool is_packed_word_exception =
4535 type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
4536 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1;
4537
4538 if (inst->size_written > REG_SIZE &&
4539 inst->size_read(i) != 0 && inst->size_read(i) <= REG_SIZE &&
4540 !is_scalar_exception && !is_packed_word_exception) {
4541 const unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
4542 max_width = MIN2(max_width, inst->exec_size / reg_count);
4543 }
4544 }
4545 }
4546
4547 /* From the IVB PRMs:
4548 * "When an instruction is SIMD32, the low 16 bits of the execution mask
4549 * are applied for both halves of the SIMD32 instruction. If different
4550 * execution mask channels are required, split the instruction into two
4551 * SIMD16 instructions."
4552 *
4553 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
4554 * 32-wide control flow support in hardware and will behave similarly.
4555 */
4556 if (devinfo->gen < 8 && !inst->force_writemask_all)
4557 max_width = MIN2(max_width, 16);
4558
4559 /* From the IVB PRMs (applies to HSW too):
4560 * "Instructions with condition modifiers must not use SIMD32."
4561 *
4562 * From the BDW PRMs (applies to later hardware too):
4563 * "Ternary instruction with condition modifiers must not use SIMD32."
4564 */
4565 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
4566 max_width = MIN2(max_width, 16);
4567
4568 /* From the IVB PRMs (applies to other devices that don't have the
4569 * gen_device_info::supports_simd16_3src flag set):
4570 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
4571 * SIMD8 is not allowed for DF operations."
4572 */
4573 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
4574 max_width = MIN2(max_width, inst->exec_size / reg_count);
4575
4576 /* Pre-Gen8 EUs are hardwired to use the QtrCtrl+1 (where QtrCtrl is
4577 * the 8-bit quarter of the execution mask signals specified in the
4578 * instruction control fields) for the second compressed half of any
4579 * single-precision instruction (for double-precision instructions
4580 * it's hardwired to use NibCtrl+1, at least on HSW), which means that
4581 * the EU will apply the wrong execution controls for the second
4582 * sequential GRF write if the number of channels per GRF is not exactly
4583 * eight in single-precision mode (or four in double-float mode).
4584 *
4585 * In this situation we calculate the maximum size of the split
4586 * instructions so they only ever write to a single register.
4587 */
4588 if (devinfo->gen < 8 && inst->size_written > REG_SIZE &&
4589 !inst->force_writemask_all) {
4590 const unsigned channels_per_grf = inst->exec_size /
4591 DIV_ROUND_UP(inst->size_written, REG_SIZE);
4592 const unsigned exec_type_size = get_exec_type_size(inst);
4593 assert(exec_type_size);
4594
4595 /* The hardware shifts exactly 8 channels per compressed half of the
4596 * instruction in single-precision mode and exactly 4 in double-precision.
4597 */
4598 if (channels_per_grf != (exec_type_size == 8 ? 4 : 8))
4599 max_width = MIN2(max_width, channels_per_grf);
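/* E.g. a SIMD16 float MOV with a destination stride of 2 writes
 * 16 * 8 = 128 bytes = 4 GRFs, so channels_per_grf == 4 != 8 and
 * the instruction is split into SIMD4 chunks that each fill a
 * single GRF.
 */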
4600
4601 /* Lower all non-force_writemask_all DF instructions to SIMD4 on IVB/BYT
4602 * because HW applies the same channel enable signals to both halves of
4603 * the compressed instruction which will be just wrong under
4604 * non-uniform control flow.
4605 */
4606 if (devinfo->gen == 7 && !devinfo->is_haswell &&
4607 (exec_type_size == 8 || type_sz(inst->dst.type) == 8))
4608 max_width = MIN2(max_width, 4);
4609 }
4610
4611 /* Only power-of-two execution sizes are representable in the instruction
4612 * control fields.
4613 */
4614 return 1 << _mesa_logbase2(max_width);
4615 }
4616
4617 /**
4618 * Get the maximum allowed SIMD width for instruction \p inst accounting for
4619 * various payload size restrictions that apply to sampler message
4620 * instructions.
4621 *
4622 * This is only intended to provide a maximum theoretical bound for the
4623 * execution size of the message based on the number of argument components
4624 * alone, which in most cases will determine whether the SIMD8 or SIMD16
4625 * variant of the message can be used, though some messages may have
4626 * additional restrictions not accounted for here (e.g. pre-ILK hardware uses
4627 * the message length to determine the exact SIMD width and argument count,
4628 * which makes a number of sampler message combinations impossible to
4629 * represent).
4630 */
4631 static unsigned
4632 get_sampler_lowered_simd_width(const struct gen_device_info *devinfo,
4633 const fs_inst *inst)
4634 {
4635 /* Calculate the number of coordinate components that have to be present
4636 * assuming that additional arguments follow the texel coordinates in the
4637 * message payload. On IVB+ there is no need for padding; on ILK-SNB we
4638 * need to pad to four or three components depending on the message;
4639 * pre-ILK we need to pad to at most three components.
4640 */
4641 const unsigned req_coord_components =
4642 (devinfo->gen >= 7 ||
4643 !inst->components_read(TEX_LOGICAL_SRC_COORDINATE)) ? 0 :
4644 (devinfo->gen >= 5 && inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
4645 inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL) ? 4 :
4646 3;
4647
4648 /* On Gen9+ the LOD argument is for free if we're able to use the LZ
4649 * variant of the TXL or TXF message.
4650 */
4651 const bool implicit_lod = devinfo->gen >= 9 &&
4652 (inst->opcode == SHADER_OPCODE_TXL ||
4653 inst->opcode == SHADER_OPCODE_TXF) &&
4654 inst->src[TEX_LOGICAL_SRC_LOD].is_zero();
4655
4656 /* Calculate the total number of argument components that need to be passed
4657 * to the sampler unit.
4658 */
4659 const unsigned num_payload_components =
4660 MAX2(inst->components_read(TEX_LOGICAL_SRC_COORDINATE),
4661 req_coord_components) +
4662 inst->components_read(TEX_LOGICAL_SRC_SHADOW_C) +
4663 (implicit_lod ? 0 : inst->components_read(TEX_LOGICAL_SRC_LOD)) +
4664 inst->components_read(TEX_LOGICAL_SRC_LOD2) +
4665 inst->components_read(TEX_LOGICAL_SRC_SAMPLE_INDEX) +
4666 (inst->opcode == SHADER_OPCODE_TG4_OFFSET_LOGICAL ?
4667 inst->components_read(TEX_LOGICAL_SRC_TG4_OFFSET) : 0) +
4668 inst->components_read(TEX_LOGICAL_SRC_MCS);
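/* Worked example (assuming MAX_SAMPLER_MESSAGE_SIZE == 11, as in
 * brw_eu.h): a TXL on SNB with a 3-component coordinate and a shadow
 * comparator pads the coordinate to 4 and adds 1 (shadow) + 1 (LOD)
 * = 6 components, which exceeds 11 / 2 = 5, so the message is
 * lowered to SIMD8 below.
 */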
4669
4670 /* SIMD16 messages with more than five arguments exceed the maximum message
4671 * size supported by the sampler, regardless of whether a header is
4672 * provided or not.
4673 */
4674 return MIN2(inst->exec_size,
4675 num_payload_components > MAX_SAMPLER_MESSAGE_SIZE / 2 ? 8 : 16);
4676 }
4677
4678 /**
4679 * Get the closest native SIMD width supported by the hardware for instruction
4680 * \p inst. The instruction will be left untouched by
4681 * fs_visitor::lower_simd_width() if the returned value is equal to the
4682 * original execution size.
4683 */
4684 static unsigned
4685 get_lowered_simd_width(const struct gen_device_info *devinfo,
4686 const fs_inst *inst)
4687 {
4688 switch (inst->opcode) {
4689 case BRW_OPCODE_MOV:
4690 case BRW_OPCODE_SEL:
4691 case BRW_OPCODE_NOT:
4692 case BRW_OPCODE_AND:
4693 case BRW_OPCODE_OR:
4694 case BRW_OPCODE_XOR:
4695 case BRW_OPCODE_SHR:
4696 case BRW_OPCODE_SHL:
4697 case BRW_OPCODE_ASR:
4698 case BRW_OPCODE_CMPN:
4699 case BRW_OPCODE_CSEL:
4700 case BRW_OPCODE_F32TO16:
4701 case BRW_OPCODE_F16TO32:
4702 case BRW_OPCODE_BFREV:
4703 case BRW_OPCODE_BFE:
4704 case BRW_OPCODE_ADD:
4705 case BRW_OPCODE_MUL:
4706 case BRW_OPCODE_AVG:
4707 case BRW_OPCODE_FRC:
4708 case BRW_OPCODE_RNDU:
4709 case BRW_OPCODE_RNDD:
4710 case BRW_OPCODE_RNDE:
4711 case BRW_OPCODE_RNDZ:
4712 case BRW_OPCODE_LZD:
4713 case BRW_OPCODE_FBH:
4714 case BRW_OPCODE_FBL:
4715 case BRW_OPCODE_CBIT:
4716 case BRW_OPCODE_SAD2:
4717 case BRW_OPCODE_MAD:
4718 case BRW_OPCODE_LRP:
4719 case FS_OPCODE_PACK:
4720 return get_fpu_lowered_simd_width(devinfo, inst);
4721
4722 case BRW_OPCODE_CMP: {
4723 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
4724 * when the destination is a GRF the dependency-clear bit on the flag
4725 * register is cleared early.
4726 *
4727 * Suggested workarounds are to disable coissuing CMP instructions
4728 * or to split CMP(16) instructions into two CMP(8) instructions.
4729 *
4730 * We choose to split into CMP(8) instructions since disabling
4731 * coissuing would affect CMP instructions not otherwise affected by
4732 * the errata.
4733 */
4734 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
4735 !inst->dst.is_null() ? 8 : ~0);
4736 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
4737 }
4738 case BRW_OPCODE_BFI1:
4739 case BRW_OPCODE_BFI2:
4740 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
4741 * should
4742 * "Force BFI instructions to be executed always in SIMD8."
4743 */
4744 return MIN2(devinfo->is_haswell ? 8 : ~0u,
4745 get_fpu_lowered_simd_width(devinfo, inst));
4746
4747 case BRW_OPCODE_IF:
4748 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
4749 return inst->exec_size;
4750
4751 case SHADER_OPCODE_RCP:
4752 case SHADER_OPCODE_RSQ:
4753 case SHADER_OPCODE_SQRT:
4754 case SHADER_OPCODE_EXP2:
4755 case SHADER_OPCODE_LOG2:
4756 case SHADER_OPCODE_SIN:
4757 case SHADER_OPCODE_COS:
4758 /* Unary extended math instructions are limited to SIMD8 on Gen4
4759 * (except G4X) and Gen6; Gen5, G4X and Gen7+ allow SIMD16.
4760 */
4761 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4762 devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
4763 MIN2(8, inst->exec_size));
4764
4765 case SHADER_OPCODE_POW:
4766 /* SIMD16 is only allowed on Gen7+. */
4767 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4768 MIN2(8, inst->exec_size));
4769
4770 case SHADER_OPCODE_INT_QUOTIENT:
4771 case SHADER_OPCODE_INT_REMAINDER:
4772 /* Integer division is limited to SIMD8 on all generations. */
4773 return MIN2(8, inst->exec_size);
4774
4775 case FS_OPCODE_LINTERP:
4776 case FS_OPCODE_GET_BUFFER_SIZE:
4777 case FS_OPCODE_DDX_COARSE:
4778 case FS_OPCODE_DDX_FINE:
4779 case FS_OPCODE_DDY_COARSE:
4780 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
4781 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
4782 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
4783 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
4784 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
4785 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
4786 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
4787 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
4788 return MIN2(16, inst->exec_size);
4789
4790 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4791 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
4792 * message used to implement varying pull constant loads, so expand it
4793 * to SIMD16. An alternative with longer message payload length but
4794 * shorter return payload would be to use the SIMD8 sampler message that
4795 * takes (header, u, v, r) as parameters instead of (header, u).
4796 */
4797 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
4798
4799 case FS_OPCODE_DDY_FINE:
4800 /* The implementation of this virtual opcode may require emitting
4801 * compressed Align16 instructions, which are severely limited on some
4802 * generations.
4803 *
4804 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
4805 * Region Restrictions):
4806 *
4807 * "In Align16 access mode, SIMD16 is not allowed for DW operations
4808 * and SIMD8 is not allowed for DF operations."
4809 *
4810 * In this context, "DW operations" means "operations acting on 32-bit
4811 * values", so it includes operations on floats.
4812 *
4813 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
4814 * (Instruction Compression -> Rules and Restrictions):
4815 *
4816 * "A compressed instruction must be in Align1 access mode. Align16
4817 * mode instructions cannot be compressed."
4818 *
4819 * Similar text exists in the g45 PRM.
4820 *
4821 * Empirically, compressed align16 instructions using odd register
4822 * numbers don't appear to work on Sandybridge either.
4823 */
4824 return (devinfo->gen == 4 || devinfo->gen == 6 ||
4825 (devinfo->gen == 7 && !devinfo->is_haswell) ?
4826 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
4827
4828 case SHADER_OPCODE_MULH:
4829 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4830 * is 8-wide on Gen7+.
4831 */
4832 return (devinfo->gen >= 7 ? 8 :
4833 get_fpu_lowered_simd_width(devinfo, inst));
4834
4835 case FS_OPCODE_FB_WRITE_LOGICAL:
4836 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4837 * here.
4838 */
4839 assert(devinfo->gen != 6 ||
4840 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
4841 inst->exec_size == 8);
4842 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4843 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
4844 8 : MIN2(16, inst->exec_size));
4845
4846 case FS_OPCODE_FB_READ_LOGICAL:
4847 return MIN2(16, inst->exec_size);
4848
4849 case SHADER_OPCODE_TEX_LOGICAL:
4850 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4851 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4852 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4853 case SHADER_OPCODE_LOD_LOGICAL:
4854 case SHADER_OPCODE_TG4_LOGICAL:
4855 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4856 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
4857 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4858 return get_sampler_lowered_simd_width(devinfo, inst);
4859
4860 case SHADER_OPCODE_TXD_LOGICAL:
4861 /* TXD is unsupported in SIMD16 mode. */
4862 return 8;
4863
4864 case SHADER_OPCODE_TXL_LOGICAL:
4865 case FS_OPCODE_TXB_LOGICAL:
4866 /* Only one execution size is representable pre-ILK depending on whether
4867 * the shadow reference argument is present.
4868 */
4869 if (devinfo->gen == 4)
4870 return inst->src[TEX_LOGICAL_SRC_SHADOW_C].file == BAD_FILE ? 16 : 8;
4871 else
4872 return get_sampler_lowered_simd_width(devinfo, inst);
4873
4874 case SHADER_OPCODE_TXF_LOGICAL:
4875 case SHADER_OPCODE_TXS_LOGICAL:
4876 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
4877 * messages. Use SIMD16 instead.
4878 */
4879 if (devinfo->gen == 4)
4880 return 16;
4881 else
4882 return get_sampler_lowered_simd_width(devinfo, inst);
4883
4884 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4885 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4886 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4887 return 8;
4888
4889 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4890 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4891 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4892 return MIN2(16, inst->exec_size);
4893
4894 case SHADER_OPCODE_URB_READ_SIMD8:
4895 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
4896 case SHADER_OPCODE_URB_WRITE_SIMD8:
4897 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
4898 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
4899 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
4900 return MIN2(8, inst->exec_size);
4901
4902 case SHADER_OPCODE_MOV_INDIRECT: {
4903 /* From IVB and HSW PRMs:
4904 *
4905 * "2.When the destination requires two registers and the sources are
4906 * indirect, the sources must use 1x1 regioning mode.
4907 *
4908 * In case of DF instructions in HSW/IVB, the exec_size is limited by
4909 * the EU decompression logic not handling VxH indirect addressing
4910 * correctly.
4911 */
4912 const unsigned max_size = (devinfo->gen >= 8 ? 2 : 1) * REG_SIZE;
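/* E.g. for a packed 32-bit destination this allows
 * 2 * 32 / 4 = 16 channels on BDW+ but only 32 / 4 = 8 on IVB/HSW.
 */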
4913 /* Prior to Broadwell, we only have 8 address subregisters. */
4914 return MIN3(devinfo->gen >= 8 ? 16 : 8,
4915 max_size / (inst->dst.stride * type_sz(inst->dst.type)),
4916 inst->exec_size);
4917 }
4918
4919 case SHADER_OPCODE_LOAD_PAYLOAD: {
4920 const unsigned reg_count =
4921 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
4922
4923 if (reg_count > 2) {
4924 /* Only LOAD_PAYLOAD instructions with a per-channel destination
4925 * region can be easily lowered (which excludes headers and heterogeneous
4926 * types).
4927 */
4928 assert(!inst->header_size);
4929 for (unsigned i = 0; i < inst->sources; i++)
4930 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
4931 inst->src[i].file == BAD_FILE);
4932
4933 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
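/* E.g. a SIMD32 float LOAD_PAYLOAD whose per-channel destination
 * spans 4 GRFs is lowered to 32 / 2 = SIMD16 halves of 2 GRFs each.
 */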
4934 } else {
4935 return inst->exec_size;
4936 }
4937 }
4938 default:
4939 return inst->exec_size;
4940 }
4941 }
4942
4943 /**
4944 * Return true if splitting out the group of channels of instruction \p inst
4945 * given by lbld.group() requires allocating a temporary for the i-th source
4946 * of the lowered instruction.
4947 */
4948 static inline bool
4949 needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
4950 {
4951 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
4952 (inst->components_read(i) == 1 &&
4953 lbld.dispatch_width() <= inst->exec_size));
4954 }
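/* For instance, when a SIMD16 instruction is split into two SIMD8
 * halves, a uniform (stride-0) source is periodic and can be reused
 * as-is, a single-component GRF source can simply be offset to the
 * right channel group, and anything else needs the packed temporary
 * copy emitted by emit_unzip() below.
 */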
4955
4956 /**
4957 * Extract the data that would be consumed by the channel group given by
4958 * lbld.group() from the i-th source region of instruction \p inst and
4959 * return it in packed form. If any copy instructions are required they
4960 * will be emitted before the given \p inst in \p block.
4961 */
4962 static fs_reg
4963 emit_unzip(const fs_builder &lbld, bblock_t *block, fs_inst *inst,
4964 unsigned i)
4965 {
4966 /* Specified channel group from the source region. */
4967 const fs_reg src = horiz_offset(inst->src[i], lbld.group());
4968
4969 if (needs_src_copy(lbld, inst, i)) {
4970 /* Builder of the right width to perform the copy avoiding uninitialized
4971 * data if the lowered execution size is greater than the original
4972 * execution size of the instruction.
4973 */
4974 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
4975 inst->exec_size), 0);
4976 const fs_reg tmp = lbld.vgrf(inst->src[i].type, inst->components_read(i));
4977
4978 for (unsigned k = 0; k < inst->components_read(i); ++k)
4979 cbld.at(block, inst)
4980 .MOV(offset(tmp, lbld, k), offset(src, inst->exec_size, k));
4981
4982 return tmp;
4983
4984 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
4985 /* The source is invariant for all dispatch_width-wide groups of the
4986 * original region.
4987 */
4988 return inst->src[i];
4989
4990 } else {
4991 /* We can just point the lowered instruction at the right channel group
4992 * from the original region.
4993 */
4994 return src;
4995 }
4996 }
4997
4998 /**
4999 * Return true if splitting out the group of channels of instruction \p inst
5000 * given by lbld.group() requires allocating a temporary for the destination
5001 * of the lowered instruction and copying the data back to the original
5002 * destination region.
5003 */
5004 static inline bool
5005 needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
5006 {
5007 /* If the instruction writes more than one component we'll have to shuffle
5008 * the results of multiple lowered instructions in order to make sure that
5009 * they end up arranged correctly in the original destination region.
5010 */
5011 if (inst->size_written > inst->dst.component_size(inst->exec_size))
5012 return true;
5013
5014 /* If the lowered execution size is larger than the original the result of
5015 * the instruction won't fit in the original destination, so we'll have to
5016 * allocate a temporary in any case.
5017 */
5018 if (lbld.dispatch_width() > inst->exec_size)
5019 return true;
5020
5021 for (unsigned i = 0; i < inst->sources; i++) {
5022 /* If we already made a copy of the source for other reasons there won't
5023 * be any overlap with the destination.
5024 */
5025 if (needs_src_copy(lbld, inst, i))
5026 continue;
5027
5028 /* In order to keep the logic simple we emit a copy whenever the
5029 * destination region doesn't exactly match an overlapping source: a
5030 * mismatch may mean the source and destination aren't aligned group by
5031 * group, in which case one of the lowered instructions could overwrite
5032 * data read from the same source by another lowered instruction.
5033 */
5034 if (regions_overlap(inst->dst, inst->size_written,
5035 inst->src[i], inst->size_read(i)) &&
5036 !inst->dst.equals(inst->src[i]))
5037 return true;
5038 }
5039
5040 return false;
5041 }
5042
5043 /**
5044 * Insert data from a packed temporary into the channel group given by
5045 * lbld.group() of the destination region of instruction \p inst and return
5046 * the temporary as the result. If any copy instructions are required they will
5047 * be emitted around the given \p inst in \p block.
5048 */
5049 static fs_reg
5050 emit_zip(const fs_builder &lbld, bblock_t *block, fs_inst *inst)
5051 {
5052 /* Builder of the right width to perform the copy avoiding uninitialized
5053 * data if the lowered execution size is greater than the original
5054 * execution size of the instruction.
5055 */
5056 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5057 inst->exec_size), 0);
5058
5059 /* Specified channel group from the destination region. */
5060 const fs_reg dst = horiz_offset(inst->dst, lbld.group());
5061 const unsigned dst_size = inst->size_written /
5062 inst->dst.component_size(inst->exec_size);
5063
5064 if (needs_dst_copy(lbld, inst)) {
5065 const fs_reg tmp = lbld.vgrf(inst->dst.type, dst_size);
5066
5067 if (inst->predicate) {
5068 /* Handle predication by copying the original contents of
5069 * the destination into the temporary before emitting the
5070 * lowered instruction.
5071 */
5072 for (unsigned k = 0; k < dst_size; ++k)
5073 cbld.at(block, inst)
5074 .MOV(offset(tmp, lbld, k), offset(dst, inst->exec_size, k));
5075 }
5076
5077 for (unsigned k = 0; k < dst_size; ++k)
5078 cbld.at(block, inst->next)
5079 .MOV(offset(dst, inst->exec_size, k), offset(tmp, lbld, k));
5080
5081 return tmp;
5082
5083 } else {
5084 /* No need to allocate a temporary for the lowered instruction, just
5085 * take the right group of channels from the original region.
5086 */
5087 return dst;
5088 }
5089 }
5090
5091 bool
5092 fs_visitor::lower_simd_width()
5093 {
5094 bool progress = false;
5095
5096 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5097 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
5098
5099 if (lower_width != inst->exec_size) {
5100 /* Builder matching the original instruction. We may also need to
5101 * emit an instruction of width larger than the original, so set the
5102 * execution size of the builder to the larger of the two for now to
5103 * be sure that both cases can be handled.
5104 */
5105 const unsigned max_width = MAX2(inst->exec_size, lower_width);
5106 const fs_builder ibld = bld.at(block, inst)
5107 .exec_all(inst->force_writemask_all)
5108 .group(max_width, inst->group / max_width);
5109
5110 /* Split the copies into chunks of the execution width of either the
5111 * original or the lowered instruction, whichever is lower.
5112 */
5113 const unsigned n = DIV_ROUND_UP(inst->exec_size, lower_width);
5114 const unsigned dst_size = inst->size_written /
5115 inst->dst.component_size(inst->exec_size);
5116
5117 assert(!inst->writes_accumulator && !inst->mlen);
5118
5119 for (unsigned i = 0; i < n; i++) {
5120 /* Emit a copy of the original instruction with the lowered width.
5121 * If the EOT flag was set, throw it away except on the last
5122 * instruction, to avoid killing the thread prematurely.
5123 */
5124 fs_inst split_inst = *inst;
5125 split_inst.exec_size = lower_width;
5126 split_inst.eot = inst->eot && i == n - 1;
5127
5128 /* Select the correct channel enables for the i-th group, then
5129 * transform the sources and destination and emit the lowered
5130 * instruction.
5131 */
5132 const fs_builder lbld = ibld.group(lower_width, i);
5133
5134 for (unsigned j = 0; j < inst->sources; j++)
5135 split_inst.src[j] = emit_unzip(lbld, block, inst, j);
5136
5137 split_inst.dst = emit_zip(lbld, block, inst);
5138 split_inst.size_written =
5139 split_inst.dst.component_size(lower_width) * dst_size;
5140
5141 lbld.emit(split_inst);
5142 }
5143
5144 inst->remove(block);
5145 progress = true;
5146 }
5147 }
5148
5149 if (progress)
5150 invalidate_live_intervals();
5151
5152 return progress;
5153 }
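/* Worked example (hypothetical instruction): a SIMD16 ADD whose
 * get_lowered_simd_width() is 8 yields n = DIV_ROUND_UP(16, 8) = 2 split
 * instructions. Iteration i uses lbld = ibld.group(8, i), so the i-th copy
 * covers channels 8*i..8*i+7; if the original had EOT set, only the last
 * (i == n - 1) copy keeps it, and the original SIMD16 instruction is
 * removed afterwards.
 */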
5154
5155 void
5156 fs_visitor::dump_instructions()
5157 {
5158 dump_instructions(NULL);
5159 }
5160
5161 void
5162 fs_visitor::dump_instructions(const char *name)
5163 {
5164 FILE *file = stderr;
5165 if (name && geteuid() != 0) {
5166 file = fopen(name, "w");
5167 if (!file)
5168 file = stderr;
5169 }
5170
5171 if (cfg) {
5172 calculate_register_pressure();
5173 int ip = 0, max_pressure = 0;
5174 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
5175 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
5176 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
5177 dump_instruction(inst, file);
5178 ip++;
5179 }
5180 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
5181 } else {
5182 int ip = 0;
5183 foreach_in_list(backend_instruction, inst, &instructions) {
5184 fprintf(file, "%4d: ", ip++);
5185 dump_instruction(inst, file);
5186 }
5187 }
5188
5189 if (file != stderr) {
5190 fclose(file);
5191 }
5192 }
5193
5194 void
5195 fs_visitor::dump_instruction(backend_instruction *be_inst)
5196 {
5197 dump_instruction(be_inst, stderr);
5198 }
5199
5200 void
5201 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
5202 {
5203 fs_inst *inst = (fs_inst *)be_inst;
5204
5205 if (inst->predicate) {
5206 fprintf(file, "(%cf0.%d) ",
5207 inst->predicate_inverse ? '-' : '+',
5208 inst->flag_subreg);
5209 }
5210
5211 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
5212 if (inst->saturate)
5213 fprintf(file, ".sat");
5214 if (inst->conditional_mod) {
5215 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
5216 if (!inst->predicate &&
5217 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
5218 inst->opcode != BRW_OPCODE_IF &&
5219 inst->opcode != BRW_OPCODE_WHILE))) {
5220 fprintf(file, ".f0.%d", inst->flag_subreg);
5221 }
5222 }
5223 fprintf(file, "(%d) ", inst->exec_size);
5224
5225 if (inst->mlen) {
5226 fprintf(file, "(mlen: %d) ", inst->mlen);
5227 }
5228
5229 if (inst->eot) {
5230 fprintf(file, "(EOT) ");
5231 }
5232
5233 switch (inst->dst.file) {
5234 case VGRF:
5235 fprintf(file, "vgrf%d", inst->dst.nr);
5236 break;
5237 case FIXED_GRF:
5238 fprintf(file, "g%d", inst->dst.nr);
5239 break;
5240 case MRF:
5241 fprintf(file, "m%d", inst->dst.nr);
5242 break;
5243 case BAD_FILE:
5244 fprintf(file, "(null)");
5245 break;
5246 case UNIFORM:
5247 fprintf(file, "***u%d***", inst->dst.nr);
5248 break;
5249 case ATTR:
5250 fprintf(file, "***attr%d***", inst->dst.nr);
5251 break;
5252 case ARF:
5253 switch (inst->dst.nr) {
5254 case BRW_ARF_NULL:
5255 fprintf(file, "null");
5256 break;
5257 case BRW_ARF_ADDRESS:
5258 fprintf(file, "a0.%d", inst->dst.subnr);
5259 break;
5260 case BRW_ARF_ACCUMULATOR:
5261 fprintf(file, "acc%d", inst->dst.subnr);
5262 break;
5263 case BRW_ARF_FLAG:
5264 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5265 break;
5266 default:
5267 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5268 break;
5269 }
5270 break;
5271 case IMM:
5272 unreachable("not reached");
5273 }
5274
5275 if (inst->dst.offset ||
5276 (inst->dst.file == VGRF &&
5277 alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
5278 const unsigned reg_size = (inst->dst.file == UNIFORM ? 4 : REG_SIZE);
5279 fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
5280 inst->dst.offset % reg_size);
5281 }
5282
5283 if (inst->dst.stride != 1)
5284 fprintf(file, "<%u>", inst->dst.stride);
5285 fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
5286
5287 for (int i = 0; i < inst->sources; i++) {
5288 if (inst->src[i].negate)
5289 fprintf(file, "-");
5290 if (inst->src[i].abs)
5291 fprintf(file, "|");
5292 switch (inst->src[i].file) {
5293 case VGRF:
5294 fprintf(file, "vgrf%d", inst->src[i].nr);
5295 break;
5296 case FIXED_GRF:
5297 fprintf(file, "g%d", inst->src[i].nr);
5298 break;
5299 case MRF:
5300 fprintf(file, "***m%d***", inst->src[i].nr);
5301 break;
5302 case ATTR:
5303 fprintf(file, "attr%d", inst->src[i].nr);
5304 break;
5305 case UNIFORM:
5306 fprintf(file, "u%d", inst->src[i].nr);
5307 break;
5308 case BAD_FILE:
5309 fprintf(file, "(null)");
5310 break;
5311 case IMM:
5312 switch (inst->src[i].type) {
5313 case BRW_REGISTER_TYPE_F:
5314 fprintf(file, "%-gf", inst->src[i].f);
5315 break;
5316 case BRW_REGISTER_TYPE_DF:
5317 fprintf(file, "%fdf", inst->src[i].df);
5318 break;
5319 case BRW_REGISTER_TYPE_W:
5320 case BRW_REGISTER_TYPE_D:
5321 fprintf(file, "%dd", inst->src[i].d);
5322 break;
5323 case BRW_REGISTER_TYPE_UW:
5324 case BRW_REGISTER_TYPE_UD:
5325 fprintf(file, "%uu", inst->src[i].ud);
5326 break;
5327 case BRW_REGISTER_TYPE_VF:
5328 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
5329 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
5330 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
5331 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
5332 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
5333 break;
5334 default:
5335 fprintf(file, "???");
5336 break;
5337 }
5338 break;
5339 case ARF:
5340 switch (inst->src[i].nr) {
5341 case BRW_ARF_NULL:
5342 fprintf(file, "null");
5343 break;
5344 case BRW_ARF_ADDRESS:
5345 fprintf(file, "a0.%d", inst->src[i].subnr);
5346 break;
5347 case BRW_ARF_ACCUMULATOR:
5348 fprintf(file, "acc%d", inst->src[i].subnr);
5349 break;
5350 case BRW_ARF_FLAG:
5351 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5352 break;
5353 default:
5354 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5355 break;
5356 }
5357 break;
5358 }
5359
5360 if (inst->src[i].offset ||
5361 (inst->src[i].file == VGRF &&
5362 alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
5363 const unsigned reg_size = (inst->src[i].file == UNIFORM ? 4 : REG_SIZE);
5364 fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
5365 inst->src[i].offset % reg_size);
5366 }
5367
5368 if (inst->src[i].abs)
5369 fprintf(file, "|");
5370
5371 if (inst->src[i].file != IMM) {
5372 unsigned stride;
5373 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
5374 unsigned hstride = inst->src[i].hstride;
5375 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
5376 } else {
5377 stride = inst->src[i].stride;
5378 }
5379 if (stride != 1)
5380 fprintf(file, "<%u>", stride);
5381
5382 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
5383 }
5384
5385 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
5386 fprintf(file, ", ");
5387 }
5388
5389 fprintf(file, " ");
5390
5391 if (inst->force_writemask_all)
5392 fprintf(file, "NoMask ");
5393
5394 if (inst->exec_size != dispatch_width)
5395 fprintf(file, "group%d ", inst->group);
5396
5397 fprintf(file, "\n");
5398 }
5399
5400 /**
5401 * Possibly returns an instruction that set up @param reg.
5402 *
5403 * Sometimes we want to take the result of some expression/variable
5404 * dereference tree and rewrite the instruction generating the result
5405 * of the tree. When processing the tree, we know that the
5406 * instructions generated are all writing temporaries that are dead
5407 * outside of this tree. So, if we have some instructions that write
5408 * a temporary, we're free to point that temp write somewhere else.
5409 *
5410 * Note that this doesn't guarantee that the returned instruction wrote
5411 * only reg -- it might be the size=4 destination of a texture instruction.
5412 */
5413 fs_inst *
5414 fs_visitor::get_instruction_generating_reg(fs_inst *start,
5415 fs_inst *end,
5416 const fs_reg &reg)
5417 {
5418 if (end == start ||
5419 end->is_partial_write() ||
5420 !reg.equals(end->dst)) {
5421 return NULL;
5422 } else {
5423 return end;
5424 }
5425 }
5426
5427 void
5428 fs_visitor::setup_fs_payload_gen6()
5429 {
5430 assert(stage == MESA_SHADER_FRAGMENT);
5431 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
5432
5433 assert(devinfo->gen >= 6);
5434
5435 /* R0-1: masks, pixel X/Y coordinates. */
5436 payload.num_regs = 2;
5437 /* R2: only for 32-pixel dispatch. */
5438
5439 /* R3-26: barycentric interpolation coordinates. These appear in the
5440 * same order that they appear in the brw_barycentric_mode
5441 * enum. Each set of coordinates occupies 2 registers if dispatch width
5442 * == 8 and 4 registers if dispatch width == 16. Coordinates only
5443 * appear if they were enabled using the "Barycentric Interpolation
5444 * Mode" bits in WM_STATE.
5445 */
5446 for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
5447 if (prog_data->barycentric_interp_modes & (1 << i)) {
5448 payload.barycentric_coord_reg[i] = payload.num_regs;
5449 payload.num_regs += 2;
5450 if (dispatch_width == 16) {
5451 payload.num_regs += 2;
5452 }
5453 }
5454 }
5455
5456 /* R27: interpolated depth if uses source depth */
5457 prog_data->uses_src_depth =
5458 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5459 if (prog_data->uses_src_depth) {
5460 payload.source_depth_reg = payload.num_regs;
5461 payload.num_regs++;
5462 if (dispatch_width == 16) {
5463 /* R28: interpolated depth if not SIMD8. */
5464 payload.num_regs++;
5465 }
5466 }
5467
5468 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
5469 prog_data->uses_src_w =
5470 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5471 if (prog_data->uses_src_w) {
5472 payload.source_w_reg = payload.num_regs;
5473 payload.num_regs++;
5474 if (dispatch_width == 16) {
5475 /* R30: interpolated W if not SIMD8. */
5476 payload.num_regs++;
5477 }
5478 }
5479
5480 /* R31: MSAA position offsets. */
5481 if (prog_data->persample_dispatch &&
5482 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
5483 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
5484 *
5485 * "MSDISPMODE_PERSAMPLE is required in order to select
5486 * POSOFFSET_SAMPLE"
5487 *
5488 * So we can only really get sample positions if we are doing real
5489 * per-sample dispatch. If we need gl_SamplePosition and we don't have
5490 * persample dispatch, we hard-code it to 0.5.
5491 */
5492 prog_data->uses_pos_offset = true;
5493 payload.sample_pos_reg = payload.num_regs;
5494 payload.num_regs++;
5495 }
5496
5497 /* R32: MSAA input coverage mask */
5498 prog_data->uses_sample_mask =
5499 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
5500 if (prog_data->uses_sample_mask) {
5501 assert(devinfo->gen >= 7);
5502 payload.sample_mask_in_reg = payload.num_regs;
5503 payload.num_regs++;
5504 if (dispatch_width == 16) {
5505 /* R33: input coverage mask if not SIMD8. */
5506 payload.num_regs++;
5507 }
5508 }
5509
5510 /* R34-: bary for 32-pixel. */
5511 /* R58-59: interp W for 32-pixel. */
5512
5513 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
5514 source_depth_to_render_target = true;
5515 }
5516 }
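/* Worked example (hypothetical shader): for a SIMD16 fragment shader with
 * one barycentric mode enabled plus source depth, num_regs grows from 2
 * (masks, pixel X/Y) by 2 + 2 registers of barycentric coordinates (the
 * extra 2 because dispatch_width == 16) and by 1 + 1 registers of
 * interpolated depth, giving payload.num_regs == 8.
 */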
5517
5518 void
5519 fs_visitor::setup_vs_payload()
5520 {
5521 /* R0: thread header, R1: urb handles */
5522 payload.num_regs = 2;
5523 }
5524
5525 void
5526 fs_visitor::setup_gs_payload()
5527 {
5528 assert(stage == MESA_SHADER_GEOMETRY);
5529
5530 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
5531 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
5532
5533 /* R0: thread header, R1: output URB handles */
5534 payload.num_regs = 2;
5535
5536 if (gs_prog_data->include_primitive_id) {
5537 /* R2: Primitive ID 0..7 */
5538 payload.num_regs++;
5539 }
5540
5541 /* Use a maximum of 24 registers for push-model inputs. */
5542 const unsigned max_push_components = 24;
5543
5544 /* If pushing our inputs would take too many registers, reduce the URB read
5545 * length (which is in HWords, or 8 registers), and resort to pulling.
5546 *
5547 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
5548 * have to multiply by VerticesIn to obtain the total storage requirement.
5549 */
5550 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
5551 max_push_components || gs_prog_data->invocations > 1) {
5552 gs_prog_data->base.include_vue_handles = true;
5553
5554 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
5555 payload.num_regs += nir->info.gs.vertices_in;
5556
5557 vue_prog_data->urb_read_length =
5558 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
5559 }
5560 }
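/* Worked example (hypothetical shader): with vertices_in == 3 and
 * urb_read_length == 2, pushing would need 8 * 2 * 3 == 48 registers,
 * exceeding max_push_components == 24. The GS therefore switches to the
 * pull model, adds 3 ICP handle registers to the payload, and clamps
 * urb_read_length to ROUND_DOWN_TO(24 / 3, 8) / 8 == 1.
 */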
5561
5562 void
5563 fs_visitor::setup_cs_payload()
5564 {
5565 assert(devinfo->gen >= 7);
5566 payload.num_regs = 1;
5567 }
5568
5569 void
5570 fs_visitor::calculate_register_pressure()
5571 {
5572 invalidate_live_intervals();
5573 calculate_live_intervals();
5574
5575 unsigned num_instructions = 0;
5576 foreach_block(block, cfg)
5577 num_instructions += block->instructions.length();
5578
5579 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
5580
5581 for (unsigned reg = 0; reg < alloc.count; reg++) {
5582 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
5583 regs_live_at_ip[ip] += alloc.sizes[reg];
5584 }
5585 }
5586
5587 /**
5588 * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
5589 *
5590 * The needs_unlit_centroid_workaround ends up producing one of these per
5591 * channel of centroid input, so it's good to clean them up.
5592 *
5593 * An assumption here is that nothing ever modifies the dispatched pixels
5594 * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
5595 * dictates that anyway.
5596 */
5597 bool
5598 fs_visitor::opt_drop_redundant_mov_to_flags()
5599 {
5600 bool flag_mov_found[2] = {false};
5601 bool progress = false;
5602
5603 /* The instructions removed by this pass are only ever emitted when this is true. */
5604 if (!devinfo->needs_unlit_centroid_workaround)
5605 return false;
5606
5607 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5608 if (inst->is_control_flow()) {
5609 memset(flag_mov_found, 0, sizeof(flag_mov_found));
5610 } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
5611 if (!flag_mov_found[inst->flag_subreg]) {
5612 flag_mov_found[inst->flag_subreg] = true;
5613 } else {
5614 inst->remove(block);
5615 progress = true;
5616 }
5617 } else if (inst->flags_written()) {
5618 flag_mov_found[inst->flag_subreg] = false;
5619 }
5620 }
5621
5622 return progress;
5623 }
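/* For illustration (hypothetical sequence): two back-to-back
 * FS_OPCODE_MOV_DISPATCH_TO_FLAGS writing f0.1 with no intervening control
 * flow or flag writes leave flag_mov_found[1] set when the second one is
 * reached, so the second MOV is removed as redundant.
 */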
5624
5625 void
5626 fs_visitor::optimize()
5627 {
5628 /* Start by validating the shader we currently have. */
5629 validate();
5630
5631 /* bld is the common builder object pointing at the end of the program we
5632 * used to translate it into i965 IR. For the optimization and lowering
5633 * passes coming next, any code added after the end of the program without
5634 * having explicitly called fs_builder::at() clearly points at a mistake.
5635 * Ideally optimization passes wouldn't be part of the visitor so they
5636 * wouldn't have access to bld at all, but they do, so just in case some
5637 * pass forgets to ask for a location explicitly set it to NULL here to
5638 * make it trip. The dispatch width is initialized to a bogus value to
5639 * make sure that optimizations set the execution controls explicitly to
5640 * match the code they are manipulating instead of relying on the defaults.
5641 */
5642 bld = fs_builder(this, 64);
5643
5644 assign_constant_locations();
5645 lower_constant_loads();
5646
5647 validate();
5648
5649 split_virtual_grfs();
5650 validate();
5651
5652 #define OPT(pass, args...) ({ \
5653 pass_num++; \
5654 bool this_progress = pass(args); \
5655 \
5656 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
5657 char filename[64]; \
5658 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
5659 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
5660 \
5661 backend_shader::dump_instructions(filename); \
5662 } \
5663 \
5664 validate(); \
5665 \
5666 progress = progress || this_progress; \
5667 this_progress; \
5668 })
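/* Note that the statement expression evaluates to this_progress, so OPT()
 * can be used as a condition: the lower_pack and lower_load_payload calls
 * below rerun cleanup passes only when the lowering actually made progress.
 */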
5669
5670 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
5671 char filename[64];
5672 snprintf(filename, 64, "%s%d-%s-00-00-start",
5673 stage_abbrev, dispatch_width, nir->info.name);
5674
5675 backend_shader::dump_instructions(filename);
5676 }
5677
5678 bool progress = false;
5679 int iteration = 0;
5680 int pass_num = 0;
5681
5682 OPT(opt_drop_redundant_mov_to_flags);
5683
5684 do {
5685 progress = false;
5686 pass_num = 0;
5687 iteration++;
5688
5689 OPT(remove_duplicate_mrf_writes);
5690
5691 OPT(opt_algebraic);
5692 OPT(opt_cse);
5693 OPT(opt_copy_propagation);
5694 OPT(opt_predicated_break, this);
5695 OPT(opt_cmod_propagation);
5696 OPT(dead_code_eliminate);
5697 OPT(opt_peephole_sel);
5698 OPT(dead_control_flow_eliminate, this);
5699 OPT(opt_register_renaming);
5700 OPT(opt_saturate_propagation);
5701 OPT(register_coalesce);
5702 OPT(compute_to_mrf);
5703 OPT(eliminate_find_live_channel);
5704
5705 OPT(compact_virtual_grfs);
5706 } while (progress);
5707
5708 progress = false;
5709 pass_num = 0;
5710
5711 if (OPT(lower_pack)) {
5712 OPT(register_coalesce);
5713 OPT(dead_code_eliminate);
5714 }
5715
5716 OPT(lower_simd_width);
5717
5718 /* After SIMD lowering just in case we had to unroll the EOT send. */
5719 OPT(opt_sampler_eot);
5720
5721 OPT(lower_logical_sends);
5722
5723 if (progress) {
5724 OPT(opt_copy_propagation);
5725 /* Only run after logical send lowering because it's easier to implement
5726 * in terms of physical sends.
5727 */
5728 if (OPT(opt_zero_samples))
5729 OPT(opt_copy_propagation);
5730 /* Run after logical send lowering to give it a chance to CSE the
5731 * LOAD_PAYLOAD instructions created to construct the payloads of
5732 * e.g. texturing messages in cases where it wasn't possible to CSE the
5733 * whole logical instruction.
5734 */
5735 OPT(opt_cse);
5736 OPT(register_coalesce);
5737 OPT(compute_to_mrf);
5738 OPT(dead_code_eliminate);
5739 OPT(remove_duplicate_mrf_writes);
5740 OPT(opt_peephole_sel);
5741 }
5742
5743 OPT(opt_redundant_discard_jumps);
5744
5745 if (OPT(lower_load_payload)) {
5746 split_virtual_grfs();
5747 OPT(register_coalesce);
5748 OPT(compute_to_mrf);
5749 OPT(dead_code_eliminate);
5750 }
5751
5752 OPT(opt_combine_constants);
5753 OPT(lower_integer_multiplication);
5754
5755 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
5756 OPT(opt_cmod_propagation);
5757 OPT(opt_cse);
5758 OPT(opt_copy_propagation);
5759 OPT(dead_code_eliminate);
5760 }
5761
5762 if (OPT(lower_conversions)) {
5763 OPT(opt_copy_propagation);
5764 OPT(dead_code_eliminate);
5765 OPT(lower_simd_width);
5766 }
5767
5768 lower_uniform_pull_constant_loads();
5769
5770 validate();
5771 }
5772
5773 /**
5774 * Three-source instructions must have a GRF/MRF destination register.
5775 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
5776 */
5777 void
5778 fs_visitor::fixup_3src_null_dest()
5779 {
5780 bool progress = false;
5781
5782 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
5783 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
5784 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
5785 inst->dst.type);
5786 progress = true;
5787 }
5788 }
5789
5790 if (progress)
5791 invalidate_live_intervals();
5792 }
5793
5794 void
5795 fs_visitor::allocate_registers(bool allow_spilling)
5796 {
5797 bool allocated_without_spills;
5798
5799 static const enum instruction_scheduler_mode pre_modes[] = {
5800 SCHEDULE_PRE,
5801 SCHEDULE_PRE_NON_LIFO,
5802 SCHEDULE_PRE_LIFO,
5803 };
5804
5805 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
5806
5807 /* Try each scheduling heuristic to see if it can successfully register
5808 * allocate without spilling. They should be ordered by decreasing
5809 * performance but increasing likelihood of allocating.
5810 */
5811 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
5812 schedule_instructions(pre_modes[i]);
5813
5814 if (0) {
5815 assign_regs_trivial();
5816 allocated_without_spills = true;
5817 } else {
5818 allocated_without_spills = assign_regs(false, spill_all);
5819 }
5820 if (allocated_without_spills)
5821 break;
5822 }
5823
5824 if (!allocated_without_spills) {
5825 if (!allow_spilling)
5826 fail("Failure to register allocate and spilling is not allowed.");
5827
5828 /* We assume that any spilling is worse than just dropping back to
5829 * SIMD8. There's probably actually some intermediate point where
5830 * SIMD16 with a couple of spills is still better.
5831 */
5832 if (dispatch_width > min_dispatch_width) {
5833 fail("Failure to register allocate. Reduce number of "
5834 "live scalar values to avoid this.");
5835 } else {
5836 compiler->shader_perf_log(log_data,
5837 "%s shader triggered register spilling. "
5838 "Try reducing the number of live scalar "
5839 "values to improve performance.\n",
5840 stage_name);
5841 }
5842
5843 /* Since we're out of heuristics, just go spill registers until we
5844 * get an allocation.
5845 */
5846 while (!assign_regs(true, spill_all)) {
5847 if (failed)
5848 break;
5849 }
5850 }
5851
5852 /* This must come after all optimization and register allocation, since
5853 * it inserts dead code that happens to have side effects, and it does
5854 * so based on the actual physical registers in use.
5855 */
5856 insert_gen4_send_dependency_workarounds();
5857
5858 if (failed)
5859 return;
5860
5861 schedule_instructions(SCHEDULE_POST);
5862
5863 if (last_scratch > 0) {
5864 MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
5865
5866 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
5867
5868 if (stage == MESA_SHADER_COMPUTE) {
5869 if (devinfo->is_haswell) {
5870 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
5871 * field documentation, Haswell supports a minimum of 2kB of
5872 * scratch space for compute shaders, unlike every other stage
5873 * and platform.
5874 */
5875 prog_data->total_scratch = MAX2(prog_data->total_scratch, 2048);
5876 } else if (devinfo->gen <= 7) {
5877 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
5878 * field documentation, platforms prior to Haswell measure scratch
5879 * size linearly with a range of [1kB, 12kB] and 1kB granularity.
5880 */
5881 prog_data->total_scratch = ALIGN(last_scratch, 1024);
5882 max_scratch_size = 12 * 1024;
5883 }
5884 }
5885
5886 /* We currently only support up to 2MB of scratch space. If we
5887 * need to support more eventually, the documentation suggests
5888 * that we could allocate a larger buffer, and partition it out
5889 * ourselves. We'd just have to undo the hardware's address
5890 * calculation by subtracting (FFTID * Per Thread Scratch Space)
5891 * and then add FFTID * (Larger Per Thread Scratch Space).
5892 *
5893 * See 3D-Media-GPGPU Engine > Media GPGPU Pipeline >
5894 * Thread Group Tracking > Local Memory/Scratch Space.
5895 */
5896 assert(prog_data->total_scratch < max_scratch_size);
5897 }
5898 }
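/* Worked example (hypothetical amounts): a gen7 (non-Haswell) compute shader
 * with last_scratch == 5000 bytes takes the gen <= 7 branch above, so
 * total_scratch = ALIGN(5000, 1024) == 5120 bytes against a 12kB cap, while
 * on Haswell the same shader would get MAX2(total_scratch, 2048).
 */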
5899
5900 bool
5901 fs_visitor::run_vs(gl_clip_plane *clip_planes)
5902 {
5903 assert(stage == MESA_SHADER_VERTEX);
5904
5905 setup_vs_payload();
5906
5907 if (shader_time_index >= 0)
5908 emit_shader_time_begin();
5909
5910 emit_nir_code();
5911
5912 if (failed)
5913 return false;
5914
5915 compute_clip_distance(clip_planes);
5916
5917 emit_urb_writes();
5918
5919 if (shader_time_index >= 0)
5920 emit_shader_time_end();
5921
5922 calculate_cfg();
5923
5924 optimize();
5925
5926 assign_curb_setup();
5927 assign_vs_urb_setup();
5928
5929 fixup_3src_null_dest();
5930 allocate_registers(true);
5931
5932 return !failed;
5933 }
5934
5935 bool
5936 fs_visitor::run_tcs_single_patch()
5937 {
5938 assert(stage == MESA_SHADER_TESS_CTRL);
5939
5940 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
5941
5942 /* r1-r4 contain the ICP handles. */
5943 payload.num_regs = 5;
5944
5945 if (shader_time_index >= 0)
5946 emit_shader_time_begin();
5947
5948 /* Initialize gl_InvocationID */
5949 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
5950 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
5951 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
5952 bld.MOV(channels_ud, channels_uw);
5953
5954 if (tcs_prog_data->instances == 1) {
5955 invocation_id = channels_ud;
5956 } else {
5957 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
5958
5959 /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
5960 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
5961 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5962 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
5963 brw_imm_ud(INTEL_MASK(23, 17)));
5964 bld.SHR(instance_times_8, t, brw_imm_ud(17 - 3));
5965
5966 bld.ADD(invocation_id, instance_times_8, channels_ud);
5967 }
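 /* For illustration (hypothetical header): if g0.2[23:17] holds instance
  * number 2, the AND extracts 2 << 17, the SHR by (17 - 3) leaves
  * 2 << 3 == 16, and adding the 0..7 channel numbers makes the lanes
  * compute gl_InvocationID values 16..23.
  */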
5968
5969 /* Fix the dispatch mask */
5970 if (nir->info.tess.tcs_vertices_out % 8) {
5971 bld.CMP(bld.null_reg_ud(), invocation_id,
5972 brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
5973 bld.IF(BRW_PREDICATE_NORMAL);
5974 }
5975
5976 emit_nir_code();
5977
5978 if (nir->info.tess.tcs_vertices_out % 8) {
5979 bld.emit(BRW_OPCODE_ENDIF);
5980 }
5981
5982 /* Emit EOT write; set TR DS Cache bit */
5983 fs_reg srcs[3] = {
5984 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
5985 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
5986 fs_reg(brw_imm_ud(0)),
5987 };
5988 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
5989 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
5990
5991 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
5992 bld.null_reg_ud(), payload);
5993 inst->mlen = 3;
5994 inst->eot = true;
5995
5996 if (shader_time_index >= 0)
5997 emit_shader_time_end();
5998
5999 if (failed)
6000 return false;
6001
6002 calculate_cfg();
6003
6004 optimize();
6005
6006 assign_curb_setup();
6007 assign_tcs_single_patch_urb_setup();
6008
6009 fixup_3src_null_dest();
6010 allocate_registers(true);
6011
6012 return !failed;
6013 }
6014
6015 bool
6016 fs_visitor::run_tes()
6017 {
6018 assert(stage == MESA_SHADER_TESS_EVAL);
6019
6020 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
6021 payload.num_regs = 5;
6022
6023 if (shader_time_index >= 0)
6024 emit_shader_time_begin();
6025
6026 emit_nir_code();
6027
6028 if (failed)
6029 return false;
6030
6031 emit_urb_writes();
6032
6033 if (shader_time_index >= 0)
6034 emit_shader_time_end();
6035
6036 calculate_cfg();
6037
6038 optimize();
6039
6040 assign_curb_setup();
6041 assign_tes_urb_setup();
6042
6043 fixup_3src_null_dest();
6044 allocate_registers(true);
6045
6046 return !failed;
6047 }
6048
6049 bool
6050 fs_visitor::run_gs()
6051 {
6052 assert(stage == MESA_SHADER_GEOMETRY);
6053
6054 setup_gs_payload();
6055
6056 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
6057
6058 if (gs_compile->control_data_header_size_bits > 0) {
6059 /* Create a VGRF to store accumulated control data bits. */
6060 this->control_data_bits = vgrf(glsl_type::uint_type);
6061
6062 /* If we're outputting more than 32 control data bits, then EmitVertex()
6063 * will set control_data_bits to 0 after emitting the first vertex.
6064 * Otherwise, we need to initialize it to 0 here.
6065 */
6066 if (gs_compile->control_data_header_size_bits <= 32) {
6067 const fs_builder abld = bld.annotate("initialize control data bits");
6068 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
6069 }
6070 }
6071
6072 if (shader_time_index >= 0)
6073 emit_shader_time_begin();
6074
6075 emit_nir_code();
6076
6077 emit_gs_thread_end();
6078
6079 if (shader_time_index >= 0)
6080 emit_shader_time_end();
6081
6082 if (failed)
6083 return false;
6084
6085 calculate_cfg();
6086
6087 optimize();
6088
6089 assign_curb_setup();
6090 assign_gs_urb_setup();
6091
6092 fixup_3src_null_dest();
6093 allocate_registers(true);
6094
6095 return !failed;
6096 }
6097
6098 bool
6099 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
6100 {
6101 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
6102 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
6103
6104 assert(stage == MESA_SHADER_FRAGMENT);
6105
6106 if (devinfo->gen >= 6)
6107 setup_fs_payload_gen6();
6108 else
6109 setup_fs_payload_gen4();
6110
6111 if (0) {
6112 emit_dummy_fs();
6113 } else if (do_rep_send) {
6114 assert(dispatch_width == 16);
6115 emit_repclear_shader();
6116 } else {
6117 if (shader_time_index >= 0)
6118 emit_shader_time_begin();
6119
6120 calculate_urb_setup();
6121 if (nir->info.inputs_read > 0 ||
6122 (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
6123 if (devinfo->gen < 6)
6124 emit_interpolation_setup_gen4();
6125 else
6126 emit_interpolation_setup_gen6();
6127 }
6128
6129 /* We handle discards by keeping track of the still-live pixels in f0.1.
6130 * Initialize it with the dispatched pixels.
6131 */
6132 if (wm_prog_data->uses_kill) {
6133 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
6134 discard_init->flag_subreg = 1;
6135 }
6136
6137 /* Generate FS IR for main(). (the visitor only descends into
6138 * functions called "main").
6139 */
6140 emit_nir_code();
6141
6142 if (failed)
6143 return false;
6144
6145 if (wm_prog_data->uses_kill)
6146 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
6147
6148 if (wm_key->alpha_test_func)
6149 emit_alpha_test();
6150
6151 emit_fb_writes();
6152
6153 if (shader_time_index >= 0)
6154 emit_shader_time_end();
6155
6156 calculate_cfg();
6157
6158 optimize();
6159
6160 assign_curb_setup();
6161 assign_urb_setup();
6162
6163 fixup_3src_null_dest();
6164 allocate_registers(allow_spilling);
6165
6166 if (failed)
6167 return false;
6168 }
6169
6170 return !failed;
6171 }
6172
6173 bool
6174 fs_visitor::run_cs()
6175 {
6176 assert(stage == MESA_SHADER_COMPUTE);
6177
6178 setup_cs_payload();
6179
6180 if (shader_time_index >= 0)
6181 emit_shader_time_begin();
6182
6183 if (devinfo->is_haswell && prog_data->total_shared > 0) {
6184 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
6185 const fs_builder abld = bld.exec_all().group(1, 0);
6186 abld.MOV(retype(brw_sr0_reg(1), BRW_REGISTER_TYPE_UW),
6187 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
6188 }
6189
6190 emit_nir_code();
6191
6192 if (failed)
6193 return false;
6194
6195 emit_cs_terminate();
6196
6197 if (shader_time_index >= 0)
6198 emit_shader_time_end();
6199
6200 calculate_cfg();
6201
6202 optimize();
6203
6204 assign_curb_setup();
6205
6206 fixup_3src_null_dest();
6207 allocate_registers(true);
6208
6209 if (failed)
6210 return false;
6211
6212 return !failed;
6213 }
6214
6215 /**
6216 * Return a bitfield where bit n is set if barycentric interpolation mode n
6217 * (see enum brw_barycentric_mode) is needed by the fragment shader.
6218 *
6219 * We examine the load_barycentric intrinsics rather than looking at input
6220 * variables so that we catch interpolateAtCentroid() messages too, which
6221 * also need the BRW_BARYCENTRIC_[NON]PERSPECTIVE_CENTROID mode set up.
6222 */
6223 static unsigned
6224 brw_compute_barycentric_interp_modes(const struct gen_device_info *devinfo,
6225 const nir_shader *shader)
6226 {
6227 unsigned barycentric_interp_modes = 0;
6228
6229 nir_foreach_function(f, shader) {
6230 if (!f->impl)
6231 continue;
6232
6233 nir_foreach_block(block, f->impl) {
6234 nir_foreach_instr(instr, block) {
6235 if (instr->type != nir_instr_type_intrinsic)
6236 continue;
6237
6238 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6239 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
6240 continue;
6241
6242 /* Ignore WPOS; it doesn't require interpolation. */
6243 if (nir_intrinsic_base(intrin) == VARYING_SLOT_POS)
6244 continue;
6245
6246 intrin = nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
6247 enum glsl_interp_mode interp = (enum glsl_interp_mode)
6248 nir_intrinsic_interp_mode(intrin);
6249 nir_intrinsic_op bary_op = intrin->intrinsic;
6250 enum brw_barycentric_mode bary =
6251 brw_barycentric_mode(interp, bary_op);
6252
6253 barycentric_interp_modes |= 1 << bary;
6254
6255 if (devinfo->needs_unlit_centroid_workaround &&
6256 bary_op == nir_intrinsic_load_barycentric_centroid)
6257 barycentric_interp_modes |= 1 << centroid_to_pixel(bary);
6258 }
6259 }
6260 }
6261
6262 return barycentric_interp_modes;
6263 }
6264
6265 static void
6266 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
6267 const nir_shader *shader)
6268 {
6269 prog_data->flat_inputs = 0;
6270
6271 nir_foreach_variable(var, &shader->inputs) {
6272 int input_index = prog_data->urb_setup[var->data.location];
6273
6274 if (input_index < 0)
6275 continue;
6276
6277 /* flat shading */
6278 if (var->data.interpolation == INTERP_MODE_FLAT)
6279 prog_data->flat_inputs |= (1 << input_index);
6280 }
6281 }
6282
6283 static uint8_t
6284 computed_depth_mode(const nir_shader *shader)
6285 {
6286 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
6287 switch (shader->info.fs.depth_layout) {
6288 case FRAG_DEPTH_LAYOUT_NONE:
6289 case FRAG_DEPTH_LAYOUT_ANY:
6290 return BRW_PSCDEPTH_ON;
6291 case FRAG_DEPTH_LAYOUT_GREATER:
6292 return BRW_PSCDEPTH_ON_GE;
6293 case FRAG_DEPTH_LAYOUT_LESS:
6294 return BRW_PSCDEPTH_ON_LE;
6295 case FRAG_DEPTH_LAYOUT_UNCHANGED:
6296 return BRW_PSCDEPTH_OFF;
6297 }
6298 }
6299 return BRW_PSCDEPTH_OFF;
6300 }
6301
6302 /**
6303 * Move load_interpolated_input with simple (payload-based) barycentric modes
6304 * to the top of the program so we don't emit multiple PLNs for the same input.
6305 *
6306 * This works around CSE not being able to handle non-dominating cases
6307 * such as:
6308 *
6309 * if (...) {
6310 * interpolate input
6311 * } else {
6312 * interpolate the same exact input
6313 * }
6314 *
6315 * This should be replaced by global value numbering someday.
6316 */
6317 static bool
6318 move_interpolation_to_top(nir_shader *nir)
6319 {
6320 bool progress = false;
6321
6322 nir_foreach_function(f, nir) {
6323 if (!f->impl)
6324 continue;
6325
6326 nir_block *top = nir_start_block(f->impl);
6327 exec_node *cursor_node = NULL;
6328
6329 nir_foreach_block(block, f->impl) {
6330 if (block == top)
6331 continue;
6332
6333 nir_foreach_instr_safe(instr, block) {
6334 if (instr->type != nir_instr_type_intrinsic)
6335 continue;
6336
6337 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6338 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
6339 continue;
6340 nir_intrinsic_instr *bary_intrinsic =
6341 nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
6342 nir_intrinsic_op op = bary_intrinsic->intrinsic;
6343
6344 /* Leave interpolateAtSample/Offset() where they are. */
6345 if (op == nir_intrinsic_load_barycentric_at_sample ||
6346 op == nir_intrinsic_load_barycentric_at_offset)
6347 continue;
6348
6349 nir_instr *move[3] = {
6350 &bary_intrinsic->instr,
6351 intrin->src[1].ssa->parent_instr,
6352 instr
6353 };
6354
6355 for (unsigned i = 0; i < ARRAY_SIZE(move); i++) {
6356 if (move[i]->block != top) {
6357 move[i]->block = top;
6358 exec_node_remove(&move[i]->node);
6359 if (cursor_node) {
6360 exec_node_insert_after(cursor_node, &move[i]->node);
6361 } else {
6362 exec_list_push_head(&top->instr_list, &move[i]->node);
6363 }
6364 cursor_node = &move[i]->node;
6365 progress = true;
6366 }
6367 }
6368 }
6369 }
6370 nir_metadata_preserve(f->impl, (nir_metadata)
6371 ((unsigned) nir_metadata_block_index |
6372 (unsigned) nir_metadata_dominance));
6373 }
6374
6375 return progress;
6376 }
6377
6378 /**
6379 * Demote per-sample barycentric intrinsics to centroid.
6380 *
6381 * Useful when rendering to a non-multisampled buffer.
6382 */
6383 static bool
6384 demote_sample_qualifiers(nir_shader *nir)
6385 {
6386 bool progress = false;
6387
6388 nir_foreach_function(f, nir) {
6389 if (!f->impl)
6390 continue;
6391
6392 nir_builder b;
6393 nir_builder_init(&b, f->impl);
6394
6395 nir_foreach_block(block, f->impl) {
6396 nir_foreach_instr_safe(instr, block) {
6397 if (instr->type != nir_instr_type_intrinsic)
6398 continue;
6399
6400 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
6401 if (intrin->intrinsic != nir_intrinsic_load_barycentric_sample &&
6402 intrin->intrinsic != nir_intrinsic_load_barycentric_at_sample)
6403 continue;
6404
6405 b.cursor = nir_before_instr(instr);
6406 nir_ssa_def *centroid =
6407 nir_load_barycentric(&b, nir_intrinsic_load_barycentric_centroid,
6408 nir_intrinsic_interp_mode(intrin));
6409 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
6410 nir_src_for_ssa(centroid));
6411 nir_instr_remove(instr);
6412 progress = true;
6413 }
6414 }
6415
6416 nir_metadata_preserve(f->impl, (nir_metadata)
6417 ((unsigned) nir_metadata_block_index |
6418 (unsigned) nir_metadata_dominance));
6419 }
6420
6421 return progress;
6422 }
6423
6424 /**
6425 * Pre-gen6, the register file of the EUs was shared between threads,
6426 * and each thread used some subset allocated on a 16-register block
6427 * granularity. The unit states wanted these block counts.
6428 */
6429 static inline int
6430 brw_register_blocks(int reg_count)
6431 {
6432 return ALIGN(reg_count, 16) / 16 - 1;
6433 }
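/* For example, a shader using 60 GRFs reports
 * brw_register_blocks(60) == ALIGN(60, 16) / 16 - 1 == 64 / 16 - 1 == 3,
 * i.e. four 16-register blocks encoded as "block count minus one".
 */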
6434
6435 const unsigned *
6436 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
6437 void *mem_ctx,
6438 const struct brw_wm_prog_key *key,
6439 struct brw_wm_prog_data *prog_data,
6440 const nir_shader *src_shader,
6441 struct gl_program *prog,
6442 int shader_time_index8, int shader_time_index16,
6443 bool allow_spilling,
6444 bool use_rep_send, struct brw_vue_map *vue_map,
6445 unsigned *final_assembly_size,
6446 char **error_str)
6447 {
6448 const struct gen_device_info *devinfo = compiler->devinfo;
6449
6450 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6451 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
6452 brw_nir_lower_fs_inputs(shader, devinfo, key);
6453 brw_nir_lower_fs_outputs(shader);
6454
6455 if (devinfo->gen < 6) {
6456 brw_setup_vue_interpolation(vue_map, shader, prog_data, devinfo);
6457 }
6458
6459 if (!key->multisample_fbo)
6460 NIR_PASS_V(shader, demote_sample_qualifiers);
6461 NIR_PASS_V(shader, move_interpolation_to_top);
6462 shader = brw_postprocess_nir(shader, compiler, true);
6463
6464 /* key->alpha_test_func means simulating alpha testing via discards,
6465 * so the shader definitely kills pixels.
6466 */
6467 prog_data->uses_kill = shader->info.fs.uses_discard ||
6468 key->alpha_test_func;
6469 prog_data->uses_omask = key->multisample_fbo &&
6470 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
6471 prog_data->computed_depth_mode = computed_depth_mode(shader);
6472 prog_data->computed_stencil =
6473 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
6474
6475 prog_data->persample_dispatch =
6476 key->multisample_fbo &&
6477 (key->persample_interp ||
6478 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
6479 SYSTEM_BIT_SAMPLE_POS)) ||
6480 shader->info.fs.uses_sample_qualifier ||
6481 shader->info.outputs_read);
6482
6483 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
6484 prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
6485 prog_data->inner_coverage = shader->info.fs.inner_coverage;
6486
6487 prog_data->barycentric_interp_modes =
6488 brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
6489
6490 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
6491 uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
6492 unsigned simd8_grf_used = 0, simd16_grf_used = 0;
6493
6494 fs_visitor v8(compiler, log_data, mem_ctx, key,
6495 &prog_data->base, prog, shader, 8,
6496 shader_time_index8);
6497 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
6498 if (error_str)
6499 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
6500
6501 return NULL;
6502 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
6503 simd8_cfg = v8.cfg;
6504 simd8_grf_start = v8.payload.num_regs;
6505 simd8_grf_used = v8.grf_used;
6506 }
6507
6508 if (v8.max_dispatch_width >= 16 &&
6509 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
6510 /* Try a SIMD16 compile */
6511 fs_visitor v16(compiler, log_data, mem_ctx, key,
6512 &prog_data->base, prog, shader, 16,
6513 shader_time_index16);
6514 v16.import_uniforms(&v8);
6515 if (!v16.run_fs(allow_spilling, use_rep_send)) {
6516 compiler->shader_perf_log(log_data,
6517 "SIMD16 shader failed to compile: %s",
6518 v16.fail_msg);
6519 } else {
6520 simd16_cfg = v16.cfg;
6521 simd16_grf_start = v16.payload.num_regs;
6522 simd16_grf_used = v16.grf_used;
6523 }
6524 }
6525
6526 /* When the caller requests a repclear shader, they want SIMD16-only */
6527 if (use_rep_send)
6528 simd8_cfg = NULL;
6529
6530 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
6531 * at the top to select the shader. We've never implemented that.
6532 * Instead, we just give them exactly one shader and we pick the widest one
6533 * available.
6534 */
6535 if (compiler->devinfo->gen < 5 && simd16_cfg)
6536 simd8_cfg = NULL;
6537
6538 if (prog_data->persample_dispatch) {
6539 /* Starting with SandyBridge (where we first get MSAA), the different
6540 * pixel dispatch combinations are grouped into classifications A
6541 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
6542 * generations, the only configurations supporting persample dispatch
6543 * are those in which only one dispatch width is enabled.
6544 *
6545 * If computed depth is enabled, SNB only allows SIMD8 while IVB+
6546 * allow SIMD8 or SIMD16 so we choose SIMD16 if available.
6547 */
6548 if (compiler->devinfo->gen == 6 &&
6549 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) {
6550 simd16_cfg = NULL;
6551 } else if (simd16_cfg) {
6552 simd8_cfg = NULL;
6553 }
6554 }
6555
6556 /* We have to compute the flat inputs after the visitor is finished running
6557 * because it relies on prog_data->urb_setup which is computed in
6558 * fs_visitor::calculate_urb_setup().
6559 */
6560 brw_compute_flat_inputs(prog_data, shader);
6561
6562 fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
6563 v8.promoted_constants, v8.runtime_check_aads_emit,
6564 MESA_SHADER_FRAGMENT);
6565
6566 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
6567 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
6568 shader->info.label ?
6569 shader->info.label : "unnamed",
6570 shader->info.name));
6571 }
6572
6573 if (simd8_cfg) {
6574 prog_data->dispatch_8 = true;
6575 g.generate_code(simd8_cfg, 8);
6576 prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
6577 prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
6578
6579 if (simd16_cfg) {
6580 prog_data->dispatch_16 = true;
6581 prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
6582 prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
6583 prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
6584 }
6585 } else if (simd16_cfg) {
6586 prog_data->dispatch_16 = true;
6587 g.generate_code(simd16_cfg, 16);
6588 prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
6589 prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
6590 }
6591
6592 return g.get_assembly(final_assembly_size);
6593 }
6594
6595 fs_reg *
6596 fs_visitor::emit_cs_work_group_id_setup()
6597 {
6598 assert(stage == MESA_SHADER_COMPUTE);
6599
6600 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6601
6602 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
6603 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
6604 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
6605
6606 bld.MOV(*reg, r0_1);
6607 bld.MOV(offset(*reg, bld, 1), r0_6);
6608 bld.MOV(offset(*reg, bld, 2), r0_7);
6609
6610 return reg;
6611 }
6612
6613 static void
6614 fill_push_const_block_info(struct brw_push_const_block *block, unsigned dwords)
6615 {
6616 block->dwords = dwords;
6617 block->regs = DIV_ROUND_UP(dwords, 8);
6618 block->size = block->regs * 32;
6619 }
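/* For example, dwords == 20 fills a block with regs == DIV_ROUND_UP(20, 8)
 * == 3 registers and size == 3 * 32 == 96 bytes, since each GRF holds eight
 * 32-bit dwords.
 */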
6620
6621 static void
6622 cs_fill_push_const_info(const struct gen_device_info *devinfo,
6623 struct brw_cs_prog_data *cs_prog_data)
6624 {
6625 const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
6626 bool fill_thread_id =
6627 cs_prog_data->thread_local_id_index >= 0 &&
6628 cs_prog_data->thread_local_id_index < (int)prog_data->nr_params;
6629 bool cross_thread_supported = devinfo->gen > 7 || devinfo->is_haswell;
6630
6631 /* The thread ID should be stored in the last param dword */
6632 assert(prog_data->nr_params > 0 || !fill_thread_id);
6633 assert(!fill_thread_id ||
6634 cs_prog_data->thread_local_id_index ==
6635 (int)prog_data->nr_params - 1);
6636
6637 unsigned cross_thread_dwords, per_thread_dwords;
6638 if (!cross_thread_supported) {
6639 cross_thread_dwords = 0u;
6640 per_thread_dwords = prog_data->nr_params;
6641 } else if (fill_thread_id) {
6642 /* Fill all but the last register with cross-thread payload */
6643 cross_thread_dwords = 8 * (cs_prog_data->thread_local_id_index / 8);
6644 per_thread_dwords = prog_data->nr_params - cross_thread_dwords;
6645 assert(per_thread_dwords > 0 && per_thread_dwords <= 8);
6646 } else {
6647 /* Fill all data using cross-thread payload */
6648 cross_thread_dwords = prog_data->nr_params;
6649 per_thread_dwords = 0u;
6650 }
6651
6652 fill_push_const_block_info(&cs_prog_data->push.cross_thread, cross_thread_dwords);
6653 fill_push_const_block_info(&cs_prog_data->push.per_thread, per_thread_dwords);
6654
6655 unsigned total_dwords =
6656 (cs_prog_data->push.per_thread.size * cs_prog_data->threads +
6657 cs_prog_data->push.cross_thread.size) / 4;
6658 fill_push_const_block_info(&cs_prog_data->push.total, total_dwords);
6659
6660 assert(cs_prog_data->push.cross_thread.dwords % 8 == 0 ||
6661 cs_prog_data->push.per_thread.size == 0);
6662 assert(cs_prog_data->push.cross_thread.dwords +
6663 cs_prog_data->push.per_thread.dwords ==
6664 prog_data->nr_params);
6665 }
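/* Worked example (hypothetical counts): on a gen7.5+ part with
 * nr_params == 18 and thread_local_id_index == 17, the fill_thread_id path
 * sets cross_thread_dwords = 8 * (17 / 8) == 16 and per_thread_dwords =
 * 18 - 16 == 2, which packs into one more register per thread with the
 * thread-local ID in the final used dword.
 */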
6666
6667 static void
6668 cs_set_simd_size(struct brw_cs_prog_data *cs_prog_data, unsigned size)
6669 {
6670 cs_prog_data->simd_size = size;
6671 unsigned group_size = cs_prog_data->local_size[0] *
6672 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
6673 cs_prog_data->threads = (group_size + size - 1) / size;
6674 }
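/* For example, a local group of 8 x 8 x 1 == 64 invocations at SIMD16
 * needs (64 + 16 - 1) / 16 == 4 hardware threads.
 */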
6675
6676 const unsigned *
6677 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
6678 void *mem_ctx,
6679 const struct brw_cs_prog_key *key,
6680 struct brw_cs_prog_data *prog_data,
6681 const nir_shader *src_shader,
6682 int shader_time_index,
6683 unsigned *final_assembly_size,
6684 char **error_str)
6685 {
6686 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6687 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
6688 brw_nir_lower_cs_shared(shader);
6689 prog_data->base.total_shared += shader->num_shared;
6690
6691 /* Now that we cloned the nir_shader, we can update num_uniforms based on
6692 * the thread_local_id_index.
6693 */
6694 assert(prog_data->thread_local_id_index >= 0);
6695 shader->num_uniforms =
6696 MAX2(shader->num_uniforms,
6697 (unsigned)4 * (prog_data->thread_local_id_index + 1));
6698
6699 brw_nir_lower_intrinsics(shader, &prog_data->base);
6700 shader = brw_postprocess_nir(shader, compiler, true);
6701
6702 prog_data->local_size[0] = shader->info.cs.local_size[0];
6703 prog_data->local_size[1] = shader->info.cs.local_size[1];
6704 prog_data->local_size[2] = shader->info.cs.local_size[2];
6705 unsigned local_workgroup_size =
6706 shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
6707 shader->info.cs.local_size[2];
6708
6709 unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
6710 unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
6711
6712 cfg_t *cfg = NULL;
6713 const char *fail_msg = NULL;
6714
6715 /* Now the main event: Visit the shader IR and generate our CS IR for it.
6716 */
6717 fs_visitor v8(compiler, log_data, mem_ctx, key, &prog_data->base,
6718 NULL, /* Never used in core profile */
6719 shader, 8, shader_time_index);
6720 if (simd_required <= 8) {
6721 if (!v8.run_cs()) {
6722 fail_msg = v8.fail_msg;
6723 } else {
6724 cfg = v8.cfg;
6725 cs_set_simd_size(prog_data, 8);
6726 cs_fill_push_const_info(compiler->devinfo, prog_data);
6727 prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
6728 }
6729 }
6730
6731 fs_visitor v16(compiler, log_data, mem_ctx, key, &prog_data->base,
6732 NULL, /* Never used in core profile */
6733 shader, 16, shader_time_index);
6734 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
6735 !fail_msg && v8.max_dispatch_width >= 16 &&
6736 simd_required <= 16) {
6737 /* Try a SIMD16 compile */
6738 if (simd_required <= 8)
6739 v16.import_uniforms(&v8);
6740 if (!v16.run_cs()) {
6741 compiler->shader_perf_log(log_data,
6742 "SIMD16 shader failed to compile: %s",
6743 v16.fail_msg);
6744 if (!cfg) {
6745 fail_msg =
6746 "Couldn't generate SIMD16 program and not "
6747 "enough threads for SIMD8";
6748 }
6749 } else {
6750 cfg = v16.cfg;
6751 cs_set_simd_size(prog_data, 16);
6752 cs_fill_push_const_info(compiler->devinfo, prog_data);
6753 prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
6754 }
6755 }
6756
6757 fs_visitor v32(compiler, log_data, mem_ctx, key, &prog_data->base,
6758 NULL, /* Never used in core profile */
6759 shader, 32, shader_time_index);
6760 if (!fail_msg && v8.max_dispatch_width >= 32 &&
6761 (simd_required > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
6762 /* Try a SIMD32 compile */
6763 if (simd_required <= 8)
6764 v32.import_uniforms(&v8);
6765 else if (simd_required <= 16)
6766 v32.import_uniforms(&v16);
6767
6768 if (!v32.run_cs()) {
6769 compiler->shader_perf_log(log_data,
6770 "SIMD32 shader failed to compile: %s",
6771 v32.fail_msg);
6772 if (!cfg) {
6773 fail_msg =
6774 "Couldn't generate SIMD32 program and not "
6775 "enough threads for SIMD16";
6776 }
6777 } else {
6778 cfg = v32.cfg;
6779 cs_set_simd_size(prog_data, 32);
6780 cs_fill_push_const_info(compiler->devinfo, prog_data);
6781 }
6782 }
6783
6784 if (unlikely(cfg == NULL)) {
6785 assert(fail_msg);
6786 if (error_str)
6787 *error_str = ralloc_strdup(mem_ctx, fail_msg);
6788
6789 return NULL;
6790 }
6791
6792 fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
6793 v8.promoted_constants, v8.runtime_check_aads_emit,
6794 MESA_SHADER_COMPUTE);
6795 if (INTEL_DEBUG & DEBUG_CS) {
6796 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
6797 shader->info.label ? shader->info.label :
6798 "unnamed",
6799 shader->info.name);
6800 g.enable_debug(name);
6801 }
6802
6803 g.generate_code(cfg, prog_data->simd_size);
6804
6805 return g.get_assembly(final_assembly_size);
6806 }
6807
6808 /**
6809 * Test the dispatch mask packing assumptions of
6810 * brw_stage_has_packed_dispatch(). Call this from e.g. the top of
6811 * fs_visitor::emit_nir_code() to cause a GPU hang if any shader invocation is
6812 * executed with an unexpected dispatch mask.
6813 */
6814 static UNUSED void
6815 brw_fs_test_dispatch_packing(const fs_builder &bld)
6816 {
6817 const gl_shader_stage stage = bld.shader->stage;
6818
6819 if (brw_stage_has_packed_dispatch(bld.shader->devinfo, stage,
6820 bld.shader->stage_prog_data)) {
6821 const fs_builder ubld = bld.exec_all().group(1, 0);
6822 const fs_reg tmp = component(bld.vgrf(BRW_REGISTER_TYPE_UD), 0);
6823 const fs_reg mask = (stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
6824 brw_dmask_reg());
6825
6826 ubld.ADD(tmp, mask, brw_imm_ud(1));
6827 ubld.AND(tmp, mask, tmp);
6828
6829 /* This will loop forever if the dispatch mask doesn't have the expected
6830 * form '2^n-1', in which case tmp will be non-zero.
6831 */
6832 bld.emit(BRW_OPCODE_DO);
6833 bld.CMP(bld.null_reg_ud(), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
6834 set_predicate(BRW_PREDICATE_NORMAL, bld.emit(BRW_OPCODE_WHILE));
6835 }
6836 }
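/* For illustration: a packed mask such as 0xff satisfies mask & (mask + 1)
 * == 0, so tmp is zero and the loop exits immediately; a sparse mask such as
 * 0xf7 yields 0xf7 & 0xf8 == 0xf0 != 0, keeping the DO/WHILE spinning and
 * making the unexpected dispatch mask visible as a hang.
 */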