intel/fs: Use SHADER_OPCODE_SEND for varying UBO pulls on gen7+
[mesa.git] / src / intel / compiler / brw_fs.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs.cpp
25 *
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
28 * from the LIR.
29 */
30
31 #include "main/macros.h"
32 #include "brw_eu.h"
33 #include "brw_fs.h"
34 #include "brw_nir.h"
35 #include "brw_vec4_gs_visitor.h"
36 #include "brw_cfg.h"
37 #include "brw_dead_control_flow.h"
38 #include "common/gen_debug.h"
39 #include "compiler/glsl_types.h"
40 #include "compiler/nir/nir_builder.h"
41 #include "program/prog_parameter.h"
42 #include "util/u_math.h"
43
44 using namespace brw;
45
46 static unsigned get_lowered_simd_width(const struct gen_device_info *devinfo,
47 const fs_inst *inst);
48
49 void
50 fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
51 const fs_reg *src, unsigned sources)
52 {
53 memset((void*)this, 0, sizeof(*this));
54
55 this->src = new fs_reg[MAX2(sources, 3)];
56 for (unsigned i = 0; i < sources; i++)
57 this->src[i] = src[i];
58
59 this->opcode = opcode;
60 this->dst = dst;
61 this->sources = sources;
62 this->exec_size = exec_size;
63 this->base_mrf = -1;
64
65 assert(dst.file != IMM && dst.file != UNIFORM);
66
67 assert(this->exec_size != 0);
68
69 this->conditional_mod = BRW_CONDITIONAL_NONE;
70
71 /* This will be the case for almost all instructions. */
72 switch (dst.file) {
73 case VGRF:
74 case ARF:
75 case FIXED_GRF:
76 case MRF:
77 case ATTR:
78 this->size_written = dst.component_size(exec_size);
79 break;
80 case BAD_FILE:
81 this->size_written = 0;
82 break;
83 case IMM:
84 case UNIFORM:
85 unreachable("Invalid destination register file");
86 }
87
88 this->writes_accumulator = false;
89 }
90
91 fs_inst::fs_inst()
92 {
93 init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
94 }
95
96 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
97 {
98 init(opcode, exec_size, reg_undef, NULL, 0);
99 }
100
101 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
102 {
103 init(opcode, exec_size, dst, NULL, 0);
104 }
105
106 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
107 const fs_reg &src0)
108 {
109 const fs_reg src[1] = { src0 };
110 init(opcode, exec_size, dst, src, 1);
111 }
112
113 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
114 const fs_reg &src0, const fs_reg &src1)
115 {
116 const fs_reg src[2] = { src0, src1 };
117 init(opcode, exec_size, dst, src, 2);
118 }
119
120 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
121 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
122 {
123 const fs_reg src[3] = { src0, src1, src2 };
124 init(opcode, exec_size, dst, src, 3);
125 }
126
127 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
128 const fs_reg src[], unsigned sources)
129 {
130 init(opcode, exec_width, dst, src, sources);
131 }
132
133 fs_inst::fs_inst(const fs_inst &that)
134 {
135 memcpy((void*)this, &that, sizeof(that));
136
137 this->src = new fs_reg[MAX2(that.sources, 3)];
138
139 for (unsigned i = 0; i < that.sources; i++)
140 this->src[i] = that.src[i];
141 }
142
143 fs_inst::~fs_inst()
144 {
145 delete[] this->src;
146 }
147
148 void
149 fs_inst::resize_sources(uint8_t num_sources)
150 {
151 if (this->sources != num_sources) {
152 fs_reg *src = new fs_reg[MAX2(num_sources, 3)];
153
154 for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
155 src[i] = this->src[i];
156
157 delete[] this->src;
158 this->src = src;
159 this->sources = num_sources;
160 }
161 }
162
163 void
164 fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
165 const fs_reg &dst,
166 const fs_reg &surf_index,
167 const fs_reg &varying_offset,
168 uint32_t const_offset)
169 {
170 /* We have our constant surface use a pitch of 4 bytes, so our index can
171 * be any component of a vector, and then we load 4 contiguous
172 * components starting from that.
173 *
174     * We break down the const_offset into a portion added to the variable offset
175 * and a portion done using fs_reg::offset, which means that if you have
176 * GLSL using something like "uniform vec4 a[20]; gl_FragColor = a[i]",
177 * we'll temporarily generate 4 vec4 loads from offset i * 4, and CSE can
178 * later notice that those loads are all the same and eliminate the
179 * redundant ones.
180 */
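   /* For example, with const_offset == 20 the aligned 16 bytes are folded
    * into vec4_offset below and the remaining 4 bytes select component 1 of
    * the returned vec4 (for a 32-bit destination) in the shuffle at the end.
    */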
181 fs_reg vec4_offset = vgrf(glsl_type::uint_type);
182 bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));
183
184 /* The pull load message will load a vec4 (16 bytes). If we are loading
185 * a double this means we are only loading 2 elements worth of data.
186 * We also want to use a 32-bit data type for the dst of the load operation
187 * so other parts of the driver don't get confused about the size of the
188 * result.
189 */
190 fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
191 fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
192 vec4_result, surf_index, vec4_offset);
193 inst->size_written = 4 * vec4_result.component_size(inst->exec_size);
194
195 shuffle_from_32bit_read(bld, dst, vec4_result,
196 (const_offset & 0xf) / type_sz(dst.type), 1);
197 }
198
199 /**
200 * A helper for MOV generation for fixing up broken hardware SEND dependency
201 * handling.
202 */
203 void
204 fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
205 {
206    /* The caller always wants an uncompressed instruction, to emit the minimal
207     * extra dependencies and to avoid having to deal with aligning its regs to 2.
208 */
209 const fs_builder ubld = bld.annotate("send dependency resolve")
210 .half(0);
211
212 ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
213 }
214
215 bool
216 fs_inst::is_send_from_grf() const
217 {
218 switch (opcode) {
219 case SHADER_OPCODE_SEND:
220 case SHADER_OPCODE_SHADER_TIME_ADD:
221 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
222 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
223 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
224 case SHADER_OPCODE_UNTYPED_ATOMIC:
225 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT:
226 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
227 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
228 case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
229 case SHADER_OPCODE_BYTE_SCATTERED_READ:
230 case SHADER_OPCODE_TYPED_ATOMIC:
231 case SHADER_OPCODE_TYPED_SURFACE_READ:
232 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
233 case SHADER_OPCODE_IMAGE_SIZE:
234 case SHADER_OPCODE_URB_WRITE_SIMD8:
235 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
236 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
237 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
238 case SHADER_OPCODE_URB_READ_SIMD8:
239 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
240 return true;
241 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
242 return src[1].file == VGRF;
243 case FS_OPCODE_FB_WRITE:
244 case FS_OPCODE_FB_READ:
245 return src[0].file == VGRF;
246 default:
247 if (is_tex())
248 return src[0].file == VGRF;
249
250 return false;
251 }
252 }
253
254 /**
255 * Returns true if this instruction's sources and destinations cannot
256 * safely be the same register.
257 *
258 * In most cases, a register can be written over safely by the same
259 * instruction that is its last use. For a single instruction, the
260 * sources are dereferenced before writing of the destination starts
261 * (naturally).
262 *
263 * However, there are a few cases where this can be problematic:
264 *
265 * - Virtual opcodes that translate to multiple instructions in the
266 * code generator: if src == dst and one instruction writes the
267 * destination before a later instruction reads the source, then
268 * src will have been clobbered.
269 *
270 * - SIMD16 compressed instructions with certain regioning (see below).
271 *
272 * The register allocator uses this information to set up conflicts between
273 * GRF sources and the destination.
274 */
275 bool
276 fs_inst::has_source_and_destination_hazard() const
277 {
278 switch (opcode) {
279 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
280 /* Multiple partial writes to the destination */
281 return true;
282 case SHADER_OPCODE_SHUFFLE:
283 /* This instruction returns an arbitrary channel from the source and
284 * gets split into smaller instructions in the generator. It's possible
285 * that one of the instructions will read from a channel corresponding
286 * to an earlier instruction.
287 */
288 case SHADER_OPCODE_SEL_EXEC:
289 /* This is implemented as
290 *
291 * mov(16) g4<1>D 0D { align1 WE_all 1H };
292 * mov(16) g4<1>D g5<8,8,1>D { align1 1H }
293 *
294 * Because the source is only read in the second instruction, the first
295 * may stomp all over it.
296 */
297 return true;
298 case SHADER_OPCODE_QUAD_SWIZZLE:
299 switch (src[1].ud) {
300 case BRW_SWIZZLE_XXXX:
301 case BRW_SWIZZLE_YYYY:
302 case BRW_SWIZZLE_ZZZZ:
303 case BRW_SWIZZLE_WWWW:
304 case BRW_SWIZZLE_XXZZ:
305 case BRW_SWIZZLE_YYWW:
306 case BRW_SWIZZLE_XYXY:
307 case BRW_SWIZZLE_ZWZW:
308 /* These can be implemented as a single Align1 region on all
309 * platforms, so there's never a hazard between source and
310 * destination. C.f. fs_generator::generate_quad_swizzle().
311 */
312 return false;
313 default:
314 return !is_uniform(src[0]);
315 }
316 default:
317 /* The SIMD16 compressed instruction
318 *
319 * add(16) g4<1>F g4<8,8,1>F g6<8,8,1>F
320 *
321 * is actually decoded in hardware as:
322 *
323 * add(8) g4<1>F g4<8,8,1>F g6<8,8,1>F
324 * add(8) g5<1>F g5<8,8,1>F g7<8,8,1>F
325 *
326 * Which is safe. However, if we have uniform accesses
327 * happening, we get into trouble:
328 *
329 * add(8) g4<1>F g4<0,1,0>F g6<8,8,1>F
330 * add(8) g5<1>F g4<0,1,0>F g7<8,8,1>F
331 *
332 * Now our destination for the first instruction overwrote the
333 * second instruction's src0, and we get garbage for those 8
334 * pixels. There's a similar issue for the pre-gen6
335 * pixel_x/pixel_y, which are registers of 16-bit values and thus
336 * would get stomped by the first decode as well.
337 */
338 if (exec_size == 16) {
339 for (int i = 0; i < sources; i++) {
340 if (src[i].file == VGRF && (src[i].stride == 0 ||
341 src[i].type == BRW_REGISTER_TYPE_UW ||
342 src[i].type == BRW_REGISTER_TYPE_W ||
343 src[i].type == BRW_REGISTER_TYPE_UB ||
344 src[i].type == BRW_REGISTER_TYPE_B)) {
345 return true;
346 }
347 }
348 }
349 return false;
350 }
351 }
352
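/**
 * Returns true if this LOAD_PAYLOAD is just a verbatim copy of a single
 * contiguous VGRF: src[0] must start at offset 0 with stride 1, the copy
 * must cover the whole allocation, and every following source must be the
 * next consecutive chunk of the same register (whole GRFs for the header
 * sources, exec_size channels for the rest).
 */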
353 bool
354 fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
355 {
356 if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
357 return false;
358
359 fs_reg reg = this->src[0];
360 if (reg.file != VGRF || reg.offset != 0 || reg.stride != 1)
361 return false;
362
363 if (grf_alloc.sizes[reg.nr] * REG_SIZE != this->size_written)
364 return false;
365
366 for (int i = 0; i < this->sources; i++) {
367 reg.type = this->src[i].type;
368 if (!this->src[i].equals(reg))
369 return false;
370
371 if (i < this->header_size) {
372 reg.offset += REG_SIZE;
373 } else {
374 reg = horiz_offset(reg, this->exec_size);
375 }
376 }
377
378 return true;
379 }
380
381 bool
382 fs_inst::can_do_source_mods(const struct gen_device_info *devinfo) const
383 {
384 if (devinfo->gen == 6 && is_math())
385 return false;
386
387 if (is_send_from_grf())
388 return false;
389
390 if (!backend_instruction::can_do_source_mods())
391 return false;
392
393 return true;
394 }
395
396 bool
397 fs_inst::can_do_cmod()
398 {
399 if (!backend_instruction::can_do_cmod())
400 return false;
401
402 /* The accumulator result appears to get used for the conditional modifier
403 * generation. When negating a UD value, there is a 33rd bit generated for
404 * the sign in the accumulator value, so now you can't check, for example,
405 * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
406 */
407 for (unsigned i = 0; i < sources; i++) {
408 if (type_is_unsigned_int(src[i].type) && src[i].negate)
409 return false;
410 }
411
412 return true;
413 }
414
415 bool
416 fs_inst::can_change_types() const
417 {
418 return dst.type == src[0].type &&
419 !src[0].abs && !src[0].negate && !saturate &&
420 (opcode == BRW_OPCODE_MOV ||
421 (opcode == BRW_OPCODE_SEL &&
422 dst.type == src[1].type &&
423 predicate != BRW_PREDICATE_NONE &&
424 !src[1].abs && !src[1].negate));
425 }
426
427 void
428 fs_reg::init()
429 {
430 memset((void*)this, 0, sizeof(*this));
431 type = BRW_REGISTER_TYPE_UD;
432 stride = 1;
433 }
434
435 /** Generic unset register constructor. */
436 fs_reg::fs_reg()
437 {
438 init();
439 this->file = BAD_FILE;
440 }
441
442 fs_reg::fs_reg(struct ::brw_reg reg) :
443 backend_reg(reg)
444 {
445 this->offset = 0;
446 this->stride = 1;
447 if (this->file == IMM &&
448 (this->type != BRW_REGISTER_TYPE_V &&
449 this->type != BRW_REGISTER_TYPE_UV &&
450 this->type != BRW_REGISTER_TYPE_VF)) {
451 this->stride = 0;
452 }
453 }
454
455 bool
456 fs_reg::equals(const fs_reg &r) const
457 {
458 return (this->backend_reg::equals(r) &&
459 stride == r.stride);
460 }
461
462 bool
463 fs_reg::negative_equals(const fs_reg &r) const
464 {
465 return (this->backend_reg::negative_equals(r) &&
466 stride == r.stride);
467 }
468
469 bool
470 fs_reg::is_contiguous() const
471 {
472 return stride == 1;
473 }
474
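/**
 * Size in bytes of the region spanned by the first \p width channels of this
 * register, taking its stride into account. For example, a float register
 * read with stride 2 at SIMD8 spans 8 * 2 * 4 = 64 bytes.
 */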
475 unsigned
476 fs_reg::component_size(unsigned width) const
477 {
478 const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
479 hstride == 0 ? 0 :
480 1 << (hstride - 1));
481 return MAX2(width * stride, 1) * type_sz(type);
482 }
483
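/**
 * Number of 32-bit scalar slots a GLSL type occupies in the scalar backend:
 * 16-bit types are packed two per slot and 8-bit types four per slot, so
 * e.g. an f16vec3 takes 2 slots while a dvec2 takes 4.
 */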
484 extern "C" int
485 type_size_scalar(const struct glsl_type *type)
486 {
487 unsigned int size, i;
488
489 switch (type->base_type) {
490 case GLSL_TYPE_UINT:
491 case GLSL_TYPE_INT:
492 case GLSL_TYPE_FLOAT:
493 case GLSL_TYPE_BOOL:
494 return type->components();
495 case GLSL_TYPE_UINT16:
496 case GLSL_TYPE_INT16:
497 case GLSL_TYPE_FLOAT16:
498 return DIV_ROUND_UP(type->components(), 2);
499 case GLSL_TYPE_UINT8:
500 case GLSL_TYPE_INT8:
501 return DIV_ROUND_UP(type->components(), 4);
502 case GLSL_TYPE_DOUBLE:
503 case GLSL_TYPE_UINT64:
504 case GLSL_TYPE_INT64:
505 return type->components() * 2;
506 case GLSL_TYPE_ARRAY:
507 return type_size_scalar(type->fields.array) * type->length;
508 case GLSL_TYPE_STRUCT:
509 size = 0;
510 for (i = 0; i < type->length; i++) {
511 size += type_size_scalar(type->fields.structure[i].type);
512 }
513 return size;
514 case GLSL_TYPE_SAMPLER:
515 case GLSL_TYPE_ATOMIC_UINT:
516 case GLSL_TYPE_IMAGE:
517 /* Samplers, atomics, and images take up no register space, since
518 * they're baked in at link time.
519 */
520 return 0;
521 case GLSL_TYPE_SUBROUTINE:
522 return 1;
523 case GLSL_TYPE_VOID:
524 case GLSL_TYPE_ERROR:
525 case GLSL_TYPE_INTERFACE:
526 case GLSL_TYPE_FUNCTION:
527 unreachable("not reached");
528 }
529
530 return 0;
531 }
532
533 /**
534 * Create a MOV to read the timestamp register.
535 *
536 * The caller is responsible for emitting the MOV. The return value is
537 * the destination of the MOV, with extra parameters set.
538 */
539 fs_reg
540 fs_visitor::get_timestamp(const fs_builder &bld)
541 {
542 assert(devinfo->gen >= 7);
543
544 fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
545 BRW_ARF_TIMESTAMP,
546 0),
547 BRW_REGISTER_TYPE_UD));
548
549 fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
550
551    /* We want to read the 3 fields we care about regardless of whether the
552     * channels are enabled in the dispatch mask.
553 */
554 bld.group(4, 0).exec_all().MOV(dst, ts);
555
556 return dst;
557 }
558
559 void
560 fs_visitor::emit_shader_time_begin()
561 {
562 /* We want only the low 32 bits of the timestamp. Since it's running
563     * at the GPU clock rate of ~1.2 GHz, it will roll over every ~3 seconds,
564 * which is plenty of time for our purposes. It is identical across the
565 * EUs, but since it's tracking GPU core speed it will increment at a
566 * varying rate as render P-states change.
567 */
568 shader_start_time = component(
569 get_timestamp(bld.annotate("shader time start")), 0);
570 }
571
572 void
573 fs_visitor::emit_shader_time_end()
574 {
575 /* Insert our code just before the final SEND with EOT. */
576 exec_node *end = this->instructions.get_tail();
577 assert(end && ((fs_inst *) end)->eot);
578 const fs_builder ibld = bld.annotate("shader time end")
579 .exec_all().at(NULL, end);
580 const fs_reg timestamp = get_timestamp(ibld);
581
582 /* We only use the low 32 bits of the timestamp - see
583     * emit_shader_time_begin().
584 *
585 * We could also check if render P-states have changed (or anything
586 * else that might disrupt timing) by setting smear to 2 and checking if
587 * that field is != 0.
588 */
589 const fs_reg shader_end_time = component(timestamp, 0);
590
591 /* Check that there weren't any timestamp reset events (assuming these
592 * were the only two timestamp reads that happened).
593 */
594 const fs_reg reset = component(timestamp, 2);
595 set_condmod(BRW_CONDITIONAL_Z,
596 ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
597 ibld.IF(BRW_PREDICATE_NORMAL);
598
599 fs_reg start = shader_start_time;
600 start.negate = true;
601 const fs_reg diff = component(fs_reg(VGRF, alloc.allocate(1),
602 BRW_REGISTER_TYPE_UD),
603 0);
604 const fs_builder cbld = ibld.group(1, 0);
605 cbld.group(1, 0).ADD(diff, start, shader_end_time);
606
607 /* If there were no instructions between the two timestamp gets, the diff
608 * is 2 cycles. Remove that overhead, so I can forget about that when
609 * trying to determine the time taken for single instructions.
610 */
611 cbld.ADD(diff, diff, brw_imm_ud(-2u));
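   /* Slot 0 accumulates the elapsed cycles and slot 1 counts the samples that
    * went into it; if a timestamp reset was detected we only bump slot 2 so
    * the measurement isn't skewed.
    */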
612 SHADER_TIME_ADD(cbld, 0, diff);
613 SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
614 ibld.emit(BRW_OPCODE_ELSE);
615 SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
616 ibld.emit(BRW_OPCODE_ENDIF);
617 }
618
619 void
620 fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
621 int shader_time_subindex,
622 fs_reg value)
623 {
624 int index = shader_time_index * 3 + shader_time_subindex;
625 struct brw_reg offset = brw_imm_d(index * BRW_SHADER_TIME_STRIDE);
626
627 fs_reg payload;
628 if (dispatch_width == 8)
629 payload = vgrf(glsl_type::uvec2_type);
630 else
631 payload = vgrf(glsl_type::uint_type);
632
633 bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
634 }
635
636 void
637 fs_visitor::vfail(const char *format, va_list va)
638 {
639 char *msg;
640
641 if (failed)
642 return;
643
644 failed = true;
645
646 msg = ralloc_vasprintf(mem_ctx, format, va);
647 msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);
648
649 this->fail_msg = msg;
650
651 if (debug_enabled) {
652 fprintf(stderr, "%s", msg);
653 }
654 }
655
656 void
657 fs_visitor::fail(const char *format, ...)
658 {
659 va_list va;
660
661 va_start(va, format);
662 vfail(format, va);
663 va_end(va);
664 }
665
666 /**
667 * Mark this program as impossible to compile with dispatch width greater
668 * than n.
669 *
670 * During the SIMD8 compile (which happens first), we can detect and flag
671 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
672 * SIMD16+ compile altogether.
673 *
674 * During a compile of dispatch width greater than n (if one happens anyway),
675 * this just calls fail().
676 */
677 void
678 fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
679 {
680 if (dispatch_width > n) {
681 fail("%s", msg);
682 } else {
683 max_dispatch_width = n;
684 compiler->shader_perf_log(log_data,
685 "Shader dispatch width limited to SIMD%d: %s",
686 n, msg);
687 }
688 }
689
690 /**
691 * Returns true if the instruction has a flag that means it won't
692 * update an entire destination register.
693 *
694 * For example, dead code elimination and live variable analysis want to know
695 * when a write to a variable screens off any preceding values that were in
696 * it.
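 *
 * For example, a predicated MOV (other than SEL) counts as a partial write,
 * as does a SIMD8 write of W-typed data that only covers 16 of the 32 bytes
 * in a GRF.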
697 */
698 bool
699 fs_inst::is_partial_write() const
700 {
701 return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
702 (this->exec_size * type_sz(this->dst.type)) < 32 ||
703 !this->dst.is_contiguous() ||
704 this->dst.offset % REG_SIZE != 0);
705 }
706
707 unsigned
708 fs_inst::components_read(unsigned i) const
709 {
710 /* Return zero if the source is not present. */
711 if (src[i].file == BAD_FILE)
712 return 0;
713
714 switch (opcode) {
715 case FS_OPCODE_LINTERP:
716 if (i == 0)
717 return 2;
718 else
719 return 1;
720
721 case FS_OPCODE_PIXEL_X:
722 case FS_OPCODE_PIXEL_Y:
723 assert(i == 0);
724 return 2;
725
726 case FS_OPCODE_FB_WRITE_LOGICAL:
727 assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
728 /* First/second FB write color. */
729 if (i < 2)
730 return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
731 else
732 return 1;
733
734 case SHADER_OPCODE_TEX_LOGICAL:
735 case SHADER_OPCODE_TXD_LOGICAL:
736 case SHADER_OPCODE_TXF_LOGICAL:
737 case SHADER_OPCODE_TXL_LOGICAL:
738 case SHADER_OPCODE_TXS_LOGICAL:
739 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
740 case FS_OPCODE_TXB_LOGICAL:
741 case SHADER_OPCODE_TXF_CMS_LOGICAL:
742 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
743 case SHADER_OPCODE_TXF_UMS_LOGICAL:
744 case SHADER_OPCODE_TXF_MCS_LOGICAL:
745 case SHADER_OPCODE_LOD_LOGICAL:
746 case SHADER_OPCODE_TG4_LOGICAL:
747 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
748 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
749 assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
750 src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
751 /* Texture coordinates. */
752 if (i == TEX_LOGICAL_SRC_COORDINATE)
753 return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
754 /* Texture derivatives. */
755 else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
756 opcode == SHADER_OPCODE_TXD_LOGICAL)
757 return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
758 /* Texture offset. */
759 else if (i == TEX_LOGICAL_SRC_TG4_OFFSET)
760 return 2;
761 /* MCS */
762 else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
763 return 2;
764 else
765 return 1;
766
767 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
768 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
769 assert(src[3].file == IMM);
770 /* Surface coordinates. */
771 if (i == 0)
772 return src[3].ud;
773 /* Surface operation source (ignored for reads). */
774 else if (i == 1)
775 return 0;
776 else
777 return 1;
778
779 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
780 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
781 assert(src[3].file == IMM &&
782 src[4].file == IMM);
783 /* Surface coordinates. */
784 if (i == 0)
785 return src[3].ud;
786 /* Surface operation source. */
787 else if (i == 1)
788 return src[4].ud;
789 else
790 return 1;
791
792 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
793 /* Scattered logical opcodes use the following params:
794 * src[0] Surface coordinates
795 * src[1] Surface operation source (ignored for reads)
796 * src[2] Surface
797     * src[3] IMM holding the number of dimensions (always 1).
798     * src[4] IMM holding the bit size of the scattered read/write: 8, 16 or 32
799 */
800 assert(src[3].file == IMM &&
801 src[4].file == IMM);
802 return i == 1 ? 0 : 1;
803
804 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
805 assert(src[3].file == IMM &&
806 src[4].file == IMM);
807 return 1;
808
809 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
810 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
811 assert(src[3].file == IMM &&
812 src[4].file == IMM);
813 const unsigned op = src[4].ud;
814 /* Surface coordinates. */
815 if (i == 0)
816 return src[3].ud;
817 /* Surface operation source. */
818 else if (i == 1 && op == BRW_AOP_CMPWR)
819 return 2;
820 else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
821 op == BRW_AOP_PREDEC))
822 return 0;
823 else
824 return 1;
825 }
826 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
827 return (i == 0 ? 2 : 1);
828
829 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL: {
830 assert(src[3].file == IMM &&
831 src[4].file == IMM);
832 const unsigned op = src[4].ud;
833 /* Surface coordinates. */
834 if (i == 0)
835 return src[3].ud;
836 /* Surface operation source. */
837 else if (i == 1 && op == BRW_AOP_FCMPWR)
838 return 2;
839 else
840 return 1;
841 }
842
843 default:
844 return 1;
845 }
846 }
847
848 unsigned
849 fs_inst::size_read(int arg) const
850 {
851 switch (opcode) {
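   /* For the split SEND, src[0] and src[1] hold the descriptors, while src[2]
    * and src[3] are the main and extended payloads, sized in whole GRFs by
    * mlen and ex_mlen respectively.
    */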
852 case SHADER_OPCODE_SEND:
853 if (arg == 2) {
854 return mlen * REG_SIZE;
855 } else if (arg == 3) {
856 return ex_mlen * REG_SIZE;
857 }
858 break;
859
860 case FS_OPCODE_FB_WRITE:
861 case FS_OPCODE_REP_FB_WRITE:
862 if (arg == 0) {
863 if (base_mrf >= 0)
864 return src[0].file == BAD_FILE ? 0 : 2 * REG_SIZE;
865 else
866 return mlen * REG_SIZE;
867 }
868 break;
869
870 case FS_OPCODE_FB_READ:
871 case SHADER_OPCODE_URB_WRITE_SIMD8:
872 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
873 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
874 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
875 case SHADER_OPCODE_URB_READ_SIMD8:
876 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
877 case SHADER_OPCODE_UNTYPED_ATOMIC:
878 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT:
879 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
880 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
881 case SHADER_OPCODE_TYPED_ATOMIC:
882 case SHADER_OPCODE_TYPED_SURFACE_READ:
883 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
884 case SHADER_OPCODE_IMAGE_SIZE:
885 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
886 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
887 case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
888 case SHADER_OPCODE_BYTE_SCATTERED_READ:
889 if (arg == 0)
890 return mlen * REG_SIZE;
891 break;
892
893 case FS_OPCODE_SET_SAMPLE_ID:
894 if (arg == 1)
895 return 1;
896 break;
897
898 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
899 /* The payload is actually stored in src1 */
900 if (arg == 1)
901 return mlen * REG_SIZE;
902 break;
903
904 case FS_OPCODE_LINTERP:
905 if (arg == 1)
906 return 16;
907 break;
908
909 case SHADER_OPCODE_LOAD_PAYLOAD:
910 if (arg < this->header_size)
911 return REG_SIZE;
912 break;
913
914 case CS_OPCODE_CS_TERMINATE:
915 case SHADER_OPCODE_BARRIER:
916 return REG_SIZE;
917
918 case SHADER_OPCODE_MOV_INDIRECT:
919 if (arg == 0) {
920 assert(src[2].file == IMM);
921 return src[2].ud;
922 }
923 break;
924
925 default:
926 if (is_tex() && arg == 0 && src[0].file == VGRF)
927 return mlen * REG_SIZE;
928 break;
929 }
930
931 switch (src[arg].file) {
932 case UNIFORM:
933 case IMM:
934 return components_read(arg) * type_sz(src[arg].type);
935 case BAD_FILE:
936 case ARF:
937 case FIXED_GRF:
938 case VGRF:
939 case ATTR:
940 return components_read(arg) * src[arg].component_size(exec_size);
941 case MRF:
942 unreachable("MRF registers are not allowed as sources");
943 }
944 return 0;
945 }
946
947 namespace {
948 /* Return the subset of flag registers that an instruction could
949 * potentially read or write based on the execution controls and flag
950 * subregister number of the instruction.
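    * For example, a SIMD16 instruction with flag_subreg 1 and group 0 covers
    * flag bits 16..31 (all of f0.1), so the returned mask is 0xc; each bit of
    * the mask stands for 8 flag bits.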
951 */
952 unsigned
953 flag_mask(const fs_inst *inst)
954 {
955 const unsigned start = inst->flag_subreg * 16 + inst->group;
956 const unsigned end = start + inst->exec_size;
957 return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
958 }
959
960 unsigned
961 bit_mask(unsigned n)
962 {
963 return (n >= CHAR_BIT * sizeof(bit_mask(n)) ? ~0u : (1u << n) - 1);
964 }
965
966 unsigned
967 flag_mask(const fs_reg &r, unsigned sz)
968 {
969 if (r.file == ARF) {
970 const unsigned start = (r.nr - BRW_ARF_FLAG) * 4 + r.subnr;
971 const unsigned end = start + sz;
972 return bit_mask(end) & ~bit_mask(start);
973 } else {
974 return 0;
975 }
976 }
977 }
978
979 unsigned
980 fs_inst::flags_read(const gen_device_info *devinfo) const
981 {
982 if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
983 predicate == BRW_PREDICATE_ALIGN1_ALLV) {
984 /* The vertical predication modes combine corresponding bits from
985 * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
986 */
987 const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
988 return flag_mask(this) << shift | flag_mask(this);
989 } else if (predicate) {
990 return flag_mask(this);
991 } else {
992 unsigned mask = 0;
993 for (int i = 0; i < sources; i++) {
994 mask |= flag_mask(src[i], size_read(i));
995 }
996 return mask;
997 }
998 }
999
1000 unsigned
1001 fs_inst::flags_written() const
1002 {
1003 if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
1004 opcode != BRW_OPCODE_CSEL &&
1005 opcode != BRW_OPCODE_IF &&
1006 opcode != BRW_OPCODE_WHILE)) ||
1007 opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL ||
1008 opcode == FS_OPCODE_FB_WRITE) {
1009 return flag_mask(this);
1010 } else {
1011 return flag_mask(dst, size_written);
1012 }
1013 }
1014
1015 /**
1016 * Returns how many MRFs an FS opcode will write over.
1017 *
1018 * Note that this is not the 0 or 1 implied writes in an actual gen
1019 * instruction -- the FS opcodes often generate MOVs in addition.
1020 */
1021 int
1022 fs_visitor::implied_mrf_writes(fs_inst *inst) const
1023 {
1024 if (inst->mlen == 0)
1025 return 0;
1026
1027 if (inst->base_mrf == -1)
1028 return 0;
1029
1030 switch (inst->opcode) {
1031 case SHADER_OPCODE_RCP:
1032 case SHADER_OPCODE_RSQ:
1033 case SHADER_OPCODE_SQRT:
1034 case SHADER_OPCODE_EXP2:
1035 case SHADER_OPCODE_LOG2:
1036 case SHADER_OPCODE_SIN:
1037 case SHADER_OPCODE_COS:
1038 return 1 * dispatch_width / 8;
1039 case SHADER_OPCODE_POW:
1040 case SHADER_OPCODE_INT_QUOTIENT:
1041 case SHADER_OPCODE_INT_REMAINDER:
1042 return 2 * dispatch_width / 8;
1043 case SHADER_OPCODE_TEX:
1044 case FS_OPCODE_TXB:
1045 case SHADER_OPCODE_TXD:
1046 case SHADER_OPCODE_TXF:
1047 case SHADER_OPCODE_TXF_CMS:
1048 case SHADER_OPCODE_TXF_MCS:
1049 case SHADER_OPCODE_TG4:
1050 case SHADER_OPCODE_TG4_OFFSET:
1051 case SHADER_OPCODE_TXL:
1052 case SHADER_OPCODE_TXS:
1053 case SHADER_OPCODE_LOD:
1054 case SHADER_OPCODE_SAMPLEINFO:
1055 return 1;
1056 case FS_OPCODE_FB_WRITE:
1057 case FS_OPCODE_REP_FB_WRITE:
1058 return inst->src[0].file == BAD_FILE ? 0 : 2;
1059 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
1060 case SHADER_OPCODE_GEN4_SCRATCH_READ:
1061 return 1;
1062 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
1063 return inst->mlen;
1064 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
1065 return inst->mlen;
1066 default:
1067 unreachable("not reached");
1068 }
1069 }
1070
1071 fs_reg
1072 fs_visitor::vgrf(const glsl_type *const type)
1073 {
1074 int reg_width = dispatch_width / 8;
1075 return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
1076 brw_type_for_base_type(type));
1077 }
1078
1079 fs_reg::fs_reg(enum brw_reg_file file, int nr)
1080 {
1081 init();
1082 this->file = file;
1083 this->nr = nr;
1084 this->type = BRW_REGISTER_TYPE_F;
1085 this->stride = (file == UNIFORM ? 0 : 1);
1086 }
1087
1088 fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
1089 {
1090 init();
1091 this->file = file;
1092 this->nr = nr;
1093 this->type = type;
1094 this->stride = (file == UNIFORM ? 0 : 1);
1095 }
1096
1097 /* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
1098  * This brings in those uniform definitions.
1099 */
1100 void
1101 fs_visitor::import_uniforms(fs_visitor *v)
1102 {
1103 this->push_constant_loc = v->push_constant_loc;
1104 this->pull_constant_loc = v->pull_constant_loc;
1105 this->uniforms = v->uniforms;
1106 this->subgroup_id = v->subgroup_id;
1107 }
1108
1109 void
1110 fs_visitor::emit_fragcoord_interpolation(fs_reg wpos)
1111 {
1112 assert(stage == MESA_SHADER_FRAGMENT);
1113
1114 /* gl_FragCoord.x */
1115 bld.MOV(wpos, this->pixel_x);
1116 wpos = offset(wpos, bld, 1);
1117
1118 /* gl_FragCoord.y */
1119 bld.MOV(wpos, this->pixel_y);
1120 wpos = offset(wpos, bld, 1);
1121
1122 /* gl_FragCoord.z */
1123 if (devinfo->gen >= 6) {
1124 bld.MOV(wpos, fetch_payload_reg(bld, payload.source_depth_reg));
1125 } else {
1126 bld.emit(FS_OPCODE_LINTERP, wpos,
1127 this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
1128 component(interp_reg(VARYING_SLOT_POS, 2), 0));
1129 }
1130 wpos = offset(wpos, bld, 1);
1131
1132 /* gl_FragCoord.w: Already set up in emit_interpolation */
1133 bld.MOV(wpos, this->wpos_w);
1134 }
1135
1136 enum brw_barycentric_mode
1137 brw_barycentric_mode(enum glsl_interp_mode mode, nir_intrinsic_op op)
1138 {
1139 /* Barycentric modes don't make sense for flat inputs. */
1140 assert(mode != INTERP_MODE_FLAT);
1141
1142 unsigned bary;
1143 switch (op) {
1144 case nir_intrinsic_load_barycentric_pixel:
1145 case nir_intrinsic_load_barycentric_at_offset:
1146 bary = BRW_BARYCENTRIC_PERSPECTIVE_PIXEL;
1147 break;
1148 case nir_intrinsic_load_barycentric_centroid:
1149 bary = BRW_BARYCENTRIC_PERSPECTIVE_CENTROID;
1150 break;
1151 case nir_intrinsic_load_barycentric_sample:
1152 case nir_intrinsic_load_barycentric_at_sample:
1153 bary = BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE;
1154 break;
1155 default:
1156 unreachable("invalid intrinsic");
1157 }
1158
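   /* The brw_barycentric_mode enum puts the three NONPERSPECTIVE modes right
    * after their PERSPECTIVE counterparts, so non-perspective interpolation is
    * just a fixed offset of 3.
    */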
1159 if (mode == INTERP_MODE_NOPERSPECTIVE)
1160 bary += 3;
1161
1162 return (enum brw_barycentric_mode) bary;
1163 }
1164
1165 /**
1166 * Turn one of the two CENTROID barycentric modes into PIXEL mode.
1167 */
1168 static enum brw_barycentric_mode
1169 centroid_to_pixel(enum brw_barycentric_mode bary)
1170 {
1171 assert(bary == BRW_BARYCENTRIC_PERSPECTIVE_CENTROID ||
1172 bary == BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);
1173 return (enum brw_barycentric_mode) ((unsigned) bary - 1);
1174 }
1175
1176 fs_reg *
1177 fs_visitor::emit_frontfacing_interpolation()
1178 {
1179 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));
1180
1181 if (devinfo->gen >= 6) {
1182 /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
1183 * a boolean result from this (~0/true or 0/false).
1184 *
1185 * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
1186 * this task in only one instruction:
1187 * - a negation source modifier will flip the bit; and
1188 * - a W -> D type conversion will sign extend the bit into the high
1189 * word of the destination.
1190 *
1191 * An ASR 15 fills the low word of the destination.
1192 */
1193 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
1194 g0.negate = true;
1195
1196 bld.ASR(*reg, g0, brw_imm_d(15));
1197 } else {
1198 /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
1199 * a boolean result from this (1/true or 0/false).
1200 *
1201 * Like in the above case, since the bit is the MSB of g1.6:UD we can use
1202 * the negation source modifier to flip it. Unfortunately the SHR
1203 * instruction only operates on UD (or D with an abs source modifier)
1204 * sources without negation.
1205 *
1206 * Instead, use ASR (which will give ~0/true or 0/false).
1207 */
1208 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
1209 g1_6.negate = true;
1210
1211 bld.ASR(*reg, g1_6, brw_imm_d(31));
1212 }
1213
1214 return reg;
1215 }
1216
1217 void
1218 fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
1219 {
1220 assert(stage == MESA_SHADER_FRAGMENT);
1221 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
1222 assert(dst.type == BRW_REGISTER_TYPE_F);
1223
1224 if (wm_prog_data->persample_dispatch) {
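      /* The payload delivers sample positions as offsets in 1/16ths of a
       * pixel, hence the 1/16 scale below to get them into [0, 1].
       */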
1225 /* Convert int_sample_pos to floating point */
1226 bld.MOV(dst, int_sample_pos);
1227 /* Scale to the range [0, 1] */
1228 bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
1229 }
1230 else {
1231 /* From ARB_sample_shading specification:
1232 * "When rendering to a non-multisample buffer, or if multisample
1233 * rasterization is disabled, gl_SamplePosition will always be
1234        *  (0.5, 0.5)."
1235 */
1236 bld.MOV(dst, brw_imm_f(0.5f));
1237 }
1238 }
1239
1240 fs_reg *
1241 fs_visitor::emit_samplepos_setup()
1242 {
1243 assert(devinfo->gen >= 6);
1244
1245 const fs_builder abld = bld.annotate("compute sample position");
1246 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
1247 fs_reg pos = *reg;
1248 fs_reg int_sample_x = vgrf(glsl_type::int_type);
1249 fs_reg int_sample_y = vgrf(glsl_type::int_type);
1250
1251 /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
1252 * mode will be enabled.
1253 *
1254 * From the Ivy Bridge PRM, volume 2 part 1, page 344:
1255 * R31.1:0 Position Offset X/Y for Slot[3:0]
1256 * R31.3:2 Position Offset X/Y for Slot[7:4]
1257 * .....
1258 *
1259 * The X, Y sample positions come in as bytes in thread payload. So, read
1260 * the positions using vstride=16, width=8, hstride=2.
1261 */
1262 const fs_reg sample_pos_reg =
1263 fetch_payload_reg(abld, payload.sample_pos_reg, BRW_REGISTER_TYPE_W);
1264
1265 /* Compute gl_SamplePosition.x */
1266 abld.MOV(int_sample_x, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 0));
1267 compute_sample_position(offset(pos, abld, 0), int_sample_x);
1268
1269 /* Compute gl_SamplePosition.y */
1270 abld.MOV(int_sample_y, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 1));
1271 compute_sample_position(offset(pos, abld, 1), int_sample_y);
1272 return reg;
1273 }
1274
1275 fs_reg *
1276 fs_visitor::emit_sampleid_setup()
1277 {
1278 assert(stage == MESA_SHADER_FRAGMENT);
1279 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1280 assert(devinfo->gen >= 6);
1281
1282 const fs_builder abld = bld.annotate("compute sample id");
1283 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uint_type));
1284
1285 if (!key->multisample_fbo) {
1286 /* As per GL_ARB_sample_shading specification:
1287 * "When rendering to a non-multisample buffer, or if multisample
1288 * rasterization is disabled, gl_SampleID will always be zero."
1289 */
1290 abld.MOV(*reg, brw_imm_d(0));
1291 } else if (devinfo->gen >= 8) {
1292 /* Sample ID comes in as 4-bit numbers in g1.0:
1293 *
1294 * 15:12 Slot 3 SampleID (only used in SIMD16)
1295 * 11:8 Slot 2 SampleID (only used in SIMD16)
1296 * 7:4 Slot 1 SampleID
1297 * 3:0 Slot 0 SampleID
1298 *
1299 * Each slot corresponds to four channels, so we want to replicate each
1300 * half-byte value to 4 channels in a row:
1301 *
1302 * dst+0: .7 .6 .5 .4 .3 .2 .1 .0
1303 * 7:4 7:4 7:4 7:4 3:0 3:0 3:0 3:0
1304 *
1305 * dst+1: .7 .6 .5 .4 .3 .2 .1 .0 (if SIMD16)
1306 * 15:12 15:12 15:12 15:12 11:8 11:8 11:8 11:8
1307 *
1308 * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
1309 * channels to read the first byte (7:0), and the second group of 8
1310 * channels to read the second byte (15:8). Then, we shift right by
1311 * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
1312 * values into place. Finally, we AND with 0xf to keep the low nibble.
1313 *
1314 * shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
1315 * and(16) dst<1>D tmp<8,8,1>W 0xf:W
1316 *
1317 * TODO: These payload bits exist on Gen7 too, but they appear to always
1318 * be zero, so this code fails to work. We should find out why.
1319 */
1320 const fs_reg tmp = abld.vgrf(BRW_REGISTER_TYPE_UW);
1321
1322 for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
1323 const fs_builder hbld = abld.group(MIN2(16, dispatch_width), i);
1324 hbld.SHR(offset(tmp, hbld, i),
1325 stride(retype(brw_vec1_grf(1 + i, 0), BRW_REGISTER_TYPE_UB),
1326 1, 8, 0),
1327 brw_imm_v(0x44440000));
1328 }
1329
1330 abld.AND(*reg, tmp, brw_imm_w(0xf));
1331 } else {
1332 const fs_reg t1 = component(abld.vgrf(BRW_REGISTER_TYPE_UD), 0);
1333 const fs_reg t2 = abld.vgrf(BRW_REGISTER_TYPE_UW);
1334
1335 /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
1336 * 8x multisampling, subspan 0 will represent sample N (where N
1337 * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
1338 * 7. We can find the value of N by looking at R0.0 bits 7:6
1339 * ("Starting Sample Pair Index (SSPI)") and multiplying by two
1340 * (since samples are always delivered in pairs). That is, we
1341 * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
1342 * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
1343 * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
1344 * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
1345 * populating a temporary variable with the sequence (0, 1, 2, 3),
1346 * and then reading from it using vstride=1, width=4, hstride=0.
1347 * These computations hold good for 4x multisampling as well.
1348 *
1349 * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
1350 * the first four slots are sample 0 of subspan 0; the next four
1351 * are sample 1 of subspan 0; the third group is sample 0 of
1352 * subspan 1, and finally sample 1 of subspan 1.
1353 */
1354
1355 /* SKL+ has an extra bit for the Starting Sample Pair Index to
1356        * accommodate 16x MSAA.
1357 */
1358 abld.exec_all().group(1, 0)
1359 .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
1360 brw_imm_ud(0xc0));
1361 abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));
1362
1363 /* This works for SIMD8-SIMD16. It also works for SIMD32 but only if we
1364 * can assume 4x MSAA. Disallow it on IVB+
1365 *
1366 * FINISHME: One day, we could come up with a way to do this that
1367 * actually works on gen7.
1368 */
1369 if (devinfo->gen >= 7)
1370 limit_dispatch_width(16, "gl_SampleId is unsupported in SIMD32 on gen7");
1371 abld.exec_all().group(8, 0).MOV(t2, brw_imm_v(0x32103210));
1372
1373 /* This special instruction takes care of setting vstride=1,
1374 * width=4, hstride=0 of t2 during an ADD instruction.
1375 */
1376 abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
1377 }
1378
1379 return reg;
1380 }
1381
1382 fs_reg *
1383 fs_visitor::emit_samplemaskin_setup()
1384 {
1385 assert(stage == MESA_SHADER_FRAGMENT);
1386 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
1387 assert(devinfo->gen >= 6);
1388
1389 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1390
1391 fs_reg coverage_mask =
1392 fetch_payload_reg(bld, payload.sample_mask_in_reg, BRW_REGISTER_TYPE_D);
1393
1394 if (wm_prog_data->persample_dispatch) {
1395 /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
1396 * and a mask representing which sample is being processed by the
1397 * current shader invocation.
1398 *
1399 * From the OES_sample_variables specification:
1400 * "When per-sample shading is active due to the use of a fragment input
1401 * qualified by "sample" or due to the use of the gl_SampleID or
1402 * gl_SamplePosition variables, only the bit for the current sample is
1403 * set in gl_SampleMaskIn."
1404 */
1405 const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");
1406
1407 if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
1408 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
1409
1410 fs_reg one = vgrf(glsl_type::int_type);
1411 fs_reg enabled_mask = vgrf(glsl_type::int_type);
1412 abld.MOV(one, brw_imm_d(1));
1413 abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
1414 abld.AND(*reg, enabled_mask, coverage_mask);
1415 } else {
1416 /* In per-pixel mode, the coverage mask is sufficient. */
1417 *reg = coverage_mask;
1418 }
1419 return reg;
1420 }
1421
1422 fs_reg
1423 fs_visitor::resolve_source_modifiers(const fs_reg &src)
1424 {
1425 if (!src.abs && !src.negate)
1426 return src;
1427
1428 fs_reg temp = bld.vgrf(src.type);
1429 bld.MOV(temp, src);
1430
1431 return temp;
1432 }
1433
1434 void
1435 fs_visitor::emit_discard_jump()
1436 {
1437 assert(brw_wm_prog_data(this->prog_data)->uses_kill);
1438
1439 /* For performance, after a discard, jump to the end of the
1440 * shader if all relevant channels have been discarded.
1441 */
1442 fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
1443 discard_jump->flag_subreg = 1;
1444
1445 discard_jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
1446 discard_jump->predicate_inverse = true;
1447 }
1448
1449 void
1450 fs_visitor::emit_gs_thread_end()
1451 {
1452 assert(stage == MESA_SHADER_GEOMETRY);
1453
1454 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
1455
1456 if (gs_compile->control_data_header_size_bits > 0) {
1457 emit_gs_control_data_bits(this->final_gs_vertex_count);
1458 }
1459
1460 const fs_builder abld = bld.annotate("thread end");
1461 fs_inst *inst;
1462
1463 if (gs_prog_data->static_vertex_count != -1) {
1464 foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
1465 if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
1466 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
1467 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
1468 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
1469 prev->eot = true;
1470
1471 /* Delete now dead instructions. */
1472 foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
1473 if (dead == prev)
1474 break;
1475 dead->remove();
1476 }
1477 return;
1478 } else if (prev->is_control_flow() || prev->has_side_effects()) {
1479 break;
1480 }
1481 }
1482 fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1483 abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
1484 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
1485 inst->mlen = 1;
1486 } else {
1487 fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1488 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
1489 sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1490 sources[1] = this->final_gs_vertex_count;
1491 abld.LOAD_PAYLOAD(payload, sources, 2, 2);
1492 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
1493 inst->mlen = 2;
1494 }
1495 inst->eot = true;
1496 inst->offset = 0;
1497 }
1498
1499 void
1500 fs_visitor::assign_curb_setup()
1501 {
1502 unsigned uniform_push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);
1503
1504 unsigned ubo_push_length = 0;
1505 unsigned ubo_push_start[4];
1506 for (int i = 0; i < 4; i++) {
1507 ubo_push_start[i] = 8 * (ubo_push_length + uniform_push_length);
1508 ubo_push_length += stage_prog_data->ubo_ranges[i].length;
1509 }
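   /* Push constant space is laid out as uniform_push_length GRFs of regular
    * uniforms followed by up to four pushed UBO ranges; ubo_push_start[]
    * records where each range begins, in 32-bit units (8 per GRF).
    */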
1510
1511 prog_data->curb_read_length = uniform_push_length + ubo_push_length;
1512
1513 /* Map the offsets in the UNIFORM file to fixed HW regs. */
1514 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1515 for (unsigned int i = 0; i < inst->sources; i++) {
1516 if (inst->src[i].file == UNIFORM) {
1517 int uniform_nr = inst->src[i].nr + inst->src[i].offset / 4;
1518 int constant_nr;
1519 if (inst->src[i].nr >= UBO_START) {
1520 /* constant_nr is in 32-bit units, the rest are in bytes */
1521 constant_nr = ubo_push_start[inst->src[i].nr - UBO_START] +
1522 inst->src[i].offset / 4;
1523 } else if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
1524 constant_nr = push_constant_loc[uniform_nr];
1525 } else {
1526 /* Section 5.11 of the OpenGL 4.1 spec says:
1527 * "Out-of-bounds reads return undefined values, which include
1528 * values from other variables of the active program or zero."
1529 * Just return the first push constant.
1530 */
1531 constant_nr = 0;
1532 }
1533
1534 struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
1535 constant_nr / 8,
1536 constant_nr % 8);
1537 brw_reg.abs = inst->src[i].abs;
1538 brw_reg.negate = inst->src[i].negate;
1539
1540 assert(inst->src[i].stride == 0);
1541 inst->src[i] = byte_offset(
1542 retype(brw_reg, inst->src[i].type),
1543 inst->src[i].offset % 4);
1544 }
1545 }
1546 }
1547
1548 /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
1549 this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
1550 }
1551
1552 void
1553 fs_visitor::calculate_urb_setup()
1554 {
1555 assert(stage == MESA_SHADER_FRAGMENT);
1556 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1557 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1558
1559 memset(prog_data->urb_setup, -1,
1560 sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
1561
1562 int urb_next = 0;
1563 /* Figure out where each of the incoming setup attributes lands. */
1564 if (devinfo->gen >= 6) {
1565 if (util_bitcount64(nir->info.inputs_read &
1566 BRW_FS_VARYING_INPUT_MASK) <= 16) {
1567 /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
1568 * first 16 varying inputs, so we can put them wherever we want.
1569 * Just put them in order.
1570 *
1571 * This is useful because it means that (a) inputs not used by the
1572 * fragment shader won't take up valuable register space, and (b) we
1573 * won't have to recompile the fragment shader if it gets paired with
1574 * a different vertex (or geometry) shader.
1575 */
1576 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1577 if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1578 BITFIELD64_BIT(i)) {
1579 prog_data->urb_setup[i] = urb_next++;
1580 }
1581 }
1582 } else {
1583 /* We have enough input varyings that the SF/SBE pipeline stage can't
1584 * arbitrarily rearrange them to suit our whim; we have to put them
1585 * in an order that matches the output of the previous pipeline stage
1586 * (geometry or vertex shader).
1587 */
1588 struct brw_vue_map prev_stage_vue_map;
1589 brw_compute_vue_map(devinfo, &prev_stage_vue_map,
1590 key->input_slots_valid,
1591 nir->info.separate_shader);
1592
1593 int first_slot =
1594 brw_compute_first_urb_slot_required(nir->info.inputs_read,
1595 &prev_stage_vue_map);
1596
1597 assert(prev_stage_vue_map.num_slots <= first_slot + 32);
1598 for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
1599 slot++) {
1600 int varying = prev_stage_vue_map.slot_to_varying[slot];
1601 if (varying != BRW_VARYING_SLOT_PAD &&
1602 (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1603 BITFIELD64_BIT(varying))) {
1604 prog_data->urb_setup[varying] = slot - first_slot;
1605 }
1606 }
1607 urb_next = prev_stage_vue_map.num_slots - first_slot;
1608 }
1609 } else {
1610 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
1611 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1612 /* Point size is packed into the header, not as a general attribute */
1613 if (i == VARYING_SLOT_PSIZ)
1614 continue;
1615
1616 if (key->input_slots_valid & BITFIELD64_BIT(i)) {
1617 /* The back color slot is skipped when the front color is
1618 * also written to. In addition, some slots can be
1619 * written in the vertex shader and not read in the
1620 * fragment shader. So the register number must always be
1621 * incremented, mapped or not.
1622 */
1623 if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
1624 prog_data->urb_setup[i] = urb_next;
1625 urb_next++;
1626 }
1627 }
1628
1629 /*
1630        * It's an FS-only attribute, and we did interpolation for this attribute
1631        * in the SF thread. So, count it here, too.
1632 *
1633 * See compile_sf_prog() for more info.
1634 */
1635 if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
1636 prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
1637 }
1638
1639 prog_data->num_varying_inputs = urb_next;
1640 }
1641
1642 void
1643 fs_visitor::assign_urb_setup()
1644 {
1645 assert(stage == MESA_SHADER_FRAGMENT);
1646 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1647
1648 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1649
1650 /* Offset all the urb_setup[] index by the actual position of the
1651 * setup regs, now that the location of the constants has been chosen.
1652 */
1653 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1654 for (int i = 0; i < inst->sources; i++) {
1655 if (inst->src[i].file == ATTR) {
1656 /* ATTR regs in the FS are in units of logical scalar inputs each
1657 * of which consumes half of a GRF register.
1658 */
1659 assert(inst->src[i].offset < REG_SIZE / 2);
1660 const unsigned grf = urb_start + inst->src[i].nr / 2;
1661 const unsigned offset = (inst->src[i].nr % 2) * (REG_SIZE / 2) +
1662 inst->src[i].offset;
1663 const unsigned width = inst->src[i].stride == 0 ?
1664 1 : MIN2(inst->exec_size, 8);
1665 struct brw_reg reg = stride(
1666 byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1667 offset),
1668 width * inst->src[i].stride,
1669 width, inst->src[i].stride);
1670 reg.abs = inst->src[i].abs;
1671 reg.negate = inst->src[i].negate;
1672 inst->src[i] = reg;
1673 }
1674 }
1675 }
1676
1677 /* Each attribute is 4 setup channels, each of which is half a reg. */
1678 this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
1679 }
1680
1681 void
1682 fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
1683 {
1684 for (int i = 0; i < inst->sources; i++) {
1685 if (inst->src[i].file == ATTR) {
1686 int grf = payload.num_regs +
1687 prog_data->curb_read_length +
1688 inst->src[i].nr +
1689 inst->src[i].offset / REG_SIZE;
1690
1691 /* As explained at brw_reg_from_fs_reg, From the Haswell PRM:
1692 *
1693 * VertStride must be used to cross GRF register boundaries. This
1694 * rule implies that elements within a 'Width' cannot cross GRF
1695 * boundaries.
1696 *
1697 * So, for registers that are large enough, we have to split the exec
1698 * size in two and trust the compression state to sort it out.
1699 */
1700 unsigned total_size = inst->exec_size *
1701 inst->src[i].stride *
1702 type_sz(inst->src[i].type);
1703
1704 assert(total_size <= 2 * REG_SIZE);
1705 const unsigned exec_size =
1706 (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;
1707
1708 unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
1709 struct brw_reg reg =
1710 stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1711 inst->src[i].offset % REG_SIZE),
1712 exec_size * inst->src[i].stride,
1713 width, inst->src[i].stride);
1714 reg.abs = inst->src[i].abs;
1715 reg.negate = inst->src[i].negate;
1716
1717 inst->src[i] = reg;
1718 }
1719 }
1720 }
1721
1722 void
1723 fs_visitor::assign_vs_urb_setup()
1724 {
1725 struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
1726
1727 assert(stage == MESA_SHADER_VERTEX);
1728
1729 /* Each attribute is 4 regs. */
1730 this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;
1731
1732 assert(vs_prog_data->base.urb_read_length <= 15);
1733
1734 /* Rewrite all ATTR file references to the hw grf that they land in. */
1735 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1736 convert_attr_sources_to_hw_regs(inst);
1737 }
1738 }
1739
1740 void
1741 fs_visitor::assign_tcs_single_patch_urb_setup()
1742 {
1743 assert(stage == MESA_SHADER_TESS_CTRL);
1744
1745 /* Rewrite all ATTR file references to HW_REGs. */
1746 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1747 convert_attr_sources_to_hw_regs(inst);
1748 }
1749 }
1750
1751 void
1752 fs_visitor::assign_tes_urb_setup()
1753 {
1754 assert(stage == MESA_SHADER_TESS_EVAL);
1755
1756 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1757
1758 first_non_payload_grf += 8 * vue_prog_data->urb_read_length;
1759
1760 /* Rewrite all ATTR file references to HW_REGs. */
1761 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1762 convert_attr_sources_to_hw_regs(inst);
1763 }
1764 }
1765
1766 void
1767 fs_visitor::assign_gs_urb_setup()
1768 {
1769 assert(stage == MESA_SHADER_GEOMETRY);
1770
1771 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1772
1773 first_non_payload_grf +=
1774 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
1775
1776 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1777 /* Rewrite all ATTR file references to GRFs. */
1778 convert_attr_sources_to_hw_regs(inst);
1779 }
1780 }
1781
1782
1783 /**
1784 * Split large virtual GRFs into separate components if we can.
1785 *
1786 * This is mostly duplicated with what brw_fs_vector_splitting does,
1787 * but that's really conservative because it's afraid of doing
1788 * splitting that doesn't result in real progress after the rest of
1789 * the optimization phases, which would cause infinite looping in
1790 * optimization. We can do it once here, safely. This also has the
1791 * opportunity to split interpolated values, or maybe even uniforms,
1792 * which we don't have at the IR level.
1793 *
1794 * We want to split, because virtual GRFs are what we register
1795 * allocate and spill (due to contiguousness requirements for some
1796 * instructions), and they're what we naturally generate in the
1797 * codegen process, but most virtual GRFs don't actually need to be
1798 * contiguous sets of GRFs. If we split, we'll end up with reduced
1799 * live intervals and better dead code elimination and coalescing.
1800 */
1801 void
1802 fs_visitor::split_virtual_grfs()
1803 {
1804 /* Compact the register file so we eliminate dead vgrfs. This
1805 * only defines split points for live registers, so if we have
1806 * overly large dead registers, they will hit assertions later.
1807 */
1808 compact_virtual_grfs();
1809
1810 int num_vars = this->alloc.count;
1811
1812 /* Count the total number of registers */
1813 int reg_count = 0;
1814 int vgrf_to_reg[num_vars];
1815 for (int i = 0; i < num_vars; i++) {
1816 vgrf_to_reg[i] = reg_count;
1817 reg_count += alloc.sizes[i];
1818 }
1819
1820 /* An array of "split points". For each register slot, this indicates
1821 * if this slot can be separated from the previous slot. Every time an
1822 * instruction uses multiple elements of a register (as a source or
1823 * destination), we mark the used slots as inseparable. Then we go
1824 * through and split the registers into the smallest pieces we can.
1825 */
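/* A rough illustration with made-up sizes: if vgrf0 is four GRFs long, one
 * instruction writes its first two GRFs as a unit and another reads its
 * last two GRFs as a unit, then only the boundary between slots 1 and 2
 * survives as a split point and vgrf0 becomes two independent 2-GRF
 * registers.
 */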
1826 bool split_points[reg_count];
1827 memset(split_points, 0, sizeof(split_points));
1828
1829 /* Mark all used registers as fully splittable */
1830 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1831 if (inst->dst.file == VGRF) {
1832 int reg = vgrf_to_reg[inst->dst.nr];
1833 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1834 split_points[reg + j] = true;
1835 }
1836
1837 for (int i = 0; i < inst->sources; i++) {
1838 if (inst->src[i].file == VGRF) {
1839 int reg = vgrf_to_reg[inst->src[i].nr];
1840 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1841 split_points[reg + j] = true;
1842 }
1843 }
1844 }
1845
1846 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1847 if (inst->dst.file == VGRF) {
1848 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1849 for (unsigned j = 1; j < regs_written(inst); j++)
1850 split_points[reg + j] = false;
1851 }
1852 for (int i = 0; i < inst->sources; i++) {
1853 if (inst->src[i].file == VGRF) {
1854 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1855 for (unsigned j = 1; j < regs_read(inst, i); j++)
1856 split_points[reg + j] = false;
1857 }
1858 }
1859 }
1860
1861 int new_virtual_grf[reg_count];
1862 int new_reg_offset[reg_count];
1863
1864 int reg = 0;
1865 for (int i = 0; i < num_vars; i++) {
1866 /* The first one should always be 0 as a quick sanity check. */
1867 assert(split_points[reg] == false);
1868
1869 /* j = 0 case */
1870 new_reg_offset[reg] = 0;
1871 reg++;
1872 int offset = 1;
1873
1874 /* j > 0 case */
1875 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1876 /* If this is a split point, reset the offset to 0 and allocate a new
1877 * virtual GRF covering the offset registers accumulated so far.
1878 */
1879 if (split_points[reg]) {
1880 assert(offset <= MAX_VGRF_SIZE);
1881 int grf = alloc.allocate(offset);
1882 for (int k = reg - offset; k < reg; k++)
1883 new_virtual_grf[k] = grf;
1884 offset = 0;
1885 }
1886 new_reg_offset[reg] = offset;
1887 offset++;
1888 reg++;
1889 }
1890
1891 /* The last one gets the original register number */
1892 assert(offset <= MAX_VGRF_SIZE);
1893 alloc.sizes[i] = offset;
1894 for (int k = reg - offset; k < reg; k++)
1895 new_virtual_grf[k] = i;
1896 }
1897 assert(reg == reg_count);
1898
1899 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1900 if (inst->dst.file == VGRF) {
1901 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
1902 inst->dst.nr = new_virtual_grf[reg];
1903 inst->dst.offset = new_reg_offset[reg] * REG_SIZE +
1904 inst->dst.offset % REG_SIZE;
1905 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1906 }
1907 for (int i = 0; i < inst->sources; i++) {
1908 if (inst->src[i].file == VGRF) {
1909 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
1910 inst->src[i].nr = new_virtual_grf[reg];
1911 inst->src[i].offset = new_reg_offset[reg] * REG_SIZE +
1912 inst->src[i].offset % REG_SIZE;
1913 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1914 }
1915 }
1916 }
1917 invalidate_live_intervals();
1918 }
1919
1920 /**
1921 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1922 *
1923 * During code generation, we create tons of temporary variables, many of
1924 * which get immediately killed and are never used again. Yet, in later
1925 * optimization and analysis passes, such as compute_live_intervals, we need
1926 * to loop over all the virtual GRFs. Compacting them can save a lot of
1927 * overhead.
1928 */
1929 bool
1930 fs_visitor::compact_virtual_grfs()
1931 {
1932 bool progress = false;
1933 int remap_table[this->alloc.count];
1934 memset(remap_table, -1, sizeof(remap_table));
1935
1936 /* Mark which virtual GRFs are used. */
1937 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1938 if (inst->dst.file == VGRF)
1939 remap_table[inst->dst.nr] = 0;
1940
1941 for (int i = 0; i < inst->sources; i++) {
1942 if (inst->src[i].file == VGRF)
1943 remap_table[inst->src[i].nr] = 0;
1944 }
1945 }
1946
1947 /* Compact the GRF arrays. */
1948 int new_index = 0;
1949 for (unsigned i = 0; i < this->alloc.count; i++) {
1950 if (remap_table[i] == -1) {
1951 /* We just found an unused register. This means that we are
1952 * actually going to compact something.
1953 */
1954 progress = true;
1955 } else {
1956 remap_table[i] = new_index;
1957 alloc.sizes[new_index] = alloc.sizes[i];
1958 invalidate_live_intervals();
1959 ++new_index;
1960 }
1961 }
1962
1963 this->alloc.count = new_index;
1964
1965 /* Patch all the instructions to use the newly renumbered registers */
1966 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1967 if (inst->dst.file == VGRF)
1968 inst->dst.nr = remap_table[inst->dst.nr];
1969
1970 for (int i = 0; i < inst->sources; i++) {
1971 if (inst->src[i].file == VGRF)
1972 inst->src[i].nr = remap_table[inst->src[i].nr];
1973 }
1974 }
1975
1976 /* Patch all the references to delta_xy, since they're used in register
1977 * allocation. If they're unused, switch them to BAD_FILE so we don't
1978 * think some random VGRF is delta_xy.
1979 */
1980 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
1981 if (delta_xy[i].file == VGRF) {
1982 if (remap_table[delta_xy[i].nr] != -1) {
1983 delta_xy[i].nr = remap_table[delta_xy[i].nr];
1984 } else {
1985 delta_xy[i].file = BAD_FILE;
1986 }
1987 }
1988 }
1989
1990 return progress;
1991 }
1992
1993 static int
1994 get_subgroup_id_param_index(const brw_stage_prog_data *prog_data)
1995 {
1996 if (prog_data->nr_params == 0)
1997 return -1;
1998
1999 /* The local thread id is always the last parameter in the list */
2000 uint32_t last_param = prog_data->param[prog_data->nr_params - 1];
2001 if (last_param == BRW_PARAM_BUILTIN_SUBGROUP_ID)
2002 return prog_data->nr_params - 1;
2003
2004 return -1;
2005 }
2006
2007 /**
2008 * Struct for handling complex alignments.
2009 *
2010 * A complex alignment is stored as a multiplier and an offset. A value is
2011 * considered to be aligned if it is {offset} larger than a multiple of {mul}.
2012 * For instance, with an alignment of {8, 2}, cplx_align_apply would do the
2013 * following:
2014 *
2015 * N | cplx_align_apply({8, 2}, N)
2016 * ----+-----------------------------
2017 * 4 | 10
2018 * 6 | 10
2019 * 8 | 10
2020 * 10 | 10
2021 * 12 | 18
2022 * 14 | 18
2023 * 16 | 18
2024 */
2025 struct cplx_align {
2026 unsigned mul:4;
2027 unsigned offset:4;
2028 };
2029
2030 #define CPLX_ALIGN_MAX_MUL 8
2031
2032 static void
2033 cplx_align_assert_sane(struct cplx_align a)
2034 {
2035 assert(a.mul > 0 && util_is_power_of_two_nonzero(a.mul));
2036 assert(a.offset < a.mul);
2037 }
2038
2039 /**
2040 * Combines two alignments to produce a least multiple of sorts.
2041 *
2042 * The returned alignment is the smallest (in terms of multiplier) such that
2043 * anything aligned to both a and b will be aligned to the new alignment.
2044 * This function will assert-fail if a and b are not compatible, i.e. if the
2045 * offset parameters are such that no common alignment is possible.
2046 */
2047 static struct cplx_align
2048 cplx_align_combine(struct cplx_align a, struct cplx_align b)
2049 {
2050 cplx_align_assert_sane(a);
2051 cplx_align_assert_sane(b);
2052
2053 /* Assert that the alignments agree. */
2054 assert((a.offset & (b.mul - 1)) == (b.offset & (a.mul - 1)));
2055
2056 return a.mul > b.mul ? a : b;
2057 }
2058
2059 /**
2060 * Apply a complex alignment
2061 *
2062 * This function will return the smallest number greater than or equal to
2063 * offset that is aligned to align.
2064 */
2065 static unsigned
2066 cplx_align_apply(struct cplx_align align, unsigned offset)
2067 {
2068 return ALIGN(offset - align.offset, align.mul) + align.offset;
2069 }
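/* A couple of worked examples (illustrative only):
 *
 *    cplx_align_apply({8, 2}, 5)  = ALIGN(3, 8) + 2 = 10
 *    cplx_align_apply({8, 2}, 10) = ALIGN(8, 8) + 2 = 10
 *
 * and cplx_align_combine({8, 2}, {4, 2}) = {8, 2}, since anything that is
 * 2 larger than a multiple of 8 is also 2 larger than a multiple of 4.
 */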
2070
2071 #define UNIFORM_SLOT_SIZE 4
2072
2073 struct uniform_slot_info {
2074 /** True if the given uniform slot is live */
2075 unsigned is_live:1;
2076
2077 /** True if this slot and the next slot must remain contiguous */
2078 unsigned contiguous:1;
2079
2080 struct cplx_align align;
2081 };
2082
2083 static void
2084 mark_uniform_slots_read(struct uniform_slot_info *slots,
2085 unsigned num_slots, unsigned alignment)
2086 {
2087 assert(alignment > 0 && util_is_power_of_two_nonzero(alignment));
2088 assert(alignment <= CPLX_ALIGN_MAX_MUL);
2089
2090 /* We can't align a slot to anything less than the slot size */
2091 alignment = MAX2(alignment, UNIFORM_SLOT_SIZE);
2092
2093 struct cplx_align align = {alignment, 0};
2094 cplx_align_assert_sane(align);
2095
2096 for (unsigned i = 0; i < num_slots; i++) {
2097 slots[i].is_live = true;
2098 if (i < num_slots - 1)
2099 slots[i].contiguous = true;
2100
2101 align.offset = (i * UNIFORM_SLOT_SIZE) & (align.mul - 1);
2102 if (slots[i].align.mul == 0) {
2103 slots[i].align = align;
2104 } else {
2105 slots[i].align = cplx_align_combine(slots[i].align, align);
2106 }
2107 }
2108 }
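/* For example (hypothetical read): a 16-byte value with 8-byte alignment
 * starting at slot u marks slots u..u+3 live, marks u..u+2 contiguous with
 * their successors, and records alignments {8, 0}, {8, 4}, {8, 0}, {8, 4},
 * i.e. each slot must keep its byte offset modulo 8 within the chunk.
 */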
2109
2110 /**
2111 * Assign UNIFORM file registers to either push constants or pull constants.
2112 *
2113 * We allow a fragment shader to have more than the specified minimum
2114 * maximum number of fragment shader uniform components (64). If
2115 * there are too many of these, they'd fill up all of the register space.
2116 * So, this will push some of them out to the pull constant buffer and
2117 * update the program to load them.
2118 */
2119 void
2120 fs_visitor::assign_constant_locations()
2121 {
2122 /* Only the first compile gets to decide on locations. */
2123 if (push_constant_loc) {
2124 assert(pull_constant_loc);
2125 return;
2126 }
2127
2128 struct uniform_slot_info slots[uniforms];
2129 memset(slots, 0, sizeof(slots));
2130
2131 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2132 for (int i = 0 ; i < inst->sources; i++) {
2133 if (inst->src[i].file != UNIFORM)
2134 continue;
2135
2136 /* NIR tightly packs things so the uniform number might not be
2137 * aligned (if we have a double right after a float, for instance).
2138 * This is fine because the process of re-arranging them will ensure
2139 * that things are properly aligned. The offset into that uniform,
2140 * however, must be aligned.
2141 *
2142 * In Vulkan, we have explicit offsets but everything is crammed
2143 * into a single "variable" so inst->src[i].nr will always be 0.
2144 * Everything will be properly aligned relative to that one base.
2145 */
2146 assert(inst->src[i].offset % type_sz(inst->src[i].type) == 0);
2147
2148 unsigned u = inst->src[i].nr +
2149 inst->src[i].offset / UNIFORM_SLOT_SIZE;
2150
2151 if (u >= uniforms)
2152 continue;
2153
2154 unsigned slots_read;
2155 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
2156 slots_read = DIV_ROUND_UP(inst->src[2].ud, UNIFORM_SLOT_SIZE);
2157 } else {
2158 unsigned bytes_read = inst->components_read(i) *
2159 type_sz(inst->src[i].type);
2160 slots_read = DIV_ROUND_UP(bytes_read, UNIFORM_SLOT_SIZE);
2161 }
2162
2163 assert(u + slots_read <= uniforms);
2164 mark_uniform_slots_read(&slots[u], slots_read,
2165 type_sz(inst->src[i].type));
2166 }
2167 }
2168
2169 int subgroup_id_index = get_subgroup_id_param_index(stage_prog_data);
2170
2171 /* Only allow 16 registers (128 uniform components) as push constants.
2172 *
2173 * Just demote the end of the list. We could probably do better
2174 * here, demoting things that are rarely used in the program first.
2175 *
2176 * If changing this value, note the limitation about total_regs in
2177 * brw_curbe.c.
2178 */
2179 unsigned int max_push_components = 16 * 8;
2180 if (subgroup_id_index >= 0)
2181 max_push_components--; /* Save a slot for the thread ID */
2182
2183 /* We push small arrays, but no bigger than 16 floats. This is big enough
2184 * for a vec4 but hopefully not large enough to push out other stuff. We
2185 * should probably use a better heuristic at some point.
2186 */
2187 const unsigned int max_chunk_size = 16;
2188
2189 unsigned int num_push_constants = 0;
2190 unsigned int num_pull_constants = 0;
2191
2192 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2193 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2194
2195 /* Default to -1 meaning no location */
2196 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2197 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2198
2199 int chunk_start = -1;
2200 struct cplx_align align;
2201 for (unsigned u = 0; u < uniforms; u++) {
2202 if (!slots[u].is_live) {
2203 assert(chunk_start == -1);
2204 continue;
2205 }
2206
2207 /* Skip subgroup_id_index to put it in the last push register. */
2208 if (subgroup_id_index == (int)u)
2209 continue;
2210
2211 if (chunk_start == -1) {
2212 chunk_start = u;
2213 align = slots[u].align;
2214 } else {
2215 /* Offset into the chunk */
2216 unsigned chunk_offset = (u - chunk_start) * UNIFORM_SLOT_SIZE;
2217
2218 /* Shift the slot alignment down by the chunk offset so it is
2219 * comparable with the base chunk alignment.
2220 */
2221 struct cplx_align slot_align = slots[u].align;
2222 slot_align.offset =
2223 (slot_align.offset - chunk_offset) & (align.mul - 1);
2224
2225 align = cplx_align_combine(align, slot_align);
2226 }
2227
2228 /* Sanity check the alignment */
2229 cplx_align_assert_sane(align);
2230
2231 if (slots[u].contiguous)
2232 continue;
2233
2234 /* Adjust the alignment to be in terms of slots, not bytes */
2235 assert((align.mul & (UNIFORM_SLOT_SIZE - 1)) == 0);
2236 assert((align.offset & (UNIFORM_SLOT_SIZE - 1)) == 0);
2237 align.mul /= UNIFORM_SLOT_SIZE;
2238 align.offset /= UNIFORM_SLOT_SIZE;
2239
2240 unsigned push_start_align = cplx_align_apply(align, num_push_constants);
2241 unsigned chunk_size = u - chunk_start + 1;
2242 if ((!compiler->supports_pull_constants && u < UBO_START) ||
2243 (chunk_size < max_chunk_size &&
2244 push_start_align + chunk_size <= max_push_components)) {
2245 /* Align up the number of push constants */
2246 num_push_constants = push_start_align;
2247 for (unsigned i = 0; i < chunk_size; i++)
2248 push_constant_loc[chunk_start + i] = num_push_constants++;
2249 } else {
2250 /* We need to pull this one */
2251 num_pull_constants = cplx_align_apply(align, num_pull_constants);
2252 for (unsigned i = 0; i < chunk_size; i++)
2253 pull_constant_loc[chunk_start + i] = num_pull_constants++;
2254 }
2255
2256 /* Reset the chunk and start again */
2257 chunk_start = -1;
2258 }
2259
2260 /* Add the CS local thread ID uniform at the end of the push constants */
2261 if (subgroup_id_index >= 0)
2262 push_constant_loc[subgroup_id_index] = num_push_constants++;
2263
2264 /* As the uniforms are going to be reordered, stash the old array and
2265 * create two new arrays for push/pull params.
2266 */
2267 uint32_t *param = stage_prog_data->param;
2268 stage_prog_data->nr_params = num_push_constants;
2269 if (num_push_constants) {
2270 stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t,
2271 num_push_constants);
2272 } else {
2273 stage_prog_data->param = NULL;
2274 }
2275 assert(stage_prog_data->nr_pull_params == 0);
2276 assert(stage_prog_data->pull_param == NULL);
2277 if (num_pull_constants > 0) {
2278 stage_prog_data->nr_pull_params = num_pull_constants;
2279 stage_prog_data->pull_param = rzalloc_array(mem_ctx, uint32_t,
2280 num_pull_constants);
2281 }
2282
2283 /* Now that we know how many regular uniforms we'll push, reduce the
2284 * UBO push ranges so we don't exceed the 3DSTATE_CONSTANT limits.
2285 */
2286 unsigned push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);
2287 for (int i = 0; i < 4; i++) {
2288 struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
2289
2290 if (push_length + range->length > 64)
2291 range->length = 64 - push_length;
2292
2293 push_length += range->length;
2294 }
2295 assert(push_length <= 64);
2296
2297 /* Up until now, the param[] array has been indexed by reg + offset
2298 * of UNIFORM registers. Move pull constants into pull_param[] and
2299 * condense param[] to only contain the uniforms we chose to push.
2300 *
2301 * NOTE: Because we are condensing the param[] array, we know that
2302 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2303 * having to make a copy.
2304 */
2305 for (unsigned int i = 0; i < uniforms; i++) {
2306 uint32_t value = param[i];
2307 if (pull_constant_loc[i] != -1) {
2308 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2309 } else if (push_constant_loc[i] != -1) {
2310 stage_prog_data->param[push_constant_loc[i]] = value;
2311 }
2312 }
2313 ralloc_free(param);
2314 }
2315
2316 bool
2317 fs_visitor::get_pull_locs(const fs_reg &src,
2318 unsigned *out_surf_index,
2319 unsigned *out_pull_index)
2320 {
2321 assert(src.file == UNIFORM);
2322
2323 if (src.nr >= UBO_START) {
2324 const struct brw_ubo_range *range =
2325 &prog_data->ubo_ranges[src.nr - UBO_START];
2326
2327 /* If this access is in our (reduced) range, use the push data. */
2328 if (src.offset / 32 < range->length)
2329 return false;
2330
2331 *out_surf_index = prog_data->binding_table.ubo_start + range->block;
2332 *out_pull_index = (32 * range->start + src.offset) / 4;
2333 return true;
2334 }
2335
2336 const unsigned location = src.nr + src.offset / 4;
2337
2338 if (location < uniforms && pull_constant_loc[location] != -1) {
2339 /* A regular uniform push constant */
2340 *out_surf_index = stage_prog_data->binding_table.pull_constants_start;
2341 *out_pull_index = pull_constant_loc[location];
2342 return true;
2343 }
2344
2345 return false;
2346 }
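/* A hypothetical example of the UBO_START path above: with a range of
 * length 2 (64 bytes pushed) and an access at src.offset 80, 80 / 32 = 2 is
 * not below the length, so the access is pulled and the pull index is
 * (32 * range->start + 80) / 4 dwords into the surface.
 */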
2347
2348 /**
2349 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2350 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2351 */
2352 void
2353 fs_visitor::lower_constant_loads()
2354 {
2355 unsigned index, pull_index;
2356
2357 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2358 /* Set up the annotation tracking for new generated instructions. */
2359 const fs_builder ibld(this, block, inst);
2360
2361 for (int i = 0; i < inst->sources; i++) {
2362 if (inst->src[i].file != UNIFORM)
2363 continue;
2364
2365 /* We'll handle this case later */
2366 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2367 continue;
2368
2369 if (!get_pull_locs(inst->src[i], &index, &pull_index))
2370 continue;
2371
2372 assert(inst->src[i].stride == 0);
2373
2374 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
2375 const fs_builder ubld = ibld.exec_all().group(block_sz / 4, 0);
2376 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
2377 const unsigned base = pull_index * 4;
2378
2379 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2380 dst, brw_imm_ud(index), brw_imm_ud(base & ~(block_sz - 1)));
2381
2382 /* Rewrite the instruction to use the temporary VGRF. */
2383 inst->src[i].file = VGRF;
2384 inst->src[i].nr = dst.nr;
2385 inst->src[i].offset = (base & (block_sz - 1)) +
2386 inst->src[i].offset % 4;
2387 }
2388
2389 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2390 inst->src[0].file == UNIFORM) {
2391
2392 if (!get_pull_locs(inst->src[0], &index, &pull_index))
2393 continue;
2394
2395 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2396 brw_imm_ud(index),
2397 inst->src[1],
2398 pull_index * 4);
2399 inst->remove(block);
2400 }
2401 }
2402 invalidate_live_intervals();
2403 }
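/* Roughly speaking (illustrative IR, not actual compiler output), the pass
 * above turns a direct pulled-uniform use such as
 *
 *    add(8)  dst  u5.2  x
 *
 * into
 *
 *    uniform_pull_constant_load(8)  tmp  surf_index  aligned_base
 *    add(8)  dst  tmp+<offset into the 64B block>  x
 *
 * while an indirect MOV_INDIRECT from a pulled uniform is replaced outright
 * by a VARYING_PULL_CONSTANT_LOAD of the indirectly addressed data.
 */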
2404
2405 bool
2406 fs_visitor::opt_algebraic()
2407 {
2408 bool progress = false;
2409
2410 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2411 switch (inst->opcode) {
2412 case BRW_OPCODE_MOV:
2413 if (!devinfo->has_64bit_types &&
2414 (inst->dst.type == BRW_REGISTER_TYPE_DF ||
2415 inst->dst.type == BRW_REGISTER_TYPE_UQ ||
2416 inst->dst.type == BRW_REGISTER_TYPE_Q)) {
2417 assert(inst->dst.type == inst->src[0].type);
2418 assert(!inst->saturate);
2419 assert(!inst->src[0].abs);
2420 assert(!inst->src[0].negate);
2421 const brw::fs_builder ibld(this, block, inst);
2422
2423 if (inst->src[0].file == IMM) {
2424 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2425 brw_imm_ud(inst->src[0].u64 >> 32));
2426 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2427 brw_imm_ud(inst->src[0].u64));
2428 } else {
2429 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2430 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1));
2431 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2432 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0));
2433 }
2434
2435 inst->remove(block);
2436 progress = true;
2437 }
2438
2439 if ((inst->conditional_mod == BRW_CONDITIONAL_Z ||
2440 inst->conditional_mod == BRW_CONDITIONAL_NZ) &&
2441 inst->dst.is_null() &&
2442 (inst->src[0].abs || inst->src[0].negate)) {
2443 inst->src[0].abs = false;
2444 inst->src[0].negate = false;
2445 progress = true;
2446 break;
2447 }
2448
2449 if (inst->src[0].file != IMM)
2450 break;
2451
2452 if (inst->saturate) {
2453 /* Full mixed-type saturates don't happen. However, we can end up
2454 * with things like:
2455 *
2456 * mov.sat(8) g21<1>DF -1F
2457 *
2458 * Other mixed-size-but-same-base-type cases may also be possible.
2459 */
2460 if (inst->dst.type != inst->src[0].type &&
2461 inst->dst.type != BRW_REGISTER_TYPE_DF &&
2462 inst->src[0].type != BRW_REGISTER_TYPE_F)
2463 assert(!"unimplemented: saturate mixed types");
2464
2465 if (brw_saturate_immediate(inst->src[0].type,
2466 &inst->src[0].as_brw_reg())) {
2467 inst->saturate = false;
2468 progress = true;
2469 }
2470 }
2471 break;
2472
2473 case BRW_OPCODE_MUL:
2474 if (inst->src[1].file != IMM)
2475 continue;
2476
2477 /* a * 1.0 = a */
2478 if (inst->src[1].is_one()) {
2479 inst->opcode = BRW_OPCODE_MOV;
2480 inst->src[1] = reg_undef;
2481 progress = true;
2482 break;
2483 }
2484
2485 /* a * -1.0 = -a */
2486 if (inst->src[1].is_negative_one()) {
2487 inst->opcode = BRW_OPCODE_MOV;
2488 inst->src[0].negate = !inst->src[0].negate;
2489 inst->src[1] = reg_undef;
2490 progress = true;
2491 break;
2492 }
2493
2494 /* a * 0.0 = 0.0 */
2495 if (inst->src[1].is_zero()) {
2496 inst->opcode = BRW_OPCODE_MOV;
2497 inst->src[0] = inst->src[1];
2498 inst->src[1] = reg_undef;
2499 progress = true;
2500 break;
2501 }
2502
2503 if (inst->src[0].file == IMM) {
2504 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2505 inst->opcode = BRW_OPCODE_MOV;
2506 inst->src[0].f *= inst->src[1].f;
2507 inst->src[1] = reg_undef;
2508 progress = true;
2509 break;
2510 }
2511 break;
2512 case BRW_OPCODE_ADD:
2513 if (inst->src[1].file != IMM)
2514 continue;
2515
2516 /* a + 0.0 = a */
2517 if (inst->src[1].is_zero()) {
2518 inst->opcode = BRW_OPCODE_MOV;
2519 inst->src[1] = reg_undef;
2520 progress = true;
2521 break;
2522 }
2523
2524 if (inst->src[0].file == IMM) {
2525 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2526 inst->opcode = BRW_OPCODE_MOV;
2527 inst->src[0].f += inst->src[1].f;
2528 inst->src[1] = reg_undef;
2529 progress = true;
2530 break;
2531 }
2532 break;
2533 case BRW_OPCODE_OR:
2534 if (inst->src[0].equals(inst->src[1]) ||
2535 inst->src[1].is_zero()) {
2536 inst->opcode = BRW_OPCODE_MOV;
2537 inst->src[1] = reg_undef;
2538 progress = true;
2539 break;
2540 }
2541 break;
2542 case BRW_OPCODE_LRP:
2543 if (inst->src[1].equals(inst->src[2])) {
2544 inst->opcode = BRW_OPCODE_MOV;
2545 inst->src[0] = inst->src[1];
2546 inst->src[1] = reg_undef;
2547 inst->src[2] = reg_undef;
2548 progress = true;
2549 break;
2550 }
2551 break;
2552 case BRW_OPCODE_CMP:
2553 if ((inst->conditional_mod == BRW_CONDITIONAL_Z ||
2554 inst->conditional_mod == BRW_CONDITIONAL_NZ) &&
2555 inst->src[1].is_zero() &&
2556 (inst->src[0].abs || inst->src[0].negate)) {
2557 inst->src[0].abs = false;
2558 inst->src[0].negate = false;
2559 progress = true;
2560 break;
2561 }
2562 break;
2563 case BRW_OPCODE_SEL:
2564 if (!devinfo->has_64bit_types &&
2565 (inst->dst.type == BRW_REGISTER_TYPE_DF ||
2566 inst->dst.type == BRW_REGISTER_TYPE_UQ ||
2567 inst->dst.type == BRW_REGISTER_TYPE_Q)) {
2568 assert(inst->dst.type == inst->src[0].type);
2569 assert(!inst->saturate);
2570 assert(!inst->src[0].abs && !inst->src[0].negate);
2571 assert(!inst->src[1].abs && !inst->src[1].negate);
2572 const brw::fs_builder ibld(this, block, inst);
2573
2574 set_predicate(inst->predicate,
2575 ibld.SEL(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2576 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
2577 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0)));
2578 set_predicate(inst->predicate,
2579 ibld.SEL(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2580 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1),
2581 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 1)));
2582
2583 inst->remove(block);
2584 progress = true;
2585 }
2586 if (inst->src[0].equals(inst->src[1])) {
2587 inst->opcode = BRW_OPCODE_MOV;
2588 inst->src[1] = reg_undef;
2589 inst->predicate = BRW_PREDICATE_NONE;
2590 inst->predicate_inverse = false;
2591 progress = true;
2592 } else if (inst->saturate && inst->src[1].file == IMM) {
2593 switch (inst->conditional_mod) {
2594 case BRW_CONDITIONAL_LE:
2595 case BRW_CONDITIONAL_L:
2596 switch (inst->src[1].type) {
2597 case BRW_REGISTER_TYPE_F:
2598 if (inst->src[1].f >= 1.0f) {
2599 inst->opcode = BRW_OPCODE_MOV;
2600 inst->src[1] = reg_undef;
2601 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2602 progress = true;
2603 }
2604 break;
2605 default:
2606 break;
2607 }
2608 break;
2609 case BRW_CONDITIONAL_GE:
2610 case BRW_CONDITIONAL_G:
2611 switch (inst->src[1].type) {
2612 case BRW_REGISTER_TYPE_F:
2613 if (inst->src[1].f <= 0.0f) {
2614 inst->opcode = BRW_OPCODE_MOV;
2615 inst->src[1] = reg_undef;
2616 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2617 progress = true;
2618 }
2619 break;
2620 default:
2621 break;
2622 }
2623 default:
2624 break;
2625 }
2626 }
2627 break;
2628 case BRW_OPCODE_MAD:
2629 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2630 inst->opcode = BRW_OPCODE_MOV;
2631 inst->src[1] = reg_undef;
2632 inst->src[2] = reg_undef;
2633 progress = true;
2634 } else if (inst->src[0].is_zero()) {
2635 inst->opcode = BRW_OPCODE_MUL;
2636 inst->src[0] = inst->src[2];
2637 inst->src[2] = reg_undef;
2638 progress = true;
2639 } else if (inst->src[1].is_one()) {
2640 inst->opcode = BRW_OPCODE_ADD;
2641 inst->src[1] = inst->src[2];
2642 inst->src[2] = reg_undef;
2643 progress = true;
2644 } else if (inst->src[2].is_one()) {
2645 inst->opcode = BRW_OPCODE_ADD;
2646 inst->src[2] = reg_undef;
2647 progress = true;
2648 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2649 inst->opcode = BRW_OPCODE_ADD;
2650 inst->src[1].f *= inst->src[2].f;
2651 inst->src[2] = reg_undef;
2652 progress = true;
2653 }
2654 break;
2655 case SHADER_OPCODE_BROADCAST:
2656 if (is_uniform(inst->src[0])) {
2657 inst->opcode = BRW_OPCODE_MOV;
2658 inst->sources = 1;
2659 inst->force_writemask_all = true;
2660 progress = true;
2661 } else if (inst->src[1].file == IMM) {
2662 inst->opcode = BRW_OPCODE_MOV;
2663 /* It's possible that the selected component will be too large and
2664 * overflow the register. This can happen if someone does a
2665 * readInvocation() from GLSL or SPIR-V and provides an OOB
2666 * invocationIndex. If this happens and we somehow manage
2667 * to constant fold it in and get here, then component() may cause
2668 * us to start reading outside of the VGRF, which will lead to an
2669 * assert later. Instead, just let it wrap around if it goes over
2670 * exec_size.
2671 */
2672 const unsigned comp = inst->src[1].ud & (inst->exec_size - 1);
2673 inst->src[0] = component(inst->src[0], comp);
2674 inst->sources = 1;
2675 inst->force_writemask_all = true;
2676 progress = true;
2677 }
2678 break;
2679
2680 case SHADER_OPCODE_SHUFFLE:
2681 if (is_uniform(inst->src[0])) {
2682 inst->opcode = BRW_OPCODE_MOV;
2683 inst->sources = 1;
2684 progress = true;
2685 } else if (inst->src[1].file == IMM) {
2686 inst->opcode = BRW_OPCODE_MOV;
2687 inst->src[0] = component(inst->src[0],
2688 inst->src[1].ud);
2689 inst->sources = 1;
2690 progress = true;
2691 }
2692 break;
2693
2694 default:
2695 break;
2696 }
2697
2698 /* Swap if src[0] is immediate. */
2699 if (progress && inst->is_commutative()) {
2700 if (inst->src[0].file == IMM) {
2701 fs_reg tmp = inst->src[1];
2702 inst->src[1] = inst->src[0];
2703 inst->src[0] = tmp;
2704 }
2705 }
2706 }
2707 return progress;
2708 }
2709
2710 /**
2711 * Optimize sample messages that have constant zero values for the trailing
2712 * texture coordinates. We can just reduce the message length for these
2713 * instructions instead of reserving a register for it. Trailing parameters
2714 * that aren't sent default to zero anyway. This will cause the dead code
2715 * eliminator to remove the MOV instruction that would otherwise be emitted to
2716 * set up the zero value.
2717 */
2718 bool
2719 fs_visitor::opt_zero_samples()
2720 {
2721 /* Gen4 infers the texturing opcode based on the message length so we can't
2722 * change it.
2723 */
2724 if (devinfo->gen < 5)
2725 return false;
2726
2727 bool progress = false;
2728
2729 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2730 if (!inst->is_tex())
2731 continue;
2732
2733 fs_inst *load_payload = (fs_inst *) inst->prev;
2734
2735 if (load_payload->is_head_sentinel() ||
2736 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2737 continue;
2738
2739 /* We don't want to remove the message header or the first parameter.
2740 * Removing the first parameter is not allowed, see the Haswell PRM
2741 * volume 7, page 149:
2742 *
2743 * "Parameter 0 is required except for the sampleinfo message, which
2744 * has no parameter 0"
2745 */
2746 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2747 load_payload->src[(inst->mlen - inst->header_size) /
2748 (inst->exec_size / 8) +
2749 inst->header_size - 1].is_zero()) {
2750 inst->mlen -= inst->exec_size / 8;
2751 progress = true;
2752 }
2753 }
2754
2755 if (progress)
2756 invalidate_live_intervals();
2757
2758 return progress;
2759 }
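/* A made-up example of the trimming above: a SIMD8 txl payload of header,
 * u, v, r and lod has mlen 5; if the LOAD_PAYLOAD sources for lod and r are
 * immediate zero (and v is not), the loop drops them one at a time and the
 * message goes out with mlen 3. Parameter 0 and the header can never be
 * dropped because the loop stops at header_size + exec_size / 8.
 */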
2760
2761 /**
2762 * Optimize sample messages which are followed by the final RT write.
2763 *
2764 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2765 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2766 * final texturing results copied to the framebuffer write payload and modify
2767 * them to write to the framebuffer directly.
2768 */
2769 bool
2770 fs_visitor::opt_sampler_eot()
2771 {
2772 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2773
2774 if (stage != MESA_SHADER_FRAGMENT || dispatch_width > 16)
2775 return false;
2776
2777 if (devinfo->gen != 9 && !devinfo->is_cherryview)
2778 return false;
2779
2780 /* FINISHME: It should be possible to implement this optimization when there
2781 * are multiple drawbuffers.
2782 */
2783 if (key->nr_color_regions != 1)
2784 return false;
2785
2786 /* Requires emitting a bunch of saturating MOV instructions during logical
2787 * send lowering to clamp the color payload, which the sampler unit isn't
2788 * going to do for us.
2789 */
2790 if (key->clamp_fragment_color)
2791 return false;
2792
2793 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2794 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2795 fs_inst *fb_write = (fs_inst *)block->end();
2796 assert(fb_write->eot);
2797 assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
2798
2799 /* There wasn't one; nothing to do. */
2800 if (unlikely(fb_write->prev->is_head_sentinel()))
2801 return false;
2802
2803 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2804
2805 /* 3D Sampler » Messages » Message Format
2806 *
2807 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2808 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2809 */
2810 if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
2811 tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
2812 tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
2813 tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
2814 tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
2815 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
2816 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
2817 tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
2818 return false;
2819
2820 /* XXX - This shouldn't be necessary. */
2821 if (tex_inst->prev->is_head_sentinel())
2822 return false;
2823
2824 /* Check that the FB write sources are fully initialized by the single
2825 * texturing instruction.
2826 */
2827 for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
2828 if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
2829 if (!fb_write->src[i].equals(tex_inst->dst) ||
2830 fb_write->size_read(i) != tex_inst->size_written)
2831 return false;
2832 } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
2833 if (fb_write->src[i].file != BAD_FILE)
2834 return false;
2835 }
2836 }
2837
2838 assert(!tex_inst->eot); /* We can't get here twice */
2839 assert((tex_inst->offset & (0xff << 24)) == 0);
2840
2841 const fs_builder ibld(this, block, tex_inst);
2842
2843 tex_inst->offset |= fb_write->target << 24;
2844 tex_inst->eot = true;
2845 tex_inst->dst = ibld.null_reg_ud();
2846 tex_inst->size_written = 0;
2847 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2848
2849 /* Marking EOT is sufficient, lower_logical_sends() will notice the EOT
2850 * flag and submit a header together with the sampler message as required
2851 * by the hardware.
2852 */
2853 invalidate_live_intervals();
2854 return true;
2855 }
2856
2857 bool
2858 fs_visitor::opt_register_renaming()
2859 {
2860 bool progress = false;
2861 int depth = 0;
2862
2863 unsigned remap[alloc.count];
2864 memset(remap, ~0u, sizeof(unsigned) * alloc.count);
2865
2866 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2867 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2868 depth++;
2869 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2870 inst->opcode == BRW_OPCODE_WHILE) {
2871 depth--;
2872 }
2873
2874 /* Rewrite instruction sources. */
2875 for (int i = 0; i < inst->sources; i++) {
2876 if (inst->src[i].file == VGRF &&
2877 remap[inst->src[i].nr] != ~0u &&
2878 remap[inst->src[i].nr] != inst->src[i].nr) {
2879 inst->src[i].nr = remap[inst->src[i].nr];
2880 progress = true;
2881 }
2882 }
2883
2884 const unsigned dst = inst->dst.nr;
2885
2886 if (depth == 0 &&
2887 inst->dst.file == VGRF &&
2888 alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
2889 !inst->is_partial_write()) {
2890 if (remap[dst] == ~0u) {
2891 remap[dst] = dst;
2892 } else {
2893 remap[dst] = alloc.allocate(regs_written(inst));
2894 inst->dst.nr = remap[dst];
2895 progress = true;
2896 }
2897 } else if (inst->dst.file == VGRF &&
2898 remap[dst] != ~0u &&
2899 remap[dst] != dst) {
2900 inst->dst.nr = remap[dst];
2901 progress = true;
2902 }
2903 }
2904
2905 if (progress) {
2906 invalidate_live_intervals();
2907
2908 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2909 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != ~0u) {
2910 delta_xy[i].nr = remap[delta_xy[i].nr];
2911 }
2912 }
2913 }
2914
2915 return progress;
2916 }
2917
2918 /**
2919 * Remove redundant or useless discard jumps.
2920 *
2921 * For example, we can eliminate jumps in the following sequence:
2922 *
2923 * discard-jump (redundant with the next jump)
2924 * discard-jump (useless; jumps to the next instruction)
2925 * placeholder-halt
2926 */
2927 bool
2928 fs_visitor::opt_redundant_discard_jumps()
2929 {
2930 bool progress = false;
2931
2932 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2933
2934 fs_inst *placeholder_halt = NULL;
2935 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2936 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2937 placeholder_halt = inst;
2938 break;
2939 }
2940 }
2941
2942 if (!placeholder_halt)
2943 return false;
2944
2945 /* Delete any HALTs immediately before the placeholder halt. */
2946 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2947 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2948 prev = (fs_inst *) placeholder_halt->prev) {
2949 prev->remove(last_bblock);
2950 progress = true;
2951 }
2952
2953 if (progress)
2954 invalidate_live_intervals();
2955
2956 return progress;
2957 }
2958
2959 /**
2960 * Compute a bitmask, at GRF granularity, with a bit set for each GRF starting
2961 * from \p r.offset that overlaps the region starting at \p s.offset and
2962 * spanning \p ds bytes.
2963 */
2964 static inline unsigned
2965 mask_relative_to(const fs_reg &r, const fs_reg &s, unsigned ds)
2966 {
2967 const int rel_offset = reg_offset(s) - reg_offset(r);
2968 const int shift = rel_offset / REG_SIZE;
2969 const unsigned n = DIV_ROUND_UP(rel_offset % REG_SIZE + ds, REG_SIZE);
2970 assert(reg_space(r) == reg_space(s) &&
2971 shift >= 0 && shift < int(8 * sizeof(unsigned)));
2972 return ((1 << n) - 1) << shift;
2973 }
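/* Worked example (hypothetical registers): if r starts at byte offset 0 of
 * a VGRF and s names bytes 40..55 of the same VGRF (ds = 16), then
 * rel_offset = 40, shift = 1, n = DIV_ROUND_UP(8 + 16, 32) = 1, and the
 * returned mask is 0b10: only the second GRF spanned by r overlaps s.
 */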
2974
2975 bool
2976 fs_visitor::opt_peephole_csel()
2977 {
2978 if (devinfo->gen < 8)
2979 return false;
2980
2981 bool progress = false;
2982
2983 foreach_block_reverse(block, cfg) {
2984 int ip = block->end_ip + 1;
2985
2986 foreach_inst_in_block_reverse_safe(fs_inst, inst, block) {
2987 ip--;
2988
2989 if (inst->opcode != BRW_OPCODE_SEL ||
2990 inst->predicate != BRW_PREDICATE_NORMAL ||
2991 (inst->dst.type != BRW_REGISTER_TYPE_F &&
2992 inst->dst.type != BRW_REGISTER_TYPE_D &&
2993 inst->dst.type != BRW_REGISTER_TYPE_UD))
2994 continue;
2995
2996 /* Because it is a 3-src instruction, CSEL cannot have an immediate
2997 * value as a source, but we can sometimes handle zero.
2998 */
2999 if ((inst->src[0].file != VGRF && inst->src[0].file != ATTR &&
3000 inst->src[0].file != UNIFORM) ||
3001 (inst->src[1].file != VGRF && inst->src[1].file != ATTR &&
3002 inst->src[1].file != UNIFORM && !inst->src[1].is_zero()))
3003 continue;
3004
3005 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3006 if (!scan_inst->flags_written())
3007 continue;
3008
3009 if ((scan_inst->opcode != BRW_OPCODE_CMP &&
3010 scan_inst->opcode != BRW_OPCODE_MOV) ||
3011 scan_inst->predicate != BRW_PREDICATE_NONE ||
3012 (scan_inst->src[0].file != VGRF &&
3013 scan_inst->src[0].file != ATTR &&
3014 scan_inst->src[0].file != UNIFORM) ||
3015 scan_inst->src[0].type != BRW_REGISTER_TYPE_F)
3016 break;
3017
3018 if (scan_inst->opcode == BRW_OPCODE_CMP && !scan_inst->src[1].is_zero())
3019 break;
3020
3021 const brw::fs_builder ibld(this, block, inst);
3022
3023 const enum brw_conditional_mod cond =
3024 inst->predicate_inverse
3025 ? brw_negate_cmod(scan_inst->conditional_mod)
3026 : scan_inst->conditional_mod;
3027
3028 fs_inst *csel_inst = NULL;
3029
3030 if (inst->src[1].file != IMM) {
3031 csel_inst = ibld.CSEL(inst->dst,
3032 inst->src[0],
3033 inst->src[1],
3034 scan_inst->src[0],
3035 cond);
3036 } else if (cond == BRW_CONDITIONAL_NZ) {
3037 /* Consider the sequence
3038 *
3039 * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
3040 * (+f0) sel g124<1>UD g2<8,8,1>UD 0x00000000UD
3041 *
3042 * The sel will pick the immediate value 0 if g3 is ±0.0.
3043 * Therefore, this sequence is equivalent:
3044 *
3045 * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
3046 * (+f0) sel g124<1>F g2<8,8,1>F (abs)g3<8,8,1>F
3047 *
3048 * The abs ensures that the result is 0UD when g3 is -0.0F.
3049 * By normal cmp-sel merging, this is also equivalent:
3050 *
3051 * csel.nz g124<1>F g2<4,4,1>F (abs)g3<4,4,1>F g3<4,4,1>F
3052 */
3053 csel_inst = ibld.CSEL(inst->dst,
3054 inst->src[0],
3055 scan_inst->src[0],
3056 scan_inst->src[0],
3057 cond);
3058
3059 csel_inst->src[1].abs = true;
3060 }
3061
3062 if (csel_inst != NULL) {
3063 progress = true;
3064 inst->remove(block);
3065 }
3066
3067 break;
3068 }
3069 }
3070 }
3071
3072 return progress;
3073 }
3074
3075 bool
3076 fs_visitor::compute_to_mrf()
3077 {
3078 bool progress = false;
3079 int next_ip = 0;
3080
3081 /* No MRFs on Gen >= 7. */
3082 if (devinfo->gen >= 7)
3083 return false;
3084
3085 calculate_live_intervals();
3086
3087 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3088 int ip = next_ip;
3089 next_ip++;
3090
3091 if (inst->opcode != BRW_OPCODE_MOV ||
3092 inst->is_partial_write() ||
3093 inst->dst.file != MRF || inst->src[0].file != VGRF ||
3094 inst->dst.type != inst->src[0].type ||
3095 inst->src[0].abs || inst->src[0].negate ||
3096 !inst->src[0].is_contiguous() ||
3097 inst->src[0].offset % REG_SIZE != 0)
3098 continue;
3099
3100 /* Can't compute-to-MRF this GRF if someone else was going to
3101 * read it later.
3102 */
3103 if (this->virtual_grf_end[inst->src[0].nr] > ip)
3104 continue;
3105
3106 /* Found a move of a GRF to a MRF. Let's see if we can go rewrite the
3107 * things that computed the value of all GRFs of the source region. The
3108 * regs_left bitset keeps track of the registers we haven't yet found a
3109 * generating instruction for.
3110 */
3111 unsigned regs_left = (1 << regs_read(inst, 0)) - 1;
3112
3113 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3114 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3115 inst->src[0], inst->size_read(0))) {
3116 /* Found the last thing to write our reg we want to turn
3117 * into a compute-to-MRF.
3118 */
3119
3120 /* If this one instruction didn't populate all the
3121 * channels, bail. We might be able to rewrite everything
3122 * that writes that reg, but it would require smarter
3123 * tracking.
3124 */
3125 if (scan_inst->is_partial_write())
3126 break;
3127
3128 /* Handling things not fully contained in the source of the copy
3129 * would need us to understand coalescing out more than one MOV at
3130 * a time.
3131 */
3132 if (!region_contained_in(scan_inst->dst, scan_inst->size_written,
3133 inst->src[0], inst->size_read(0)))
3134 break;
3135
3136 /* SEND instructions can't have MRF as a destination. */
3137 if (scan_inst->mlen)
3138 break;
3139
3140 if (devinfo->gen == 6) {
3141 /* gen6 math instructions must have the destination be
3142 * GRF, so no compute-to-MRF for them.
3143 */
3144 if (scan_inst->is_math()) {
3145 break;
3146 }
3147 }
3148
3149 /* Clear the bits for any registers this instruction overwrites. */
3150 regs_left &= ~mask_relative_to(
3151 inst->src[0], scan_inst->dst, scan_inst->size_written);
3152 if (!regs_left)
3153 break;
3154 }
3155
3156 /* We don't handle control flow here. Most computation of
3157 * values that end up in MRFs happens shortly before the MRF
3158 * write anyway.
3159 */
3160 if (block->start() == scan_inst)
3161 break;
3162
3163 /* You can't read from an MRF, so if someone else reads our
3164 * MRF's source GRF that we wanted to rewrite, that stops us.
3165 */
3166 bool interfered = false;
3167 for (int i = 0; i < scan_inst->sources; i++) {
3168 if (regions_overlap(scan_inst->src[i], scan_inst->size_read(i),
3169 inst->src[0], inst->size_read(0))) {
3170 interfered = true;
3171 }
3172 }
3173 if (interfered)
3174 break;
3175
3176 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3177 inst->dst, inst->size_written)) {
3178 /* If somebody else writes our MRF here, we can't
3179 * compute-to-MRF before that.
3180 */
3181 break;
3182 }
3183
3184 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1 &&
3185 regions_overlap(fs_reg(MRF, scan_inst->base_mrf), scan_inst->mlen * REG_SIZE,
3186 inst->dst, inst->size_written)) {
3187 /* Found a SEND instruction, which means that there are
3188 * live values in MRFs from base_mrf to base_mrf +
3189 * scan_inst->mlen - 1. Don't go pushing our MRF write up
3190 * above it.
3191 */
3192 break;
3193 }
3194 }
3195
3196 if (regs_left)
3197 continue;
3198
3199 /* Found all generating instructions of our MRF's source value, so it
3200 * should be safe to rewrite them to point to the MRF directly.
3201 */
3202 regs_left = (1 << regs_read(inst, 0)) - 1;
3203
3204 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3205 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3206 inst->src[0], inst->size_read(0))) {
3207 /* Clear the bits for any registers this instruction overwrites. */
3208 regs_left &= ~mask_relative_to(
3209 inst->src[0], scan_inst->dst, scan_inst->size_written);
3210
3211 const unsigned rel_offset = reg_offset(scan_inst->dst) -
3212 reg_offset(inst->src[0]);
3213
3214 if (inst->dst.nr & BRW_MRF_COMPR4) {
3215 /* Apply the same address transformation done by the hardware
3216 * for COMPR4 MRF writes.
3217 */
3218 assert(rel_offset < 2 * REG_SIZE);
3219 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE * 4;
3220
3221 /* Clear the COMPR4 bit if the generating instruction is not
3222 * compressed.
3223 */
3224 if (scan_inst->size_written < 2 * REG_SIZE)
3225 scan_inst->dst.nr &= ~BRW_MRF_COMPR4;
3226
3227 } else {
3228 /* Calculate the MRF number the result of this instruction is
3229 * ultimately written to.
3230 */
3231 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE;
3232 }
3233
3234 scan_inst->dst.file = MRF;
3235 scan_inst->dst.offset = inst->dst.offset + rel_offset % REG_SIZE;
3236 scan_inst->saturate |= inst->saturate;
3237 if (!regs_left)
3238 break;
3239 }
3240 }
3241
3242 assert(!regs_left);
3243 inst->remove(block);
3244 progress = true;
3245 }
3246
3247 if (progress)
3248 invalidate_live_intervals();
3249
3250 return progress;
3251 }
3252
3253 /**
3254 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
3255 * flow. We could probably do better here with some form of divergence
3256 * analysis.
3257 */
3258 bool
3259 fs_visitor::eliminate_find_live_channel()
3260 {
3261 bool progress = false;
3262 unsigned depth = 0;
3263
3264 if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
3265 /* The optimization below assumes that channel zero is live on thread
3266 * dispatch, which may not be the case if the fixed function dispatches
3267 * threads sparsely.
3268 */
3269 return false;
3270 }
3271
3272 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3273 switch (inst->opcode) {
3274 case BRW_OPCODE_IF:
3275 case BRW_OPCODE_DO:
3276 depth++;
3277 break;
3278
3279 case BRW_OPCODE_ENDIF:
3280 case BRW_OPCODE_WHILE:
3281 depth--;
3282 break;
3283
3284 case FS_OPCODE_DISCARD_JUMP:
3285 /* This can potentially make control flow non-uniform until the end
3286 * of the program.
3287 */
3288 return progress;
3289
3290 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
3291 if (depth == 0) {
3292 inst->opcode = BRW_OPCODE_MOV;
3293 inst->src[0] = brw_imm_ud(0u);
3294 inst->sources = 1;
3295 inst->force_writemask_all = true;
3296 progress = true;
3297 }
3298 break;
3299
3300 default:
3301 break;
3302 }
3303 }
3304
3305 return progress;
3306 }
3307
3308 /**
3309 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
3310 * instructions to FS_OPCODE_REP_FB_WRITE.
3311 */
3312 void
3313 fs_visitor::emit_repclear_shader()
3314 {
3315 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3316 int base_mrf = 0;
3317 int color_mrf = base_mrf + 2;
3318 fs_inst *mov;
3319
3320 if (uniforms > 0) {
3321 mov = bld.exec_all().group(4, 0)
3322 .MOV(brw_message_reg(color_mrf),
3323 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
3324 } else {
3325 struct brw_reg reg =
3326 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
3327 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
3328 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
3329
3330 mov = bld.exec_all().group(4, 0)
3331 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
3332 }
3333
3334 fs_inst *write = NULL;
3335 if (key->nr_color_regions == 1) {
3336 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3337 write->saturate = key->clamp_fragment_color;
3338 write->base_mrf = color_mrf;
3339 write->target = 0;
3340 write->header_size = 0;
3341 write->mlen = 1;
3342 } else {
3343 assume(key->nr_color_regions > 0);
3344
3345 struct brw_reg header =
3346 retype(brw_message_reg(base_mrf), BRW_REGISTER_TYPE_UD);
3347 bld.exec_all().group(16, 0)
3348 .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3349
3350 for (int i = 0; i < key->nr_color_regions; ++i) {
3351 if (i > 0) {
3352 bld.exec_all().group(1, 0)
3353 .MOV(component(header, 2), brw_imm_ud(i));
3354 }
3355
3356 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3357 write->saturate = key->clamp_fragment_color;
3358 write->base_mrf = base_mrf;
3359 write->target = i;
3360 write->header_size = 2;
3361 write->mlen = 3;
3362 }
3363 }
3364 write->eot = true;
3365 write->last_rt = true;
3366
3367 calculate_cfg();
3368
3369 assign_constant_locations();
3370 assign_curb_setup();
3371
3372 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
3373 if (uniforms > 0) {
3374 assert(mov->src[0].file == FIXED_GRF);
3375 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
3376 }
3377 }
3378
3379 /**
3380 * Walks through basic blocks, looking for repeated MRF writes and
3381 * removing the later ones.
3382 */
3383 bool
3384 fs_visitor::remove_duplicate_mrf_writes()
3385 {
3386 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
3387 bool progress = false;
3388
3389 /* Need to update the MRF tracking for compressed instructions. */
3390 if (dispatch_width >= 16)
3391 return false;
3392
3393 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3394
3395 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3396 if (inst->is_control_flow()) {
3397 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3398 }
3399
3400 if (inst->opcode == BRW_OPCODE_MOV &&
3401 inst->dst.file == MRF) {
3402 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
3403 if (prev_inst && prev_inst->opcode == BRW_OPCODE_MOV &&
3404 inst->dst.equals(prev_inst->dst) &&
3405 inst->src[0].equals(prev_inst->src[0]) &&
3406 inst->saturate == prev_inst->saturate &&
3407 inst->predicate == prev_inst->predicate &&
3408 inst->conditional_mod == prev_inst->conditional_mod &&
3409 inst->exec_size == prev_inst->exec_size) {
3410 inst->remove(block);
3411 progress = true;
3412 continue;
3413 }
3414 }
3415
3416 /* Clear out the last-write records for MRFs that were overwritten. */
3417 if (inst->dst.file == MRF) {
3418 last_mrf_move[inst->dst.nr] = NULL;
3419 }
3420
3421 if (inst->mlen > 0 && inst->base_mrf != -1) {
3422 /* Found a SEND instruction, which will include two or fewer
3423 * implied MRF writes. We could do better here.
3424 */
3425 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3426 last_mrf_move[inst->base_mrf + i] = NULL;
3427 }
3428 }
3429
3430 /* Clear out any MRF move records whose sources got overwritten. */
3431 for (unsigned i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
3432 if (last_mrf_move[i] &&
3433 regions_overlap(inst->dst, inst->size_written,
3434 last_mrf_move[i]->src[0],
3435 last_mrf_move[i]->size_read(0))) {
3436 last_mrf_move[i] = NULL;
3437 }
3438 }
3439
3440 if (inst->opcode == BRW_OPCODE_MOV &&
3441 inst->dst.file == MRF &&
3442 inst->src[0].file != ARF &&
3443 !inst->is_partial_write()) {
3444 last_mrf_move[inst->dst.nr] = inst;
3445 }
3446 }
3447
3448 if (progress)
3449 invalidate_live_intervals();
3450
3451 return progress;
3452 }
3453
3454 /**
3455 * Rounding modes for conversion instructions are included with each
3456 * conversion, but in the hardware the rounding mode is a piece of state;
3457 * once it is set, we don't need to set it again for subsequent
3458 * conversions.
3459 *
3460 * This is useful for vector/matrix conversions: setting the mode once is enough for the full vector/matrix.
3461 */
3462 bool
3463 fs_visitor::remove_extra_rounding_modes()
3464 {
3465 bool progress = false;
3466
3467 foreach_block (block, cfg) {
3468 brw_rnd_mode prev_mode = BRW_RND_MODE_UNSPECIFIED;
3469
3470 foreach_inst_in_block_safe (fs_inst, inst, block) {
3471 if (inst->opcode == SHADER_OPCODE_RND_MODE) {
3472 assert(inst->src[0].file == BRW_IMMEDIATE_VALUE);
3473 const brw_rnd_mode mode = (brw_rnd_mode) inst->src[0].d;
3474 if (mode == prev_mode) {
3475 inst->remove(block);
3476 progress = true;
3477 } else {
3478 prev_mode = mode;
3479 }
3480 }
3481 }
3482 }
3483
3484 if (progress)
3485 invalidate_live_intervals();
3486
3487 return progress;
3488 }
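/* For instance (a sketch, not actual generated IR): converting a dvec2 to
 * 16-bit floats component by component may emit
 *
 *    rnd_mode  RTNE
 *    f64to16   ...
 *    rnd_mode  RTNE
 *    f64to16   ...
 *
 * and the pass above removes the second rnd_mode, since the rounding-mode
 * state it requests is already in effect.
 */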
3489
3490 static void
3491 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3492 {
3493 /* Clear the flag for registers that actually got read (as expected). */
3494 for (int i = 0; i < inst->sources; i++) {
3495 int grf;
3496 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3497 grf = inst->src[i].nr;
3498 } else {
3499 continue;
3500 }
3501
3502 if (grf >= first_grf &&
3503 grf < first_grf + grf_len) {
3504 deps[grf - first_grf] = false;
3505 if (inst->exec_size == 16)
3506 deps[grf - first_grf + 1] = false;
3507 }
3508 }
3509 }
3510
3511 /**
3512 * Implements this workaround for the original 965:
3513 *
3514 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3515 * check for post destination dependencies on this instruction, software
3516 * must ensure that there is no destination hazard for the case of ‘write
3517 * followed by a posted write’ shown in the following example.
3518 *
3519 * 1. mov r3 0
3520 * 2. send r3.xy <rest of send instruction>
3521 * 3. mov r2 r3
3522 *
3523 * Due to no post-destination dependency check on the ‘send’, the above
3524 * code sequence could have two instructions (1 and 2) in flight at the
3525 * same time that both consider ‘r3’ as the target of their final writes.
3526 */
3527 void
3528 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3529 fs_inst *inst)
3530 {
3531 int write_len = regs_written(inst);
3532 int first_write_grf = inst->dst.nr;
3533 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3534 assert(write_len < (int)sizeof(needs_dep) - 1);
3535
3536 memset(needs_dep, false, sizeof(needs_dep));
3537 memset(needs_dep, true, write_len);
3538
3539 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3540
3541 /* Walk backwards looking for writes to registers we're writing which
3542 * aren't read since being written. If we hit the start of the program,
3543 * we assume that there are no outstanding dependencies on entry to the
3544 * program.
3545 */
3546 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3547 /* If we hit control flow, assume that there *are* outstanding
3548 * dependencies, and force their cleanup before our instruction.
3549 */
3550 if (block->start() == scan_inst && block->num != 0) {
3551 for (int i = 0; i < write_len; i++) {
3552 if (needs_dep[i])
3553 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3554 first_write_grf + i);
3555 }
3556 return;
3557 }
3558
3559 /* We insert our reads as late as possible on the assumption that any
3560 * instruction but a MOV that might have left us an outstanding
3561 * dependency has more latency than a MOV.
3562 */
3563 if (scan_inst->dst.file == VGRF) {
3564 for (unsigned i = 0; i < regs_written(scan_inst); i++) {
3565 int reg = scan_inst->dst.nr + i;
3566
3567 if (reg >= first_write_grf &&
3568 reg < first_write_grf + write_len &&
3569 needs_dep[reg - first_write_grf]) {
3570 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3571 needs_dep[reg - first_write_grf] = false;
3572 if (scan_inst->exec_size == 16)
3573 needs_dep[reg - first_write_grf + 1] = false;
3574 }
3575 }
3576 }
3577
3578 /* Clear the flag for registers that actually got read (as expected). */
3579 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3580
3581 /* Continue the loop only if we haven't resolved all the dependencies */
3582 int i;
3583 for (i = 0; i < write_len; i++) {
3584 if (needs_dep[i])
3585 break;
3586 }
3587 if (i == write_len)
3588 return;
3589 }
3590 }
3591
3592 /**
3593 * Implements this workaround for the original 965:
3594 *
3595 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3596 * used as a destination register until after it has been sourced by an
3597 * instruction with a different destination register.
3598 */
3599 void
3600 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3601 {
3602 int write_len = regs_written(inst);
3603 unsigned first_write_grf = inst->dst.nr;
3604 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3605 assert(write_len < (int)sizeof(needs_dep) - 1);
3606
3607 memset(needs_dep, false, sizeof(needs_dep));
3608 memset(needs_dep, true, write_len);
3609 /* Walk forwards looking for writes to registers we're writing which aren't
3610 * read before being written.
3611 */
3612 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3613 /* If we hit control flow, force resolve all remaining dependencies. */
3614 if (block->end() == scan_inst && block->num != cfg->num_blocks - 1) {
3615 for (int i = 0; i < write_len; i++) {
3616 if (needs_dep[i])
3617 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3618 first_write_grf + i);
3619 }
3620 return;
3621 }
3622
3623 /* Clear the flag for registers that actually got read (as expected). */
3624 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3625
3626 /* We insert our reads as late as possible since they're reading the
3627 * result of a SEND, which has massive latency.
3628 */
3629 if (scan_inst->dst.file == VGRF &&
3630 scan_inst->dst.nr >= first_write_grf &&
3631 scan_inst->dst.nr < first_write_grf + write_len &&
3632 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3633 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3634 scan_inst->dst.nr);
3635 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3636 }
3637
3638 /* Continue the loop only if we haven't resolved all the dependencies */
3639 int i;
3640 for (i = 0; i < write_len; i++) {
3641 if (needs_dep[i])
3642 break;
3643 }
3644 if (i == write_len)
3645 return;
3646 }
3647 }
3648
3649 void
3650 fs_visitor::insert_gen4_send_dependency_workarounds()
3651 {
3652 if (devinfo->gen != 4 || devinfo->is_g4x)
3653 return;
3654
3655 bool progress = false;
3656
3657 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3658 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3659 insert_gen4_pre_send_dependency_workarounds(block, inst);
3660 insert_gen4_post_send_dependency_workarounds(block, inst);
3661 progress = true;
3662 }
3663 }
3664
3665 if (progress)
3666 invalidate_live_intervals();
3667 }
3668
3669 /**
3670 * Turns the generic expression-style uniform pull constant load instruction
3671 * into a hardware-specific series of instructions for loading a pull
3672 * constant.
3673 *
3674 * The expression style allows the CSE pass before this to optimize out
3675 * repeated loads from the same offset, and gives the pre-register-allocation
3676 * scheduling full flexibility, while the conversion to native instructions
3677 * allows the post-register-allocation scheduler the best information
3678 * possible.
3679 *
3680 * Note that execution masking for setting up pull constant loads is special:
3681 * the channels that need to be written are unrelated to the current execution
3682 * mask, since a later instruction will use one of the result channels as a
3683 * source operand for all 8 or 16 of its channels.
3684 */
3685 void
3686 fs_visitor::lower_uniform_pull_constant_loads()
3687 {
3688 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3689 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3690 continue;
3691
3692 if (devinfo->gen >= 7) {
3693 const fs_builder ubld = fs_builder(this, block, inst).exec_all();
3694 const fs_reg payload = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD);
3695
3696 ubld.group(8, 0).MOV(payload,
3697 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3698 ubld.group(1, 0).MOV(component(payload, 2),
3699 brw_imm_ud(inst->src[1].ud / 16));
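/* A note on the payload just built (a sketch, not the full message
 * definition): it is a single GRF copied from g0, with the constant
 * buffer offset rewritten into dword 2 in 16-byte (oword) units --
 * hence the division of the byte offset in src[1] by 16.
 */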
3700
3701 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3702 inst->src[1] = payload;
3703 inst->header_size = 1;
3704 inst->mlen = 1;
3705
3706 invalidate_live_intervals();
3707 } else {
3708 /* Before register allocation, we didn't tell the scheduler about the
3709 * MRF we use. We know it's safe to use this MRF because nothing
3710 * else does except for register spill/unspill, which generates and
3711 * uses its MRF within a single IR instruction.
3712 */
3713 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen);
3714 inst->mlen = 1;
3715 }
3716 }
3717 }
3718
3719 bool
3720 fs_visitor::lower_load_payload()
3721 {
3722 bool progress = false;
3723
3724 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3725 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3726 continue;
3727
3728 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3729 assert(inst->saturate == false);
3730 fs_reg dst = inst->dst;
3731
3732 /* Get rid of COMPR4. We'll add it back in if we need it */
3733 if (dst.file == MRF)
3734 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3735
3736 const fs_builder ibld(this, block, inst);
3737 const fs_builder hbld = ibld.exec_all().group(8, 0);
3738
3739 for (uint8_t i = 0; i < inst->header_size; i++) {
3740 if (inst->src[i].file != BAD_FILE) {
3741 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3742 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3743 hbld.MOV(mov_dst, mov_src);
3744 }
3745 dst = offset(dst, hbld, 1);
3746 }
3747
3748 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3749 inst->exec_size > 8) {
3750 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3751 * a straightforward copy. Instead, the result of the
3752 * LOAD_PAYLOAD is treated as interleaved and the first four
3753 * non-header sources are unpacked as:
3754 *
3755 * m + 0: r0
3756 * m + 1: g0
3757 * m + 2: b0
3758 * m + 3: a0
3759 * m + 4: r1
3760 * m + 5: g1
3761 * m + 6: b1
3762 * m + 7: a1
3763 *
3764 * This is used for gen <= 5 fb writes.
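*
* (Background, for readers unfamiliar with COMPR4: setting BRW_MRF_COMPR4
* on the MRF destination of a SIMD16 MOV makes the hardware write the
* first SIMD8 half to mN and the second half to mN+4, which is what
* produces the interleaving shown above without extra instructions.)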
3765 */
3766 assert(inst->exec_size == 16);
3767 assert(inst->header_size + 4 <= inst->sources);
3768 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3769 if (inst->src[i].file != BAD_FILE) {
3770 if (devinfo->has_compr4) {
3771 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3772 compr4_dst.nr |= BRW_MRF_COMPR4;
3773 ibld.MOV(compr4_dst, inst->src[i]);
3774 } else {
3775 /* Platform doesn't have COMPR4. We have to fake it */
3776 fs_reg mov_dst = retype(dst, inst->src[i].type);
3777 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3778 mov_dst.nr += 4;
3779 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3780 }
3781 }
3782
3783 dst.nr++;
3784 }
3785
3786 /* The loop above only ever incremented us through the first set
3787 * of 4 registers. However, thanks to the magic of COMPR4, we
3788 * actually wrote to the first 8 registers, so we need to take
3789 * that into account now.
3790 */
3791 dst.nr += 4;
3792
3793 /* The COMPR4 code took care of the first 4 sources. We'll let
3794 * the regular path handle any remaining sources. Yes, we are
3795 * modifying the instruction but we're about to delete it so
3796 * this really doesn't hurt anything.
3797 */
3798 inst->header_size += 4;
3799 }
3800
3801 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3802 if (inst->src[i].file != BAD_FILE)
3803 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3804 dst = offset(dst, ibld, 1);
3805 }
3806
3807 inst->remove(block);
3808 progress = true;
3809 }
3810
3811 if (progress)
3812 invalidate_live_intervals();
3813
3814 return progress;
3815 }
3816
3817 bool
3818 fs_visitor::lower_integer_multiplication()
3819 {
3820 bool progress = false;
3821
3822 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3823 const fs_builder ibld(this, block, inst);
3824
3825 if (inst->opcode == BRW_OPCODE_MUL) {
3826 if (inst->dst.is_accumulator() ||
3827 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3828 inst->dst.type != BRW_REGISTER_TYPE_UD))
3829 continue;
3830
3831 if (devinfo->has_integer_dword_mul)
3832 continue;
3833
3834 if (inst->src[1].file == IMM &&
3835 inst->src[1].ud < (1 << 16)) {
3836 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3837 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3838 * src1 are used.
3839 *
3840 * If multiplying by an immediate value that fits in 16-bits, do a
3841 * single MUL instruction with that value in the proper location.
3842 */
3843 if (devinfo->gen < 7) {
3844 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
3845 inst->dst.type);
3846 ibld.MOV(imm, inst->src[1]);
3847 ibld.MUL(inst->dst, imm, inst->src[0]);
3848 } else {
3849 const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
3850 ibld.MUL(inst->dst, inst->src[0],
3851 ud ? brw_imm_uw(inst->src[1].ud)
3852 : brw_imm_w(inst->src[1].d));
3853 }
3854 } else {
3855 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3856 * do 32-bit integer multiplication in one instruction, but instead
3857 * must do a sequence (which actually calculates a 64-bit result):
3858 *
3859 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3860 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3861 * mov(8) g2<1>D acc0<8,8,1>D
3862 *
3863 * But on Gen > 6, the ability to use the second accumulator register
3864 * (acc1) for non-float data types was removed, preventing a simple
3865 * implementation in SIMD16. A 16-channel result can be calculated by
3866 * executing the three instructions twice in SIMD8, once with quarter
3867 * control of 1Q for the first eight channels and again with 2Q for
3868 * the second eight channels.
3869 *
3870 * Which accumulator register is implicitly accessed (by AccWrEnable
3871 * for instance) is determined by the quarter control. Unfortunately
3872 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3873 * implicit accumulator access by an instruction with 2Q will access
3874 * acc1 regardless of whether the data type is usable in acc1.
3875 *
3876 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3877 * integer data types.
3878 *
3879 * Since we only want the low 32-bits of the result, we can do two
3880 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3881 * adjust the high result and add them (like the mach is doing):
3882 *
3883 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3884 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3885 * shl(8) g9<1>D g8<8,8,1>D 16D
3886 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3887 *
3888 * We avoid the shl instruction by realizing that we only want to add
3889 * the low 16-bits of the "high" result to the high 16-bits of the
3890 * "low" result and using proper regioning on the add:
3891 *
3892 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3893 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3894 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3895 *
3896 * Since it does not use the (single) accumulator register, we can
3897 * schedule multi-component multiplications much better.
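*
* As a quick sanity check of the identity used here (arithmetic mod 2^32):
*
* a * b = a * (b & 0xffff) + ((a * (b >> 16)) << 16)
*
* and since (x << 16) has zero low 16 bits, adding the high product can
* only affect bits 16..31 of the low product, which is exactly the part
* the regioned ADD touches.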
3898 */
3899
3900 bool needs_mov = false;
3901 fs_reg orig_dst = inst->dst;
3902 fs_reg low = inst->dst;
3903 if (orig_dst.is_null() || orig_dst.file == MRF ||
3904 regions_overlap(inst->dst, inst->size_written,
3905 inst->src[0], inst->size_read(0)) ||
3906 regions_overlap(inst->dst, inst->size_written,
3907 inst->src[1], inst->size_read(1))) {
3908 needs_mov = true;
3909 /* Get a new VGRF but keep the same stride as inst->dst */
3910 low = fs_reg(VGRF, alloc.allocate(regs_written(inst)),
3911 inst->dst.type);
3912 low.stride = inst->dst.stride;
3913 low.offset = inst->dst.offset % REG_SIZE;
3914 }
3915
3916 /* Get a new VGRF but keep the same stride as inst->dst */
3917 fs_reg high(VGRF, alloc.allocate(regs_written(inst)),
3918 inst->dst.type);
3919 high.stride = inst->dst.stride;
3920 high.offset = inst->dst.offset % REG_SIZE;
3921
3922 if (devinfo->gen >= 7) {
3923 if (inst->src[1].abs)
3924 lower_src_modifiers(this, block, inst, 1);
3925
3926 if (inst->src[1].file == IMM) {
3927 ibld.MUL(low, inst->src[0],
3928 brw_imm_uw(inst->src[1].ud & 0xffff));
3929 ibld.MUL(high, inst->src[0],
3930 brw_imm_uw(inst->src[1].ud >> 16));
3931 } else {
3932 ibld.MUL(low, inst->src[0],
3933 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
3934 ibld.MUL(high, inst->src[0],
3935 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
3936 }
3937 } else {
3938 if (inst->src[0].abs)
3939 lower_src_modifiers(this, block, inst, 0);
3940
3941 ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
3942 inst->src[1]);
3943 ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
3944 inst->src[1]);
3945 }
3946
3947 ibld.ADD(subscript(low, BRW_REGISTER_TYPE_UW, 1),
3948 subscript(low, BRW_REGISTER_TYPE_UW, 1),
3949 subscript(high, BRW_REGISTER_TYPE_UW, 0));
3950
3951 if (needs_mov || inst->conditional_mod) {
3952 set_condmod(inst->conditional_mod,
3953 ibld.MOV(orig_dst, low));
3954 }
3955 }
3956
3957 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3958 /* According to the BDW+ BSpec page for the "Multiply Accumulate
3959 * High" instruction:
3960 *
3961 * "An added preliminary mov is required for source modification on
3962 * src1:
3963 * mov (8) r3.0<1>:d -r3<8;8,1>:d
3964 * mul (8) acc0:d r2.0<8;8,1>:d r3.0<16;8,2>:uw
3965 * mach (8) r5.0<1>:d r2.0<8;8,1>:d r3.0<8;8,1>:d"
3966 */
3967 if (devinfo->gen >= 8 && (inst->src[1].negate || inst->src[1].abs))
3968 lower_src_modifiers(this, block, inst, 1);
3969
3970 /* Should have been lowered to 8-wide. */
3971 assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
3972 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3973 inst->dst.type);
3974 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3975 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3976
3977 if (devinfo->gen >= 8) {
3978 /* Until Gen8, integer multiplies read 32-bits from one source,
3979 * and 16-bits from the other, relying on the MACH instruction
3980 * to generate the high bits of the result.
3981 *
3982 * On Gen8, the multiply instruction does a full 32x32-bit
3983 * multiply, but in order to do a 64-bit multiply we can simulate
3984 * the previous behavior and then use a MACH instruction.
3985 */
3986 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3987 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3988 mul->src[1].type = BRW_REGISTER_TYPE_UW;
3989 mul->src[1].stride *= 2;
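/* E.g. a src1 region of g4<8,8,1>:d becomes g4.0<16,8,2>:uw after the
 * retype and stride change, so the MUL reads only the low word of each
 * 32-bit channel -- mimicking the pre-Gen8 behaviour that the following
 * MACH expects.
 */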
3990
3991 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3992 inst->group > 0) {
3993 /* Among other things the quarter control bits influence which
3994 * accumulator register is used by the hardware for instructions
3995 * that access the accumulator implicitly (e.g. MACH). A
3996 * second-half instruction would normally map to acc1, which
3997 * doesn't exist on Gen7 and up (the hardware does emulate it for
3998 * floating-point instructions *only* by taking advantage of the
3999 * extra precision of acc0 not normally used for floating point
4000 * arithmetic).
4001 *
4002 * HSW and up are careful enough not to try to access an
4003 * accumulator register that doesn't exist, but on earlier Gen7
4004 * hardware we need to make sure that the quarter control bits are
4005 * zero to avoid non-deterministic behaviour and emit an extra MOV
4006 * to get the result masked correctly according to the current
4007 * channel enables.
4008 */
4009 mach->group = 0;
4010 mach->force_writemask_all = true;
4011 mach->dst = ibld.vgrf(inst->dst.type);
4012 ibld.MOV(inst->dst, mach->dst);
4013 }
4014 } else {
4015 continue;
4016 }
4017
4018 inst->remove(block);
4019 progress = true;
4020 }
4021
4022 if (progress)
4023 invalidate_live_intervals();
4024
4025 return progress;
4026 }
4027
4028 bool
4029 fs_visitor::lower_minmax()
4030 {
4031 assert(devinfo->gen < 6);
4032
4033 bool progress = false;
4034
4035 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4036 const fs_builder ibld(this, block, inst);
4037
4038 if (inst->opcode == BRW_OPCODE_SEL &&
4039 inst->predicate == BRW_PREDICATE_NONE) {
4040 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
4041 * the original SEL.L/GE instruction
4042 */
4043 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
4044 inst->conditional_mod);
4045 inst->predicate = BRW_PREDICATE_NORMAL;
4046 inst->conditional_mod = BRW_CONDITIONAL_NONE;
4047
4048 progress = true;
4049 }
4050 }
4051
4052 if (progress)
4053 invalidate_live_intervals();
4054
4055 return progress;
4056 }
4057
4058 static void
4059 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
4060 fs_reg *dst, fs_reg color, unsigned components)
4061 {
4062 if (key->clamp_fragment_color) {
4063 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
4064 assert(color.type == BRW_REGISTER_TYPE_F);
4065
4066 for (unsigned i = 0; i < components; i++)
4067 set_saturate(true,
4068 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
4069
4070 color = tmp;
4071 }
4072
4073 for (unsigned i = 0; i < components; i++)
4074 dst[i] = offset(color, bld, i);
4075 }
4076
4077 static void
4078 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
4079 const struct brw_wm_prog_data *prog_data,
4080 const brw_wm_prog_key *key,
4081 const fs_visitor::thread_payload &payload)
4082 {
4083 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
4084 const gen_device_info *devinfo = bld.shader->devinfo;
4085 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
4086 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
4087 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
4088 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
4089 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
4090 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
4091 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
4092 const unsigned components =
4093 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
4094
4095 /* We can potentially have a message length of up to 15, so we have to set
4096 * base_mrf to either 0 or 1 in order to fit in m0..m15.
4097 */
4098 fs_reg sources[15];
4099 int header_size = 2, payload_header_size;
4100 unsigned length = 0;
4101
4102 if (devinfo->gen < 6) {
4103 /* TODO: Support SIMD32 on gen4-5 */
4104 assert(bld.group() < 16);
4105
4106 /* For gen4-5, we always have a header consisting of g0 and g1. We have
4107 * an implied MOV from g0,g1 to the start of the message. The MOV from
4108 * g0 is handled by the hardware and the MOV from g1 is provided by the
4109 * generator. This is required because, on gen4-5, the generator may
4110 * generate two write messages with different message lengths in order
4111 * to handle AA data properly.
4112 *
4113 * Also, since the pixel mask goes in the g0 portion of the message and
4114 * since render target writes are the last thing in the shader, we write
4115 * the pixel mask directly into g0 and it will get copied as part of the
4116 * implied write.
4117 */
4118 if (prog_data->uses_kill) {
4119 bld.exec_all().group(1, 0)
4120 .MOV(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW),
4121 brw_flag_reg(0, 1));
4122 }
4123
4124 assert(length == 0);
4125 length = 2;
4126 } else if ((devinfo->gen <= 7 && !devinfo->is_haswell &&
4127 prog_data->uses_kill) ||
4128 color1.file != BAD_FILE ||
4129 key->nr_color_regions > 1) {
4130 /* From the Sandy Bridge PRM, volume 4, page 198:
4131 *
4132 * "Dispatched Pixel Enables. One bit per pixel indicating
4133 * which pixels were originally enabled when the thread was
4134 * dispatched. This field is only required for the end-of-
4135 * thread message and on all dual-source messages."
4136 */
4137 const fs_builder ubld = bld.exec_all().group(8, 0);
4138
4139 fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
4140 if (bld.group() < 16) {
4141 /* The header starts off as g0 and g1 for the first half */
4142 ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
4143 BRW_REGISTER_TYPE_UD));
4144 } else {
4145 /* The header starts off as g0 and g2 for the second half */
4146 assert(bld.group() < 32);
4147 const fs_reg header_sources[2] = {
4148 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
4149 retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD),
4150 };
4151 ubld.LOAD_PAYLOAD(header, header_sources, 2, 0);
4152 }
4153
4154 uint32_t g00_bits = 0;
4155
4156 /* Set "Source0 Alpha Present to RenderTarget" bit in message
4157 * header.
4158 */
4159 if (inst->target > 0 && key->replicate_alpha)
4160 g00_bits |= 1 << 11;
4161
4162 /* Set computes stencil to render target */
4163 if (prog_data->computed_stencil)
4164 g00_bits |= 1 << 14;
4165
4166 if (g00_bits) {
4167 /* OR extra bits into g0.0 */
4168 ubld.group(1, 0).OR(component(header, 0),
4169 retype(brw_vec1_grf(0, 0),
4170 BRW_REGISTER_TYPE_UD),
4171 brw_imm_ud(g00_bits));
4172 }
4173
4174 /* Set the render target index for choosing BLEND_STATE. */
4175 if (inst->target > 0) {
4176 ubld.group(1, 0).MOV(component(header, 2), brw_imm_ud(inst->target));
4177 }
4178
4179 if (prog_data->uses_kill) {
4180 assert(bld.group() < 16);
4181 ubld.group(1, 0).MOV(retype(component(header, 15),
4182 BRW_REGISTER_TYPE_UW),
4183 brw_flag_reg(0, 1));
4184 }
4185
4186 assert(length == 0);
4187 sources[0] = header;
4188 sources[1] = horiz_offset(header, 8);
4189 length = 2;
4190 }
4191 assert(length == 0 || length == 2);
4192 header_size = length;
4193
4194 if (payload.aa_dest_stencil_reg[0]) {
4195 assert(inst->group < 16);
4196 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
4197 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
4198 .MOV(sources[length],
4199 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg[0], 0)));
4200 length++;
4201 }
4202
4203 if (sample_mask.file != BAD_FILE) {
4204 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
4205 BRW_REGISTER_TYPE_UD);
4206
4207 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
4208 * relevant. Since it's unsigned single words one vgrf is always
4209 * 16-wide, but only the lower or higher 8 channels will be used by the
4210 * hardware when doing a SIMD8 write depending on whether we have
4211 * selected the subspans for the first or second half respectively.
4212 */
4213 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
4214 sample_mask.type = BRW_REGISTER_TYPE_UW;
4215 sample_mask.stride *= 2;
4216
4217 bld.exec_all().annotate("FB write oMask")
4218 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
4219 inst->group % 16),
4220 sample_mask);
4221 length++;
4222 }
4223
4224 payload_header_size = length;
4225
4226 if (src0_alpha.file != BAD_FILE) {
4227 /* FIXME: This is being passed at the wrong location in the payload and
4228 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
4229 * It's supposed to be immediately before oMask but there seems to be no
4230 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
4231 * requires header sources to form a contiguous segment at the beginning
4232 * of the message and src0_alpha has per-channel semantics.
4233 */
4234 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
4235 length++;
4236 } else if (key->replicate_alpha && inst->target != 0) {
4237 /* Handle the case when fragment shader doesn't write to draw buffer
4238 * zero. No need to call setup_color_payload() for src0_alpha because
4239 * alpha value will be undefined.
4240 */
4241 length++;
4242 }
4243
4244 setup_color_payload(bld, key, &sources[length], color0, components);
4245 length += 4;
4246
4247 if (color1.file != BAD_FILE) {
4248 setup_color_payload(bld, key, &sources[length], color1, components);
4249 length += 4;
4250 }
4251
4252 if (src_depth.file != BAD_FILE) {
4253 sources[length] = src_depth;
4254 length++;
4255 }
4256
4257 if (dst_depth.file != BAD_FILE) {
4258 sources[length] = dst_depth;
4259 length++;
4260 }
4261
4262 if (src_stencil.file != BAD_FILE) {
4263 assert(devinfo->gen >= 9);
4264 assert(bld.dispatch_width() == 8);
4265
4266 /* XXX: src_stencil is only available on gen9+. dst_depth is never
4267 * available on gen9+. As such it's impossible to have both enabled at the
4268 * same time and therefore length cannot overrun the array.
4269 */
4270 assert(length < 15);
4271
4272 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
4273 bld.exec_all().annotate("FB write OS")
4274 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
4275 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
4276 length++;
4277 }
4278
4279 fs_inst *load;
4280 if (devinfo->gen >= 7) {
4281 /* Send from the GRF */
4282 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
4283 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
4284 payload.nr = bld.shader->alloc.allocate(regs_written(load));
4285 load->dst = payload;
4286
4287 inst->src[0] = payload;
4288 inst->resize_sources(1);
4289 } else {
4290 /* Send from the MRF */
4291 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
4292 sources, length, payload_header_size);
4293
4294 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
4295 * will do this for us if we just give it a COMPR4 destination.
4296 */
4297 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
4298 load->dst.nr |= BRW_MRF_COMPR4;
4299
4300 if (devinfo->gen < 6) {
4301 /* Set up src[0] for the implied MOV from grf0-1 */
4302 inst->resize_sources(1);
4303 inst->src[0] = brw_vec8_grf(0, 0);
4304 } else {
4305 inst->resize_sources(0);
4306 }
4307 inst->base_mrf = 1;
4308 }
4309
4310 inst->opcode = FS_OPCODE_FB_WRITE;
4311 inst->mlen = regs_written(load);
4312 inst->header_size = header_size;
4313 }
4314
4315 static void
4316 lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
4317 {
4318 const fs_builder &ubld = bld.exec_all().group(8, 0);
4319 const unsigned length = 2;
4320 const fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, length);
4321
4322 if (bld.group() < 16) {
4323 ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
4324 BRW_REGISTER_TYPE_UD));
4325 } else {
4326 assert(bld.group() < 32);
4327 const fs_reg header_sources[] = {
4328 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
4329 retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD)
4330 };
4331 ubld.LOAD_PAYLOAD(header, header_sources, ARRAY_SIZE(header_sources), 0);
4332 }
4333
4334 inst->resize_sources(1);
4335 inst->src[0] = header;
4336 inst->opcode = FS_OPCODE_FB_READ;
4337 inst->mlen = length;
4338 inst->header_size = length;
4339 }
4340
4341 static void
4342 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
4343 const fs_reg &coordinate,
4344 const fs_reg &shadow_c,
4345 const fs_reg &lod, const fs_reg &lod2,
4346 const fs_reg &surface,
4347 const fs_reg &sampler,
4348 unsigned coord_components,
4349 unsigned grad_components)
4350 {
4351 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
4352 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
4353 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
4354 fs_reg msg_end = msg_begin;
4355
4356 /* g0 header. */
4357 msg_end = offset(msg_end, bld.group(8, 0), 1);
4358
4359 for (unsigned i = 0; i < coord_components; i++)
4360 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
4361 offset(coordinate, bld, i));
4362
4363 msg_end = offset(msg_end, bld, coord_components);
4364
4365 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
4366 * require all three components to be present and zero if they are unused.
4367 */
4368 if (coord_components > 0 &&
4369 (has_lod || shadow_c.file != BAD_FILE ||
4370 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
4371 for (unsigned i = coord_components; i < 3; i++)
4372 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
4373
4374 msg_end = offset(msg_end, bld, 3 - coord_components);
4375 }
4376
4377 if (op == SHADER_OPCODE_TXD) {
4378 /* TXD unsupported in SIMD16 mode. */
4379 assert(bld.dispatch_width() == 8);
4380
4381 /* the slots for u and v are always present, but r is optional */
4382 if (coord_components < 2)
4383 msg_end = offset(msg_end, bld, 2 - coord_components);
4384
4385 /* P = u, v, r
4386 * dPdx = dudx, dvdx, drdx
4387 * dPdy = dudy, dvdy, drdy
4388 *
4389 * 1-arg: Does not exist.
4390 *
4391 * 2-arg: dudx dvdx dudy dvdy
4392 * dPdx.x dPdx.y dPdy.x dPdy.y
4393 * m4 m5 m6 m7
4394 *
4395 * 3-arg: dudx dvdx drdx dudy dvdy drdy
4396 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
4397 * m5 m6 m7 m8 m9 m10
4398 */
4399 for (unsigned i = 0; i < grad_components; i++)
4400 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
4401
4402 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
4403
4404 for (unsigned i = 0; i < grad_components; i++)
4405 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
4406
4407 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
4408 }
4409
4410 if (has_lod) {
4411 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *Without*
4412 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
4413 */
4414 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
4415 bld.dispatch_width() == 16);
4416
4417 const brw_reg_type type =
4418 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
4419 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
4420 bld.MOV(retype(msg_end, type), lod);
4421 msg_end = offset(msg_end, bld, 1);
4422 }
4423
4424 if (shadow_c.file != BAD_FILE) {
4425 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
4426 /* There's no plain shadow compare message, so we use shadow
4427 * compare with a bias of 0.0.
4428 */
4429 bld.MOV(msg_end, brw_imm_f(0.0f));
4430 msg_end = offset(msg_end, bld, 1);
4431 }
4432
4433 bld.MOV(msg_end, shadow_c);
4434 msg_end = offset(msg_end, bld, 1);
4435 }
4436
4437 inst->opcode = op;
4438 inst->src[0] = reg_undef;
4439 inst->src[1] = surface;
4440 inst->src[2] = sampler;
4441 inst->resize_sources(3);
4442 inst->base_mrf = msg_begin.nr;
4443 inst->mlen = msg_end.nr - msg_begin.nr;
4444 inst->header_size = 1;
4445 }
4446
4447 static void
4448 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
4449 const fs_reg &coordinate,
4450 const fs_reg &shadow_c,
4451 const fs_reg &lod, const fs_reg &lod2,
4452 const fs_reg &sample_index,
4453 const fs_reg &surface,
4454 const fs_reg &sampler,
4455 unsigned coord_components,
4456 unsigned grad_components)
4457 {
4458 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
4459 fs_reg msg_coords = message;
4460 unsigned header_size = 0;
4461
4462 if (inst->offset != 0) {
4463 /* The offsets set up by the visitor are in the m1 header, so we can't
4464 * go headerless.
4465 */
4466 header_size = 1;
4467 message.nr--;
4468 }
4469
4470 for (unsigned i = 0; i < coord_components; i++)
4471 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
4472 offset(coordinate, bld, i));
4473
4474 fs_reg msg_end = offset(msg_coords, bld, coord_components);
4475 fs_reg msg_lod = offset(msg_coords, bld, 4);
4476
4477 if (shadow_c.file != BAD_FILE) {
4478 fs_reg msg_shadow = msg_lod;
4479 bld.MOV(msg_shadow, shadow_c);
4480 msg_lod = offset(msg_shadow, bld, 1);
4481 msg_end = msg_lod;
4482 }
4483
4484 switch (op) {
4485 case SHADER_OPCODE_TXL:
4486 case FS_OPCODE_TXB:
4487 bld.MOV(msg_lod, lod);
4488 msg_end = offset(msg_lod, bld, 1);
4489 break;
4490 case SHADER_OPCODE_TXD:
4491 /**
4492 * P = u, v, r
4493 * dPdx = dudx, dvdx, drdx
4494 * dPdy = dudy, dvdy, drdy
4495 *
4496 * Load up these values:
4497 * - dudx dudy dvdx dvdy drdx drdy
4498 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
4499 */
4500 msg_end = msg_lod;
4501 for (unsigned i = 0; i < grad_components; i++) {
4502 bld.MOV(msg_end, offset(lod, bld, i));
4503 msg_end = offset(msg_end, bld, 1);
4504
4505 bld.MOV(msg_end, offset(lod2, bld, i));
4506 msg_end = offset(msg_end, bld, 1);
4507 }
4508 break;
4509 case SHADER_OPCODE_TXS:
4510 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
4511 bld.MOV(msg_lod, lod);
4512 msg_end = offset(msg_lod, bld, 1);
4513 break;
4514 case SHADER_OPCODE_TXF:
4515 msg_lod = offset(msg_coords, bld, 3);
4516 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
4517 msg_end = offset(msg_lod, bld, 1);
4518 break;
4519 case SHADER_OPCODE_TXF_CMS:
4520 msg_lod = offset(msg_coords, bld, 3);
4521 /* lod */
4522 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
4523 /* sample index */
4524 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
4525 msg_end = offset(msg_lod, bld, 2);
4526 break;
4527 default:
4528 break;
4529 }
4530
4531 inst->opcode = op;
4532 inst->src[0] = reg_undef;
4533 inst->src[1] = surface;
4534 inst->src[2] = sampler;
4535 inst->resize_sources(3);
4536 inst->base_mrf = message.nr;
4537 inst->mlen = msg_end.nr - message.nr;
4538 inst->header_size = header_size;
4539
4540 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4541 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4542 }
4543
4544 static bool
4545 is_high_sampler(const struct gen_device_info *devinfo, const fs_reg &sampler)
4546 {
4547 if (devinfo->gen < 8 && !devinfo->is_haswell)
4548 return false;
4549
4550 return sampler.file != IMM || sampler.ud >= 16;
4551 }
4552
4553 static unsigned
4554 sampler_msg_type(const gen_device_info *devinfo,
4555 opcode opcode, bool shadow_compare)
4556 {
4557 assert(devinfo->gen >= 5);
4558 switch (opcode) {
4559 case SHADER_OPCODE_TEX:
4560 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE :
4561 GEN5_SAMPLER_MESSAGE_SAMPLE;
4562 case FS_OPCODE_TXB:
4563 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE :
4564 GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
4565 case SHADER_OPCODE_TXL:
4566 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE :
4567 GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
4568 case SHADER_OPCODE_TXL_LZ:
4569 return shadow_compare ? GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ :
4570 GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
4571 case SHADER_OPCODE_TXS:
4572 case SHADER_OPCODE_IMAGE_SIZE:
4573 return GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
4574 case SHADER_OPCODE_TXD:
4575 assert(!shadow_compare || devinfo->gen >= 8 || devinfo->is_haswell);
4576 return shadow_compare ? HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE :
4577 GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
4578 case SHADER_OPCODE_TXF:
4579 return GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
4580 case SHADER_OPCODE_TXF_LZ:
4581 assert(devinfo->gen >= 9);
4582 return GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
4583 case SHADER_OPCODE_TXF_CMS_W:
4584 assert(devinfo->gen >= 9);
4585 return GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
4586 case SHADER_OPCODE_TXF_CMS:
4587 return devinfo->gen >= 7 ? GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS :
4588 GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
4589 case SHADER_OPCODE_TXF_UMS:
4590 assert(devinfo->gen >= 7);
4591 return GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
4592 case SHADER_OPCODE_TXF_MCS:
4593 assert(devinfo->gen >= 7);
4594 return GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
4595 case SHADER_OPCODE_LOD:
4596 return GEN5_SAMPLER_MESSAGE_LOD;
4597 case SHADER_OPCODE_TG4:
4598 assert(devinfo->gen >= 7);
4599 return shadow_compare ? GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C :
4600 GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
4601 break;
4602 case SHADER_OPCODE_TG4_OFFSET:
4603 assert(devinfo->gen >= 7);
4604 return shadow_compare ? GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C :
4605 GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
4606 case SHADER_OPCODE_SAMPLEINFO:
4607 return GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
4608 default:
4609 unreachable("not reached");
4610 }
4611 }
4612
4613 static void
4614 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
4615 const fs_reg &coordinate,
4616 const fs_reg &shadow_c,
4617 fs_reg lod, const fs_reg &lod2,
4618 const fs_reg &min_lod,
4619 const fs_reg &sample_index,
4620 const fs_reg &mcs,
4621 const fs_reg &surface,
4622 const fs_reg &sampler,
4623 const fs_reg &tg4_offset,
4624 unsigned coord_components,
4625 unsigned grad_components)
4626 {
4627 const gen_device_info *devinfo = bld.shader->devinfo;
4628 const brw_stage_prog_data *prog_data = bld.shader->stage_prog_data;
4629 unsigned reg_width = bld.dispatch_width() / 8;
4630 unsigned header_size = 0, length = 0;
4631 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
4632 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
4633 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
4634
4635 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
4636 inst->offset != 0 || inst->eot ||
4637 op == SHADER_OPCODE_SAMPLEINFO ||
4638 is_high_sampler(devinfo, sampler)) {
4639 /* For general texture offsets (no txf workaround), we need a header to
4640 * put them in.
4641 *
4642 * TG4 needs to place its channel select in the header, for interaction
4643 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
4644 * larger sampler numbers we need to offset the Sampler State Pointer in
4645 * the header.
4646 */
4647 fs_reg header = retype(sources[0], BRW_REGISTER_TYPE_UD);
4648 header_size = 1;
4649 length++;
4650
4651 /* If we're requesting fewer than four channels worth of response,
4652 * and we have an explicit header, we need to set up the sampler
4653 * writemask. It's reversed from normal: 1 means "don't write".
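* For example, a two-channel response gives regs_written(inst) / reg_width
* == 2, so mask == 0b1100 and the last two response channels are not
* written; the mask is ORed into bits 15:12 of inst->offset and ends up in
* dword 2 of the header built below.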
4654 */
4655 if (!inst->eot && regs_written(inst) != 4 * reg_width) {
4656 assert(regs_written(inst) % reg_width == 0);
4657 unsigned mask = ~((1 << (regs_written(inst) / reg_width)) - 1) & 0xf;
4658 inst->offset |= mask << 12;
4659 }
4660
4661 /* Build the actual header */
4662 const fs_builder ubld = bld.exec_all().group(8, 0);
4663 const fs_builder ubld1 = ubld.group(1, 0);
4664 ubld.MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
4665 if (inst->offset) {
4666 ubld1.MOV(component(header, 2), brw_imm_ud(inst->offset));
4667 } else if (bld.shader->stage != MESA_SHADER_VERTEX &&
4668 bld.shader->stage != MESA_SHADER_FRAGMENT) {
4669 /* The vertex and fragment stages have g0.2 set to 0, so
4670 * header0.2 is 0 when g0 is copied. Other stages may not, so we
4671 * must set it to 0 to avoid setting undesirable bits in the
4672 * message.
4673 */
4674 ubld1.MOV(component(header, 2), brw_imm_ud(0));
4675 }
4676
4677 if (is_high_sampler(devinfo, sampler)) {
4678 if (sampler.file == BRW_IMMEDIATE_VALUE) {
4679 assert(sampler.ud >= 16);
4680 const int sampler_state_size = 16; /* 16 bytes */
4681
4682 ubld1.ADD(component(header, 3),
4683 retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
4684 brw_imm_ud(16 * (sampler.ud / 16) * sampler_state_size));
4685 } else {
4686 fs_reg tmp = ubld1.vgrf(BRW_REGISTER_TYPE_UD);
4687 ubld1.AND(tmp, sampler, brw_imm_ud(0x0f0));
4688 ubld1.SHL(tmp, tmp, brw_imm_ud(4));
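/* (sampler & 0xf0) << 4 equals (sampler / 16) * 256 bytes, the same
 * 16-entries-of-16-bytes step the immediate path above adds to the
 * Sampler State Pointer.
 */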
4689 ubld1.ADD(component(header, 3),
4690 retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
4691 tmp);
4692 }
4693 }
4694 }
4695
4696 if (shadow_c.file != BAD_FILE) {
4697 bld.MOV(sources[length], shadow_c);
4698 length++;
4699 }
4700
4701 bool coordinate_done = false;
4702
4703 /* Set up the LOD info */
4704 switch (op) {
4705 case FS_OPCODE_TXB:
4706 case SHADER_OPCODE_TXL:
4707 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4708 op = SHADER_OPCODE_TXL_LZ;
4709 break;
4710 }
4711 bld.MOV(sources[length], lod);
4712 length++;
4713 break;
4714 case SHADER_OPCODE_TXD:
4715 /* TXD should have been lowered in SIMD16 mode. */
4716 assert(bld.dispatch_width() == 8);
4717
4718 /* Load dPdx and the coordinate together:
4719 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4720 */
4721 for (unsigned i = 0; i < coord_components; i++) {
4722 bld.MOV(sources[length++], offset(coordinate, bld, i));
4723
4724 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4725 * only derivatives for (u, v, r).
4726 */
4727 if (i < grad_components) {
4728 bld.MOV(sources[length++], offset(lod, bld, i));
4729 bld.MOV(sources[length++], offset(lod2, bld, i));
4730 }
4731 }
4732
4733 coordinate_done = true;
4734 break;
4735 case SHADER_OPCODE_TXS:
4736 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4737 length++;
4738 break;
4739 case SHADER_OPCODE_IMAGE_SIZE:
4740 /* We need an LOD; just use 0 */
4741 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
4742 length++;
4743 break;
4744 case SHADER_OPCODE_TXF:
4745 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4746 * On Gen9 they are u, v, lod, r
4747 */
4748 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4749
4750 if (devinfo->gen >= 9) {
4751 if (coord_components >= 2) {
4752 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
4753 offset(coordinate, bld, 1));
4754 } else {
4755 sources[length] = brw_imm_d(0);
4756 }
4757 length++;
4758 }
4759
4760 if (devinfo->gen >= 9 && lod.is_zero()) {
4761 op = SHADER_OPCODE_TXF_LZ;
4762 } else {
4763 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
4764 length++;
4765 }
4766
4767 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
4768 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4769 offset(coordinate, bld, i));
4770
4771 coordinate_done = true;
4772 break;
4773
4774 case SHADER_OPCODE_TXF_CMS:
4775 case SHADER_OPCODE_TXF_CMS_W:
4776 case SHADER_OPCODE_TXF_UMS:
4777 case SHADER_OPCODE_TXF_MCS:
4778 if (op == SHADER_OPCODE_TXF_UMS ||
4779 op == SHADER_OPCODE_TXF_CMS ||
4780 op == SHADER_OPCODE_TXF_CMS_W) {
4781 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
4782 length++;
4783 }
4784
4785 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
4786 /* Data from the multisample control surface. */
4787 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
4788 length++;
4789
4790 /* On Gen9+ we'll use ld2dms_w instead which has two registers for
4791 * the MCS data.
4792 */
4793 if (op == SHADER_OPCODE_TXF_CMS_W) {
4794 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
4795 mcs.file == IMM ?
4796 mcs :
4797 offset(mcs, bld, 1));
4798 length++;
4799 }
4800 }
4801
4802 /* There is no offsetting for this message; just copy in the integer
4803 * texture coordinates.
4804 */
4805 for (unsigned i = 0; i < coord_components; i++)
4806 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4807 offset(coordinate, bld, i));
4808
4809 coordinate_done = true;
4810 break;
4811 case SHADER_OPCODE_TG4_OFFSET:
4812 /* More crazy intermixing */
4813 for (unsigned i = 0; i < 2; i++) /* u, v */
4814 bld.MOV(sources[length++], offset(coordinate, bld, i));
4815
4816 for (unsigned i = 0; i < 2; i++) /* offu, offv */
4817 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4818 offset(tg4_offset, bld, i));
4819
4820 if (coord_components == 3) /* r if present */
4821 bld.MOV(sources[length++], offset(coordinate, bld, 2));
4822
4823 coordinate_done = true;
4824 break;
4825 default:
4826 break;
4827 }
4828
4829 /* Set up the coordinate (except for cases where it was done above) */
4830 if (!coordinate_done) {
4831 for (unsigned i = 0; i < coord_components; i++)
4832 bld.MOV(sources[length++], offset(coordinate, bld, i));
4833 }
4834
4835 if (min_lod.file != BAD_FILE) {
4836 /* Account for all of the missing coordinate sources */
4837 length += 4 - coord_components;
4838 if (op == SHADER_OPCODE_TXD)
4839 length += (3 - grad_components) * 2;
4840
4841 bld.MOV(sources[length++], min_lod);
4842 }
4843
4844 unsigned mlen;
4845 if (reg_width == 2)
4846 mlen = length * reg_width - header_size;
4847 else
4848 mlen = length * reg_width;
4849
4850 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
4851 BRW_REGISTER_TYPE_F);
4852 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
4853
4854 /* Generate the SEND. */
4855 inst->opcode = SHADER_OPCODE_SEND;
4856 inst->mlen = mlen;
4857 inst->header_size = header_size;
4858
4859 const unsigned msg_type =
4860 sampler_msg_type(devinfo, op, inst->shadow_compare);
4861 const unsigned simd_mode =
4862 inst->exec_size <= 8 ? BRW_SAMPLER_SIMD_MODE_SIMD8 :
4863 BRW_SAMPLER_SIMD_MODE_SIMD16;
4864
4865 uint32_t base_binding_table_index;
4866 switch (op) {
4867 case SHADER_OPCODE_TG4:
4868 case SHADER_OPCODE_TG4_OFFSET:
4869 base_binding_table_index = prog_data->binding_table.gather_texture_start;
4870 break;
4871 case SHADER_OPCODE_IMAGE_SIZE:
4872 base_binding_table_index = prog_data->binding_table.image_start;
4873 break;
4874 default:
4875 base_binding_table_index = prog_data->binding_table.texture_start;
4876 break;
4877 }
4878
4879 inst->sfid = BRW_SFID_SAMPLER;
4880 if (surface.file == IMM && sampler.file == IMM) {
4881 inst->desc = brw_sampler_desc(devinfo,
4882 surface.ud + base_binding_table_index,
4883 sampler.ud % 16,
4884 msg_type,
4885 simd_mode,
4886 0 /* return_format unused on gen7+ */);
4887 inst->src[0] = brw_imm_ud(0);
4888 } else {
4889 /* Immediate portion of the descriptor */
4890 inst->desc = brw_sampler_desc(devinfo,
4891 0, /* surface */
4892 0, /* sampler */
4893 msg_type,
4894 simd_mode,
4895 0 /* return_format unused on gen7+ */);
4896 const fs_builder ubld = bld.group(1, 0).exec_all();
4897 fs_reg desc = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4898 if (surface.equals(sampler)) {
4899 /* This case is common in GL */
4900 ubld.MUL(desc, surface, brw_imm_ud(0x101));
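/* Multiplying the 8-bit index by 0x101 replicates it into the low two
 * bytes, i.e. desc = (surface << 8) | surface, so a single MUL fills both
 * the sampler and surface-index portions of the descriptor; the AND with
 * 0xfff below trims the result to the fields that actually exist.
 */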
4901 } else {
4902 if (sampler.file == IMM) {
4903 ubld.OR(desc, surface, brw_imm_ud(sampler.ud << 8));
4904 } else {
4905 ubld.SHL(desc, sampler, brw_imm_ud(8));
4906 ubld.OR(desc, desc, surface);
4907 }
4908 }
4909 if (base_binding_table_index)
4910 ubld.ADD(desc, desc, brw_imm_ud(base_binding_table_index));
4911 ubld.AND(desc, desc, brw_imm_ud(0xfff));
4912
4913 inst->src[0] = component(desc, 0);
4914 }
4915 inst->src[1] = brw_imm_ud(0); /* ex_desc */
4916
4917 inst->src[2] = src_payload;
4918 inst->resize_sources(3);
4919
4920 if (inst->eot) {
4921 /* EOT sampler messages don't make sense to split because it would
4922 * involve ending half of the thread early.
4923 */
4924 assert(inst->group == 0);
4925 /* We need to use SENDC for EOT sampler messages */
4926 inst->check_tdr = true;
4927 inst->send_has_side_effects = true;
4928 }
4929
4930 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4931 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4932 }
4933
4934 static void
4935 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
4936 {
4937 const gen_device_info *devinfo = bld.shader->devinfo;
4938 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
4939 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4940 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
4941 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
4942 const fs_reg &min_lod = inst->src[TEX_LOGICAL_SRC_MIN_LOD];
4943 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
4944 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
4945 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
4946 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
4947 const fs_reg &tg4_offset = inst->src[TEX_LOGICAL_SRC_TG4_OFFSET];
4948 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
4949 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4950 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
4951 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
4952
4953 if (devinfo->gen >= 7) {
4954 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
4955 shadow_c, lod, lod2, min_lod,
4956 sample_index,
4957 mcs, surface, sampler, tg4_offset,
4958 coord_components, grad_components);
4959 } else if (devinfo->gen >= 5) {
4960 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
4961 shadow_c, lod, lod2, sample_index,
4962 surface, sampler,
4963 coord_components, grad_components);
4964 } else {
4965 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
4966 shadow_c, lod, lod2,
4967 surface, sampler,
4968 coord_components, grad_components);
4969 }
4970 }
4971
4972 /**
4973 * Initialize the header present in some typed and untyped surface
4974 * messages.
4975 */
4976 static fs_reg
4977 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
4978 {
4979 fs_builder ubld = bld.exec_all().group(8, 0);
4980 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4981 ubld.MOV(dst, brw_imm_d(0));
4982 ubld.group(1, 0).MOV(component(dst, 7), sample_mask);
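/* Component 7 of the header is where the data port expects the
 * pixel/sample mask for these messages (hence component(dst, 7)); the
 * rest of the header stays as the zeros written by the MOV above.
 */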
4983 return dst;
4984 }
4985
4986 static void
4987 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
4988 {
4989 const gen_device_info *devinfo = bld.shader->devinfo;
4990
4991 /* Get the logical send arguments. */
4992 const fs_reg &addr = inst->src[0];
4993 const fs_reg &src = inst->src[1];
4994 const fs_reg &surface = inst->src[2];
4995 const UNUSED fs_reg &dims = inst->src[3];
4996 const fs_reg &arg = inst->src[4];
4997 assert(arg.file == IMM);
4998
4999 /* Calculate the total number of components of the payload. */
5000 const unsigned addr_sz = inst->components_read(0);
5001 const unsigned src_sz = inst->components_read(1);
5002
5003 const bool is_typed_access =
5004 inst->opcode == SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL ||
5005 inst->opcode == SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL ||
5006 inst->opcode == SHADER_OPCODE_TYPED_ATOMIC_LOGICAL;
5007
5008 /* From the BDW PRM Volume 7, page 147:
5009 *
5010 * "For the Data Cache Data Port*, the header must be present for the
5011 * following message types: [...] Typed read/write/atomics"
5012 *
5013 * Earlier generations have a similar wording. Because of this restriction
5014 * we don't attempt to implement sample masks via predication for such
5015 * messages prior to Gen9, since we have to provide a header anyway. On
5016 * Gen11+ the header has been removed so we can only use predication.
5017 */
5018 const unsigned header_sz = devinfo->gen < 9 && is_typed_access ? 1 : 0;
5019 const unsigned sz = header_sz + addr_sz + src_sz;
5020
5021 /* Allocate space for the payload. */
5022 fs_reg *const components = new fs_reg[sz];
5023 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
5024 unsigned n = 0;
5025
5026 const bool has_side_effects = inst->has_side_effects();
5027 fs_reg sample_mask = has_side_effects ? bld.sample_mask_reg() :
5028 fs_reg(brw_imm_d(0xffff));
5029
5030 /* Construct the payload. */
5031 if (header_sz)
5032 components[n++] = emit_surface_header(bld, sample_mask);
5033
5034 for (unsigned i = 0; i < addr_sz; i++)
5035 components[n++] = offset(addr, bld, i);
5036
5037 for (unsigned i = 0; i < src_sz; i++)
5038 components[n++] = offset(src, bld, i);
5039
5040 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
5041
5042 /* Predicate the instruction on the sample mask if no header is
5043 * provided.
5044 */
5045 if (!header_sz && sample_mask.file != BAD_FILE &&
5046 sample_mask.file != IMM) {
5047 const fs_builder ubld = bld.group(1, 0).exec_all();
5048 if (inst->predicate) {
5049 assert(inst->predicate == BRW_PREDICATE_NORMAL);
5050 assert(!inst->predicate_inverse);
5051 assert(inst->flag_subreg < 2);
5052 /* Combine the sample mask with the existing predicate by using a
5053 * vertical predication mode.
5054 */
5055 inst->predicate = BRW_PREDICATE_ALIGN1_ALLV;
5056 ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg + 2),
5057 sample_mask.type),
5058 sample_mask);
5059 } else {
5060 inst->flag_subreg = 2;
5061 inst->predicate = BRW_PREDICATE_NORMAL;
5062 inst->predicate_inverse = false;
5063 ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg), sample_mask.type),
5064 sample_mask);
5065 }
5066 }
5067
5068 uint32_t sfid;
5069 switch (inst->opcode) {
5070 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5071 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5072 /* Byte scattered opcodes go through the normal data cache */
5073 sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
5074 break;
5075
5076 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5077 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5078 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5079 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5080 /* Untyped Surface messages go through the data cache but the SFID value
5081 * changed on Haswell.
5082 */
5083 sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
5084 HSW_SFID_DATAPORT_DATA_CACHE_1 :
5085 GEN7_SFID_DATAPORT_DATA_CACHE);
5086 break;
5087
5088 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5089 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5090 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5091 /* Typed surface messages go through the render cache on IVB and the
5092 * data cache on HSW+.
5093 */
5094 sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
5095 HSW_SFID_DATAPORT_DATA_CACHE_1 :
5096 GEN6_SFID_DATAPORT_RENDER_CACHE);
5097 break;
5098
5099 default:
5100 unreachable("Unsupported surface opcode");
5101 }
5102
5103 uint32_t desc;
5104 switch (inst->opcode) {
5105 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5106 desc = brw_dp_untyped_surface_rw_desc(devinfo, inst->exec_size,
5107 arg.ud, /* num_channels */
5108 false /* write */);
5109 break;
5110
5111 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5112 desc = brw_dp_untyped_surface_rw_desc(devinfo, inst->exec_size,
5113 arg.ud, /* num_channels */
5114 true /* write */);
5115 break;
5116
5117 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5118 desc = brw_dp_byte_scattered_rw_desc(devinfo, inst->exec_size,
5119 arg.ud, /* bit_size */
5120 false /* write */);
5121 break;
5122
5123 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5124 desc = brw_dp_byte_scattered_rw_desc(devinfo, inst->exec_size,
5125 arg.ud, /* bit_size */
5126 true /* write */);
5127 break;
5128
5129 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5130 desc = brw_dp_untyped_atomic_desc(devinfo, inst->exec_size,
5131 arg.ud, /* atomic_op */
5132 !inst->dst.is_null());
5133 break;
5134
5135 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5136 desc = brw_dp_untyped_atomic_float_desc(devinfo, inst->exec_size,
5137 arg.ud, /* atomic_op */
5138 !inst->dst.is_null());
5139 break;
5140
5141 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5142 desc = brw_dp_typed_surface_rw_desc(devinfo, inst->exec_size, inst->group,
5143 arg.ud, /* num_channels */
5144 false /* write */);
5145 break;
5146
5147 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5148 desc = brw_dp_typed_surface_rw_desc(devinfo, inst->exec_size, inst->group,
5149 arg.ud, /* num_channels */
5150 true /* write */);
5151 break;
5152
5153 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5154 desc = brw_dp_typed_atomic_desc(devinfo, inst->exec_size, inst->group,
5155 arg.ud, /* atomic_op */
5156 !inst->dst.is_null());
5157 break;
5158
5159 default:
5160 unreachable("Unknown surface logical instruction");
5161 }
5162
5163 /* Update the original instruction. */
5164 inst->opcode = SHADER_OPCODE_SEND;
5165 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
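/* For example, a header-less SIMD16 untyped write of one address component
 * and four data components gives mlen = (1 + 4) * 16 / 8 = 10 GRFs (the
 * component counts here are purely illustrative).
 */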
5166 inst->header_size = header_sz;
5167 inst->send_has_side_effects = has_side_effects;
5168 inst->send_is_volatile = !has_side_effects;
5169
5170 /* Set up SFID and descriptors */
5171 inst->sfid = sfid;
5172 inst->desc = desc;
5173 if (surface.file == IMM) {
5174 inst->desc |= surface.ud & 0xff;
5175 inst->src[0] = brw_imm_ud(0);
5176 } else {
5177 const fs_builder ubld = bld.exec_all().group(1, 0);
5178 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5179 ubld.AND(tmp, surface, brw_imm_ud(0xff));
5180 inst->src[0] = component(tmp, 0);
5181 }
5182 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5183
5184 /* Finally, the payload */
5185 inst->src[2] = payload;
5186
5187 inst->resize_sources(3);
5188
5189 delete[] components;
5190 }
5191
5192 static void
5193 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
5194 {
5195 const gen_device_info *devinfo = bld.shader->devinfo;
5196
5197 if (devinfo->gen >= 7) {
5198 fs_reg index = inst->src[0];
5199 /* We are switching the instruction from an ALU-like instruction to a
5200 * send-from-grf instruction. Since sends can't handle strides or
5201 * source modifiers, we have to make a copy of the offset source.
5202 */
5203 fs_reg offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
5204 bld.MOV(offset, inst->src[1]);
5205
5206 const unsigned simd_mode =
5207 inst->exec_size <= 8 ? BRW_SAMPLER_SIMD_MODE_SIMD8 :
5208 BRW_SAMPLER_SIMD_MODE_SIMD16;
5209
5210 inst->opcode = SHADER_OPCODE_SEND;
5211 inst->mlen = inst->exec_size / 8;
5212 inst->resize_sources(3);
5213
5214 inst->sfid = BRW_SFID_SAMPLER;
5215 inst->desc = brw_sampler_desc(devinfo, 0, 0,
5216 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
5217 simd_mode, 0);
5218 if (index.file == IMM) {
5219 inst->desc |= index.ud & 0xff;
5220 inst->src[0] = brw_imm_ud(0);
5221 } else {
5222 const fs_builder ubld = bld.exec_all().group(1, 0);
5223 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5224 ubld.AND(tmp, index, brw_imm_ud(0xff));
5225 inst->src[0] = component(tmp, 0);
5226 }
5227 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5228 inst->src[2] = offset; /* payload */
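/* In short (a summary of the conversion above, not new behaviour): the
 * logical load becomes a SEND to the sampler using the LD message, with
 * the message type, SIMD mode and surface index (low 8 bits, folded into
 * desc for an immediate index or supplied through src[0] otherwise) in
 * the descriptor, and with the per-channel offsets as the only payload --
 * one GRF per eight channels, matching mlen above.
 */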
5229 } else {
5230 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
5231 BRW_REGISTER_TYPE_UD);
5232
5233 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
5234
5235 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
5236 inst->resize_sources(1);
5237 inst->base_mrf = payload.nr;
5238 inst->header_size = 1;
5239 inst->mlen = 1 + inst->exec_size / 8;
5240 }
5241 }
5242
5243 static void
5244 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
5245 {
5246 assert(bld.shader->devinfo->gen < 6);
5247
5248 inst->base_mrf = 2;
5249 inst->mlen = inst->sources * inst->exec_size / 8;
5250
5251 if (inst->sources > 1) {
5252 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
5253 * "Message Payload":
5254 *
5255 * "Operand0[7]. For the INT DIV functions, this operand is the
5256 * denominator."
5257 * ...
5258 * "Operand1[7]. For the INT DIV functions, this operand is the
5259 * numerator."
5260 */
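      /* So for the integer division opcodes the denominator has to go out in
       * the first payload operand; swap the two sources accordingly.
       */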
5261 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
5262 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
5263 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
5264
5265 inst->resize_sources(1);
5266 inst->src[0] = src0;
5267
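      /* The first operand stays as a regular source; the second is written
       * directly into the next MRF of the message payload.
       */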
5268 assert(inst->exec_size == 8);
5269 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
5270 }
5271 }
5272
5273 bool
5274 fs_visitor::lower_logical_sends()
5275 {
5276 bool progress = false;
5277
5278 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5279 const fs_builder ibld(this, block, inst);
5280
5281 switch (inst->opcode) {
5282 case FS_OPCODE_FB_WRITE_LOGICAL:
5283 assert(stage == MESA_SHADER_FRAGMENT);
5284 lower_fb_write_logical_send(ibld, inst,
5285 brw_wm_prog_data(prog_data),
5286 (const brw_wm_prog_key *)key,
5287 payload);
5288 break;
5289
5290 case FS_OPCODE_FB_READ_LOGICAL:
5291 lower_fb_read_logical_send(ibld, inst);
5292 break;
5293
5294 case SHADER_OPCODE_TEX_LOGICAL:
5295 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
5296 break;
5297
5298 case SHADER_OPCODE_TXD_LOGICAL:
5299 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
5300 break;
5301
5302 case SHADER_OPCODE_TXF_LOGICAL:
5303 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
5304 break;
5305
5306 case SHADER_OPCODE_TXL_LOGICAL:
5307 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
5308 break;
5309
5310 case SHADER_OPCODE_TXS_LOGICAL:
5311 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
5312 break;
5313
5314 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
5315 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_IMAGE_SIZE);
5316 break;
5317
5318 case FS_OPCODE_TXB_LOGICAL:
5319 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
5320 break;
5321
5322 case SHADER_OPCODE_TXF_CMS_LOGICAL:
5323 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
5324 break;
5325
5326 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
5327 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
5328 break;
5329
5330 case SHADER_OPCODE_TXF_UMS_LOGICAL:
5331 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
5332 break;
5333
5334 case SHADER_OPCODE_TXF_MCS_LOGICAL:
5335 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
5336 break;
5337
5338 case SHADER_OPCODE_LOD_LOGICAL:
5339 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
5340 break;
5341
5342 case SHADER_OPCODE_TG4_LOGICAL:
5343 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
5344 break;
5345
5346 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
5347 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
5348 break;
5349
5350 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
5351 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
5352 break;
5353
5354 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5355 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5356 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5357 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5358 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5359 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5360 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5361 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5362 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5363 lower_surface_logical_send(ibld, inst);
5364 break;
5365
5366 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
5367 lower_varying_pull_constant_logical_send(ibld, inst);
5368 break;
5369
5370 case SHADER_OPCODE_RCP:
5371 case SHADER_OPCODE_RSQ:
5372 case SHADER_OPCODE_SQRT:
5373 case SHADER_OPCODE_EXP2:
5374 case SHADER_OPCODE_LOG2:
5375 case SHADER_OPCODE_SIN:
5376 case SHADER_OPCODE_COS:
5377 case SHADER_OPCODE_POW:
5378 case SHADER_OPCODE_INT_QUOTIENT:
5379 case SHADER_OPCODE_INT_REMAINDER:
5380 /* The math opcodes are overloaded for the send-like and
5381 * expression-like instructions which seems kind of icky. Gen6+ has
5382 * a native (but rather quirky) MATH instruction so we don't need to
5383 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
5384 * logical instructions (which we can easily recognize because they
5385 * have mlen = 0) into send-like virtual instructions.
5386 */
5387 if (devinfo->gen < 6 && inst->mlen == 0) {
5388 lower_math_logical_send(ibld, inst);
5389 break;
5390
5391 } else {
5392 continue;
5393 }
5394
5395 default:
5396 continue;
5397 }
5398
5399 progress = true;
5400 }
5401
5402 if (progress)
5403 invalidate_live_intervals();
5404
5405 return progress;
5406 }
5407
5408 /**
5409 * Get the closest allowed SIMD width for instruction \p inst accounting for
5410 * some common regioning and execution control restrictions that apply to FPU
5411 * instructions. These restrictions don't necessarily have any relevance to
5412 * instructions not executed by the FPU pipeline like extended math, control
5413 * flow or send message instructions.
5414 *
5415  * For virtual opcodes it's really up to the instruction -- in some cases
5416 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
5417 * instructions) it may simplify virtual instruction lowering if we can
5418 * enforce FPU-like regioning restrictions already on the virtual instruction,
5419 * in other cases (e.g. virtual send-like instructions) this may be
5420 * excessively restrictive.
5421 */
5422 static unsigned
5423 get_fpu_lowered_simd_width(const struct gen_device_info *devinfo,
5424 const fs_inst *inst)
5425 {
5426 /* Maximum execution size representable in the instruction controls. */
5427 unsigned max_width = MIN2(32, inst->exec_size);
5428
5429 /* According to the PRMs:
5430 * "A. In Direct Addressing mode, a source cannot span more than 2
5431 * adjacent GRF registers.
5432 * B. A destination cannot span more than 2 adjacent GRF registers."
5433 *
5434 * Look for the source or destination with the largest register region
5435 * which is the one that is going to limit the overall execution size of
5436 * the instruction due to this rule.
5437 */
5438 unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
5439
5440 for (unsigned i = 0; i < inst->sources; i++)
5441 reg_count = MAX2(reg_count, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));
5442
5443 /* Calculate the maximum execution size of the instruction based on the
5444 * factor by which it goes over the hardware limit of 2 GRFs.
5445 */
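   /* For example, a SIMD16 instruction with a source spanning four GRFs is
    * limited to SIMD8 so that each half stays within the two-GRF limit.
    */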
5446 if (reg_count > 2)
5447 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
5448
5449 /* According to the IVB PRMs:
5450 * "When destination spans two registers, the source MUST span two
5451 * registers. The exception to the above rule:
5452 *
5453 * - When source is scalar, the source registers are not incremented.
5454 * - When source is packed integer Word and destination is packed
5455 * integer DWord, the source register is not incremented but the
5456 * source sub register is incremented."
5457 *
5458 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
5459 * restrictions. The code below intentionally doesn't check whether the
5460 * destination type is integer because empirically the hardware doesn't
5461 * seem to care what the actual type is as long as it's dword-aligned.
5462 */
5463 if (devinfo->gen < 8) {
5464 for (unsigned i = 0; i < inst->sources; i++) {
5465 /* IVB implements DF scalars as <0;2,1> regions. */
5466 const bool is_scalar_exception = is_uniform(inst->src[i]) &&
5467 (devinfo->is_haswell || type_sz(inst->src[i].type) != 8);
5468 const bool is_packed_word_exception =
5469 type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
5470 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1;
5471
5472 /* We check size_read(i) against size_written instead of REG_SIZE
5473 * because we want to properly handle SIMD32. In SIMD32, you can end
5474 * up with writes to 4 registers and a source that reads 2 registers
5475 * and we may still need to lower all the way to SIMD8 in that case.
5476 */
5477 if (inst->size_written > REG_SIZE &&
5478 inst->size_read(i) != 0 &&
5479 inst->size_read(i) < inst->size_written &&
5480 !is_scalar_exception && !is_packed_word_exception) {
5481 const unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
5482 max_width = MIN2(max_width, inst->exec_size / reg_count);
5483 }
5484 }
5485 }
5486
5487 if (devinfo->gen < 6) {
5488 /* From the G45 PRM, Volume 4 Page 361:
5489 *
5490 * "Operand Alignment Rule: With the exceptions listed below, a
5491 * source/destination operand in general should be aligned to even
5492 * 256-bit physical register with a region size equal to two 256-bit
5493 * physical registers."
5494 *
5495 * Normally we enforce this by allocating virtual registers to the
5496 * even-aligned class. But we need to handle payload registers.
5497 */
5498 for (unsigned i = 0; i < inst->sources; i++) {
5499 if (inst->src[i].file == FIXED_GRF && (inst->src[i].nr & 1) &&
5500 inst->size_read(i) > REG_SIZE) {
5501 max_width = MIN2(max_width, 8);
5502 }
5503 }
5504 }
5505
5506 /* From the IVB PRMs:
5507 * "When an instruction is SIMD32, the low 16 bits of the execution mask
5508 * are applied for both halves of the SIMD32 instruction. If different
5509 * execution mask channels are required, split the instruction into two
5510 * SIMD16 instructions."
5511 *
5512 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
5513 * 32-wide control flow support in hardware and will behave similarly.
5514 */
5515 if (devinfo->gen < 8 && !inst->force_writemask_all)
5516 max_width = MIN2(max_width, 16);
5517
5518 /* From the IVB PRMs (applies to HSW too):
5519 * "Instructions with condition modifiers must not use SIMD32."
5520 *
5521 * From the BDW PRMs (applies to later hardware too):
5522 * "Ternary instruction with condition modifiers must not use SIMD32."
5523 */
5524 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
5525 max_width = MIN2(max_width, 16);
5526
5527 /* From the IVB PRMs (applies to other devices that don't have the
5528 * gen_device_info::supports_simd16_3src flag set):
5529 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
5530 * SIMD8 is not allowed for DF operations."
5531 */
5532 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
5533 max_width = MIN2(max_width, inst->exec_size / reg_count);
5534
5535 /* Pre-Gen8 EUs are hardwired to use the QtrCtrl+1 (where QtrCtrl is
5536 * the 8-bit quarter of the execution mask signals specified in the
5537 * instruction control fields) for the second compressed half of any
5538 * single-precision instruction (for double-precision instructions
5539 * it's hardwired to use NibCtrl+1, at least on HSW), which means that
5540 * the EU will apply the wrong execution controls for the second
5541 * sequential GRF write if the number of channels per GRF is not exactly
5542 * eight in single-precision mode (or four in double-float mode).
5543 *
5544 * In this situation we calculate the maximum size of the split
5545 * instructions so they only ever write to a single register.
5546 */
5547 if (devinfo->gen < 8 && inst->size_written > REG_SIZE &&
5548 !inst->force_writemask_all) {
5549 const unsigned channels_per_grf = inst->exec_size /
5550 DIV_ROUND_UP(inst->size_written, REG_SIZE);
5551 const unsigned exec_type_size = get_exec_type_size(inst);
5552 assert(exec_type_size);
5553
5554 /* The hardware shifts exactly 8 channels per compressed half of the
5555 * instruction in single-precision mode and exactly 4 in double-precision.
5556 */
5557 if (channels_per_grf != (exec_type_size == 8 ? 4 : 8))
5558 max_width = MIN2(max_width, channels_per_grf);
5559
5560 /* Lower all non-force_writemask_all DF instructions to SIMD4 on IVB/BYT
5561 * because HW applies the same channel enable signals to both halves of
5562 * the compressed instruction which will be just wrong under
5563 * non-uniform control flow.
5564 */
5565 if (devinfo->gen == 7 && !devinfo->is_haswell &&
5566 (exec_type_size == 8 || type_sz(inst->dst.type) == 8))
5567 max_width = MIN2(max_width, 4);
5568 }
5569
5570 /* Only power-of-two execution sizes are representable in the instruction
5571 * control fields.
5572 */
5573 return 1 << _mesa_logbase2(max_width);
5574 }
5575
5576 /**
5577 * Get the maximum allowed SIMD width for instruction \p inst accounting for
5578 * various payload size restrictions that apply to sampler message
5579 * instructions.
5580 *
5581 * This is only intended to provide a maximum theoretical bound for the
5582 * execution size of the message based on the number of argument components
5583 * alone, which in most cases will determine whether the SIMD8 or SIMD16
5584 * variant of the message can be used, though some messages may have
5585 * additional restrictions not accounted for here (e.g. pre-ILK hardware uses
5586 * the message length to determine the exact SIMD width and argument count,
5587 * which makes a number of sampler message combinations impossible to
5588 * represent).
5589 */
5590 static unsigned
5591 get_sampler_lowered_simd_width(const struct gen_device_info *devinfo,
5592 const fs_inst *inst)
5593 {
5594 /* If we have a min_lod parameter on anything other than a simple sample
5595 * message, it will push it over 5 arguments and we have to fall back to
5596 * SIMD8.
5597 */
5598 if (inst->opcode != SHADER_OPCODE_TEX &&
5599 inst->components_read(TEX_LOGICAL_SRC_MIN_LOD))
5600 return 8;
5601
5602 /* Calculate the number of coordinate components that have to be present
5603 * assuming that additional arguments follow the texel coordinates in the
5604 * message payload. On IVB+ there is no need for padding, on ILK-SNB we
5605    * message payload. On IVB+ there is no need for padding; on ILK-SNB we
5606    * need to pad to four or three components depending on the message; and
5607    * pre-ILK we need to pad to at most three components.
5608 const unsigned req_coord_components =
5609 (devinfo->gen >= 7 ||
5610 !inst->components_read(TEX_LOGICAL_SRC_COORDINATE)) ? 0 :
5611 (devinfo->gen >= 5 && inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
5612 inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL) ? 4 :
5613 3;
5614
5615 /* On Gen9+ the LOD argument is for free if we're able to use the LZ
5616 * variant of the TXL or TXF message.
5617 */
5618 const bool implicit_lod = devinfo->gen >= 9 &&
5619 (inst->opcode == SHADER_OPCODE_TXL ||
5620 inst->opcode == SHADER_OPCODE_TXF) &&
5621 inst->src[TEX_LOGICAL_SRC_LOD].is_zero();
5622
5623 /* Calculate the total number of argument components that need to be passed
5624 * to the sampler unit.
5625 */
5626 const unsigned num_payload_components =
5627 MAX2(inst->components_read(TEX_LOGICAL_SRC_COORDINATE),
5628 req_coord_components) +
5629 inst->components_read(TEX_LOGICAL_SRC_SHADOW_C) +
5630 (implicit_lod ? 0 : inst->components_read(TEX_LOGICAL_SRC_LOD)) +
5631 inst->components_read(TEX_LOGICAL_SRC_LOD2) +
5632 inst->components_read(TEX_LOGICAL_SRC_SAMPLE_INDEX) +
5633 (inst->opcode == SHADER_OPCODE_TG4_OFFSET_LOGICAL ?
5634 inst->components_read(TEX_LOGICAL_SRC_TG4_OFFSET) : 0) +
5635 inst->components_read(TEX_LOGICAL_SRC_MCS);
5636
5637 /* SIMD16 messages with more than five arguments exceed the maximum message
5638 * size supported by the sampler, regardless of whether a header is
5639 * provided or not.
5640 */
5641 return MIN2(inst->exec_size,
5642 num_payload_components > MAX_SAMPLER_MESSAGE_SIZE / 2 ? 8 : 16);
5643 }
5644
5645 /**
5646 * Get the closest native SIMD width supported by the hardware for instruction
5647 * \p inst. The instruction will be left untouched by
5648 * fs_visitor::lower_simd_width() if the returned value is equal to the
5649 * original execution size.
5650 */
5651 static unsigned
5652 get_lowered_simd_width(const struct gen_device_info *devinfo,
5653 const fs_inst *inst)
5654 {
5655 switch (inst->opcode) {
5656 case BRW_OPCODE_MOV:
5657 case BRW_OPCODE_SEL:
5658 case BRW_OPCODE_NOT:
5659 case BRW_OPCODE_AND:
5660 case BRW_OPCODE_OR:
5661 case BRW_OPCODE_XOR:
5662 case BRW_OPCODE_SHR:
5663 case BRW_OPCODE_SHL:
5664 case BRW_OPCODE_ASR:
5665 case BRW_OPCODE_CMPN:
5666 case BRW_OPCODE_CSEL:
5667 case BRW_OPCODE_F32TO16:
5668 case BRW_OPCODE_F16TO32:
5669 case BRW_OPCODE_BFREV:
5670 case BRW_OPCODE_BFE:
5671 case BRW_OPCODE_ADD:
5672 case BRW_OPCODE_MUL:
5673 case BRW_OPCODE_AVG:
5674 case BRW_OPCODE_FRC:
5675 case BRW_OPCODE_RNDU:
5676 case BRW_OPCODE_RNDD:
5677 case BRW_OPCODE_RNDE:
5678 case BRW_OPCODE_RNDZ:
5679 case BRW_OPCODE_LZD:
5680 case BRW_OPCODE_FBH:
5681 case BRW_OPCODE_FBL:
5682 case BRW_OPCODE_CBIT:
5683 case BRW_OPCODE_SAD2:
5684 case BRW_OPCODE_MAD:
5685 case BRW_OPCODE_LRP:
5686 case FS_OPCODE_PACK:
5687 case SHADER_OPCODE_SEL_EXEC:
5688 case SHADER_OPCODE_CLUSTER_BROADCAST:
5689 return get_fpu_lowered_simd_width(devinfo, inst);
5690
5691 case BRW_OPCODE_CMP: {
5692 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
5693 * when the destination is a GRF the dependency-clear bit on the flag
5694 * register is cleared early.
5695 *
5696 * Suggested workarounds are to disable coissuing CMP instructions
5697 * or to split CMP(16) instructions into two CMP(8) instructions.
5698 *
5699 * We choose to split into CMP(8) instructions since disabling
5700 * coissuing would affect CMP instructions not otherwise affected by
5701 * the errata.
5702 */
5703 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
5704 !inst->dst.is_null() ? 8 : ~0);
5705 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
5706 }
5707 case BRW_OPCODE_BFI1:
5708 case BRW_OPCODE_BFI2:
5709 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
5710 * should
5711 * "Force BFI instructions to be executed always in SIMD8."
5712 */
5713 return MIN2(devinfo->is_haswell ? 8 : ~0u,
5714 get_fpu_lowered_simd_width(devinfo, inst));
5715
5716 case BRW_OPCODE_IF:
5717 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
5718 return inst->exec_size;
5719
5720 case SHADER_OPCODE_RCP:
5721 case SHADER_OPCODE_RSQ:
5722 case SHADER_OPCODE_SQRT:
5723 case SHADER_OPCODE_EXP2:
5724 case SHADER_OPCODE_LOG2:
5725 case SHADER_OPCODE_SIN:
5726 case SHADER_OPCODE_COS:
5727 /* Unary extended math instructions are limited to SIMD8 on Gen4 and
5728 * Gen6.
5729 */
5730 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
5731 devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
5732 MIN2(8, inst->exec_size));
5733
5734 case SHADER_OPCODE_POW:
5735 /* SIMD16 is only allowed on Gen7+. */
5736 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
5737 MIN2(8, inst->exec_size));
5738
5739 case SHADER_OPCODE_INT_QUOTIENT:
5740 case SHADER_OPCODE_INT_REMAINDER:
5741 /* Integer division is limited to SIMD8 on all generations. */
5742 return MIN2(8, inst->exec_size);
5743
5744 case FS_OPCODE_LINTERP:
5745 case SHADER_OPCODE_GET_BUFFER_SIZE:
5746 case FS_OPCODE_DDX_COARSE:
5747 case FS_OPCODE_DDX_FINE:
5748 case FS_OPCODE_DDY_COARSE:
5749 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
5750 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
5751 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
5752 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
5753 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
5754 return MIN2(16, inst->exec_size);
5755
5756 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
5757 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
5758 * message used to implement varying pull constant loads, so expand it
5759 * to SIMD16. An alternative with longer message payload length but
5760 * shorter return payload would be to use the SIMD8 sampler message that
5761 * takes (header, u, v, r) as parameters instead of (header, u).
5762 */
5763 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
5764
5765 case FS_OPCODE_DDY_FINE:
5766 /* The implementation of this virtual opcode may require emitting
5767 * compressed Align16 instructions, which are severely limited on some
5768 * generations.
5769 *
5770 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
5771 * Region Restrictions):
5772 *
5773 * "In Align16 access mode, SIMD16 is not allowed for DW operations
5774 * and SIMD8 is not allowed for DF operations."
5775 *
5776 * In this context, "DW operations" means "operations acting on 32-bit
5777 * values", so it includes operations on floats.
5778 *
5779 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
5780 * (Instruction Compression -> Rules and Restrictions):
5781 *
5782 * "A compressed instruction must be in Align1 access mode. Align16
5783 * mode instructions cannot be compressed."
5784 *
5785 * Similar text exists in the g45 PRM.
5786 *
5787 * Empirically, compressed align16 instructions using odd register
5788 * numbers don't appear to work on Sandybridge either.
5789 */
5790 return (devinfo->gen == 4 || devinfo->gen == 6 ||
5791 (devinfo->gen == 7 && !devinfo->is_haswell) ?
5792 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
5793
5794 case SHADER_OPCODE_MULH:
5795 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
5796 * is 8-wide on Gen7+.
5797 */
5798 return (devinfo->gen >= 7 ? 8 :
5799 get_fpu_lowered_simd_width(devinfo, inst));
5800
5801 case FS_OPCODE_FB_WRITE_LOGICAL:
5802 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
5803 * here.
5804 */
5805 assert(devinfo->gen != 6 ||
5806 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
5807 inst->exec_size == 8);
5808 /* Dual-source FB writes are unsupported in SIMD16 mode. */
5809 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
5810 8 : MIN2(16, inst->exec_size));
5811
5812 case FS_OPCODE_FB_READ_LOGICAL:
5813 return MIN2(16, inst->exec_size);
5814
5815 case SHADER_OPCODE_TEX_LOGICAL:
5816 case SHADER_OPCODE_TXF_CMS_LOGICAL:
5817 case SHADER_OPCODE_TXF_UMS_LOGICAL:
5818 case SHADER_OPCODE_TXF_MCS_LOGICAL:
5819 case SHADER_OPCODE_LOD_LOGICAL:
5820 case SHADER_OPCODE_TG4_LOGICAL:
5821 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
5822 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
5823 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
5824 return get_sampler_lowered_simd_width(devinfo, inst);
5825
5826 case SHADER_OPCODE_TXD_LOGICAL:
5827 /* TXD is unsupported in SIMD16 mode. */
5828 return 8;
5829
5830 case SHADER_OPCODE_TXL_LOGICAL:
5831 case FS_OPCODE_TXB_LOGICAL:
5832 /* Only one execution size is representable pre-ILK depending on whether
5833 * the shadow reference argument is present.
5834 */
5835 if (devinfo->gen == 4)
5836 return inst->src[TEX_LOGICAL_SRC_SHADOW_C].file == BAD_FILE ? 16 : 8;
5837 else
5838 return get_sampler_lowered_simd_width(devinfo, inst);
5839
5840 case SHADER_OPCODE_TXF_LOGICAL:
5841 case SHADER_OPCODE_TXS_LOGICAL:
5842 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
5843 * messages. Use SIMD16 instead.
5844 */
5845 if (devinfo->gen == 4)
5846 return 16;
5847 else
5848 return get_sampler_lowered_simd_width(devinfo, inst);
5849
5850 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5851 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5852 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5853 return 8;
5854
5855 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5856 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5857 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5858 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5859 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5860 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5861 return MIN2(16, inst->exec_size);
5862
5863 case SHADER_OPCODE_URB_READ_SIMD8:
5864 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
5865 case SHADER_OPCODE_URB_WRITE_SIMD8:
5866 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
5867 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
5868 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
5869 return MIN2(8, inst->exec_size);
5870
5871 case SHADER_OPCODE_QUAD_SWIZZLE: {
5872 const unsigned swiz = inst->src[1].ud;
5873 return (is_uniform(inst->src[0]) ?
5874 get_fpu_lowered_simd_width(devinfo, inst) :
5875 devinfo->gen < 11 && type_sz(inst->src[0].type) == 4 ? 8 :
5876 swiz == BRW_SWIZZLE_XYXY || swiz == BRW_SWIZZLE_ZWZW ? 4 :
5877 get_fpu_lowered_simd_width(devinfo, inst));
5878 }
5879 case SHADER_OPCODE_MOV_INDIRECT: {
5880 /* From IVB and HSW PRMs:
5881 *
5882    *    "2. When the destination requires two registers and the sources are
5883    *      indirect, the sources must use 1x1 regioning mode."
5884 *
5885 * In case of DF instructions in HSW/IVB, the exec_size is limited by
5886 * the EU decompression logic not handling VxH indirect addressing
5887 * correctly.
5888 */
5889 const unsigned max_size = (devinfo->gen >= 8 ? 2 : 1) * REG_SIZE;
5890 /* Prior to Broadwell, we only have 8 address subregisters. */
5891 return MIN3(devinfo->gen >= 8 ? 16 : 8,
5892 max_size / (inst->dst.stride * type_sz(inst->dst.type)),
5893 inst->exec_size);
5894 }
5895
5896 case SHADER_OPCODE_LOAD_PAYLOAD: {
5897 const unsigned reg_count =
5898 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
5899
5900 if (reg_count > 2) {
5901 /* Only LOAD_PAYLOAD instructions with per-channel destination region
5902 * can be easily lowered (which excludes headers and heterogeneous
5903 * types).
5904 */
5905 assert(!inst->header_size);
5906 for (unsigned i = 0; i < inst->sources; i++)
5907 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
5908 inst->src[i].file == BAD_FILE);
5909
5910 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
5911 } else {
5912 return inst->exec_size;
5913 }
5914 }
5915 default:
5916 return inst->exec_size;
5917 }
5918 }
5919
5920 /**
5921 * Return true if splitting out the group of channels of instruction \p inst
5922 * given by lbld.group() requires allocating a temporary for the i-th source
5923 * of the lowered instruction.
5924 */
5925 static inline bool
5926 needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
5927 {
5928 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
5929 (inst->components_read(i) == 1 &&
5930 lbld.dispatch_width() <= inst->exec_size)) ||
5931 (inst->flags_written() &
5932 flag_mask(inst->src[i], type_sz(inst->src[i].type)));
5933 }
5934
5935 /**
5936 * Extract the data that would be consumed by the channel group given by
5937 * lbld.group() from the i-th source region of instruction \p inst and return
5938 * it as result in packed form.
5939 */
5940 static fs_reg
5941 emit_unzip(const fs_builder &lbld, fs_inst *inst, unsigned i)
5942 {
5943 assert(lbld.group() >= inst->group);
5944
5945 /* Specified channel group from the source region. */
5946 const fs_reg src = horiz_offset(inst->src[i], lbld.group() - inst->group);
5947
5948 if (needs_src_copy(lbld, inst, i)) {
5949 /* Builder of the right width to perform the copy avoiding uninitialized
5950 * data if the lowered execution size is greater than the original
5951 * execution size of the instruction.
5952 */
5953 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
5954 inst->exec_size), 0);
5955 const fs_reg tmp = lbld.vgrf(inst->src[i].type, inst->components_read(i));
5956
5957 for (unsigned k = 0; k < inst->components_read(i); ++k)
5958 cbld.MOV(offset(tmp, lbld, k), offset(src, inst->exec_size, k));
5959
5960 return tmp;
5961
5962 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
5963 /* The source is invariant for all dispatch_width-wide groups of the
5964 * original region.
5965 */
5966 return inst->src[i];
5967
5968 } else {
5969 /* We can just point the lowered instruction at the right channel group
5970 * from the original region.
5971 */
5972 return src;
5973 }
5974 }
5975
5976 /**
5977 * Return true if splitting out the group of channels of instruction \p inst
5978 * given by lbld.group() requires allocating a temporary for the destination
5979 * of the lowered instruction and copying the data back to the original
5980 * destination region.
5981 */
5982 static inline bool
5983 needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
5984 {
5985 /* If the instruction writes more than one component we'll have to shuffle
5986 * the results of multiple lowered instructions in order to make sure that
5987 * they end up arranged correctly in the original destination region.
5988 */
5989 if (inst->size_written > inst->dst.component_size(inst->exec_size))
5990 return true;
5991
5992 /* If the lowered execution size is larger than the original the result of
5993 * the instruction won't fit in the original destination, so we'll have to
5994 * allocate a temporary in any case.
5995 */
5996 if (lbld.dispatch_width() > inst->exec_size)
5997 return true;
5998
5999 for (unsigned i = 0; i < inst->sources; i++) {
6000 /* If we already made a copy of the source for other reasons there won't
6001 * be any overlap with the destination.
6002 */
6003 if (needs_src_copy(lbld, inst, i))
6004 continue;
6005
6006      /* In order to keep the logic simple we emit a copy whenever the
6007       * destination region doesn't exactly match an overlapping source.  A
6008       * mismatch may mean the source and destination aren't aligned group by
6009       * group, in which case one of the lowered instructions could overwrite
6010       * data that other lowered instructions still need to read.
6011       */
6012 if (regions_overlap(inst->dst, inst->size_written,
6013 inst->src[i], inst->size_read(i)) &&
6014 !inst->dst.equals(inst->src[i]))
6015 return true;
6016 }
6017
6018 return false;
6019 }
6020
6021 /**
6022 * Insert data from a packed temporary into the channel group given by
6023 * lbld.group() of the destination region of instruction \p inst and return
6024 * the temporary as result. Any copy instructions that are required for
6025 * unzipping the previous value (in the case of partial writes) will be
6026 * inserted using \p lbld_before and any copy instructions required for
6027 * zipping up the destination of \p inst will be inserted using \p lbld_after.
6028 */
6029 static fs_reg
6030 emit_zip(const fs_builder &lbld_before, const fs_builder &lbld_after,
6031 fs_inst *inst)
6032 {
6033 assert(lbld_before.dispatch_width() == lbld_after.dispatch_width());
6034 assert(lbld_before.group() == lbld_after.group());
6035 assert(lbld_after.group() >= inst->group);
6036
6037 /* Specified channel group from the destination region. */
6038 const fs_reg dst = horiz_offset(inst->dst, lbld_after.group() - inst->group);
6039 const unsigned dst_size = inst->size_written /
6040 inst->dst.component_size(inst->exec_size);
6041
6042 if (needs_dst_copy(lbld_after, inst)) {
6043 const fs_reg tmp = lbld_after.vgrf(inst->dst.type, dst_size);
6044
6045 if (inst->predicate) {
6046 /* Handle predication by copying the original contents of
6047 * the destination into the temporary before emitting the
6048 * lowered instruction.
6049 */
6050 const fs_builder gbld_before =
6051 lbld_before.group(MIN2(lbld_before.dispatch_width(),
6052 inst->exec_size), 0);
6053 for (unsigned k = 0; k < dst_size; ++k) {
6054 gbld_before.MOV(offset(tmp, lbld_before, k),
6055 offset(dst, inst->exec_size, k));
6056 }
6057 }
6058
6059 const fs_builder gbld_after =
6060 lbld_after.group(MIN2(lbld_after.dispatch_width(),
6061 inst->exec_size), 0);
6062 for (unsigned k = 0; k < dst_size; ++k) {
6063 /* Use a builder of the right width to perform the copy avoiding
6064 * uninitialized data if the lowered execution size is greater than
6065 * the original execution size of the instruction.
6066 */
6067 gbld_after.MOV(offset(dst, inst->exec_size, k),
6068 offset(tmp, lbld_after, k));
6069 }
6070
6071 return tmp;
6072
6073 } else {
6074 /* No need to allocate a temporary for the lowered instruction, just
6075 * take the right group of channels from the original region.
6076 */
6077 return dst;
6078 }
6079 }
6080
6081 bool
6082 fs_visitor::lower_simd_width()
6083 {
6084 bool progress = false;
6085
6086 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
6087 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
6088
6089 if (lower_width != inst->exec_size) {
6090 /* Builder matching the original instruction. We may also need to
6091           * emit an instruction of width larger than the original, so set the
6092           * execution size of the builder to the larger of the two for now to
6093           * make sure that both cases can be handled.
6094 */
6095 const unsigned max_width = MAX2(inst->exec_size, lower_width);
6096 const fs_builder ibld = bld.at(block, inst)
6097 .exec_all(inst->force_writemask_all)
6098 .group(max_width, inst->group / max_width);
6099
6100 /* Split the copies in chunks of the execution width of either the
6101 * original or the lowered instruction, whichever is lower.
6102 */
6103 const unsigned n = DIV_ROUND_UP(inst->exec_size, lower_width);
6104 const unsigned dst_size = inst->size_written /
6105 inst->dst.component_size(inst->exec_size);
6106
6107 assert(!inst->writes_accumulator && !inst->mlen);
6108
6109 /* Inserting the zip, unzip, and duplicated instructions in all of
6110 * the right spots is somewhat tricky. All of the unzip and any
6111 * instructions from the zip which unzip the destination prior to
6112 * writing need to happen before all of the per-group instructions
6113 * and the zip instructions need to happen after. In order to sort
6114 * this all out, we insert the unzip instructions before \p inst,
6115 * insert the per-group instructions after \p inst (i.e. before
6116 * inst->next), and insert the zip instructions before the
6117 * instruction after \p inst. Since we are inserting instructions
6118 * after \p inst, inst->next is a moving target and we need to save
6119 * it off here so that we insert the zip instructions in the right
6120 * place.
6121 *
6122          * Since each split instruction is inserted right after \p inst, the
6123 * instructions will end up in the reverse order that we insert them.
6124 * However, certain render target writes require that the low group
6125 * instructions come before the high group. From the Ivy Bridge PRM
6126 * Vol. 4, Pt. 1, Section 3.9.11:
6127 *
6128 * "If multiple SIMD8 Dual Source messages are delivered by the
6129 * pixel shader thread, each SIMD8_DUALSRC_LO message must be
6130 * issued before the SIMD8_DUALSRC_HI message with the same Slot
6131 * Group Select setting."
6132 *
6133 * And, from Section 3.9.11.1 of the same PRM:
6134 *
6135 * "When SIMD32 or SIMD16 PS threads send render target writes
6136 * with multiple SIMD8 and SIMD16 messages, the following must
6137 * hold:
6138 *
6139 * All the slots (as described above) must have a corresponding
6140 * render target write irrespective of the slot's validity. A slot
6141 * is considered valid when at least one sample is enabled. For
6142 * example, a SIMD16 PS thread must send two SIMD8 render target
6143 * writes to cover all the slots.
6144 *
6145 * PS thread must send SIMD render target write messages with
6146 * increasing slot numbers. For example, SIMD16 thread has
6147 * Slot[15:0] and if two SIMD8 render target writes are used, the
6148 * first SIMD8 render target write must send Slot[7:0] and the
6149 * next one must send Slot[15:8]."
6150 *
6151 * In order to make low group instructions come before high group
6152 * instructions (this is required for some render target writes), we
6153 * split from the highest group to lowest.
6154 */
6155 exec_node *const after_inst = inst->next;
6156 for (int i = n - 1; i >= 0; i--) {
6157 /* Emit a copy of the original instruction with the lowered width.
6158 * If the EOT flag was set throw it away except for the last
6159 * instruction to avoid killing the thread prematurely.
6160 */
6161 fs_inst split_inst = *inst;
6162 split_inst.exec_size = lower_width;
6163 split_inst.eot = inst->eot && i == int(n - 1);
6164
6165 /* Select the correct channel enables for the i-th group, then
6166 * transform the sources and destination and emit the lowered
6167 * instruction.
6168 */
6169 const fs_builder lbld = ibld.group(lower_width, i);
6170
6171 for (unsigned j = 0; j < inst->sources; j++)
6172 split_inst.src[j] = emit_unzip(lbld.at(block, inst), inst, j);
6173
6174 split_inst.dst = emit_zip(lbld.at(block, inst),
6175 lbld.at(block, after_inst), inst);
6176 split_inst.size_written =
6177 split_inst.dst.component_size(lower_width) * dst_size;
6178
6179 lbld.at(block, inst->next).emit(split_inst);
6180 }
6181
6182 inst->remove(block);
6183 progress = true;
6184 }
6185 }
6186
6187 if (progress)
6188 invalidate_live_intervals();
6189
6190 return progress;
6191 }
6192
6193 void
6194 fs_visitor::dump_instructions()
6195 {
6196 dump_instructions(NULL);
6197 }
6198
6199 void
6200 fs_visitor::dump_instructions(const char *name)
6201 {
6202 FILE *file = stderr;
6203 if (name && geteuid() != 0) {
6204 file = fopen(name, "w");
6205 if (!file)
6206 file = stderr;
6207 }
6208
6209 if (cfg) {
6210 calculate_register_pressure();
6211 int ip = 0, max_pressure = 0;
6212 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
6213 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
6214 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
6215 dump_instruction(inst, file);
6216 ip++;
6217 }
6218 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
6219 } else {
6220 int ip = 0;
6221 foreach_in_list(backend_instruction, inst, &instructions) {
6222 fprintf(file, "%4d: ", ip++);
6223 dump_instruction(inst, file);
6224 }
6225 }
6226
6227 if (file != stderr) {
6228 fclose(file);
6229 }
6230 }
6231
6232 void
6233 fs_visitor::dump_instruction(backend_instruction *be_inst)
6234 {
6235 dump_instruction(be_inst, stderr);
6236 }
6237
6238 void
6239 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
6240 {
6241 fs_inst *inst = (fs_inst *)be_inst;
6242
6243 if (inst->predicate) {
6244 fprintf(file, "(%cf%d.%d) ",
6245 inst->predicate_inverse ? '-' : '+',
6246 inst->flag_subreg / 2,
6247 inst->flag_subreg % 2);
6248 }
6249
6250 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
6251 if (inst->saturate)
6252 fprintf(file, ".sat");
6253 if (inst->conditional_mod) {
6254 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
6255 if (!inst->predicate &&
6256 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
6257 inst->opcode != BRW_OPCODE_CSEL &&
6258 inst->opcode != BRW_OPCODE_IF &&
6259 inst->opcode != BRW_OPCODE_WHILE))) {
6260 fprintf(file, ".f%d.%d", inst->flag_subreg / 2,
6261 inst->flag_subreg % 2);
6262 }
6263 }
6264 fprintf(file, "(%d) ", inst->exec_size);
6265
6266 if (inst->mlen) {
6267 fprintf(file, "(mlen: %d) ", inst->mlen);
6268 }
6269
6270 if (inst->ex_mlen) {
6271 fprintf(file, "(ex_mlen: %d) ", inst->ex_mlen);
6272 }
6273
6274 if (inst->eot) {
6275 fprintf(file, "(EOT) ");
6276 }
6277
6278 switch (inst->dst.file) {
6279 case VGRF:
6280 fprintf(file, "vgrf%d", inst->dst.nr);
6281 break;
6282 case FIXED_GRF:
6283 fprintf(file, "g%d", inst->dst.nr);
6284 break;
6285 case MRF:
6286 fprintf(file, "m%d", inst->dst.nr);
6287 break;
6288 case BAD_FILE:
6289 fprintf(file, "(null)");
6290 break;
6291 case UNIFORM:
6292 fprintf(file, "***u%d***", inst->dst.nr);
6293 break;
6294 case ATTR:
6295 fprintf(file, "***attr%d***", inst->dst.nr);
6296 break;
6297 case ARF:
6298 switch (inst->dst.nr) {
6299 case BRW_ARF_NULL:
6300 fprintf(file, "null");
6301 break;
6302 case BRW_ARF_ADDRESS:
6303 fprintf(file, "a0.%d", inst->dst.subnr);
6304 break;
6305 case BRW_ARF_ACCUMULATOR:
6306 fprintf(file, "acc%d", inst->dst.subnr);
6307 break;
6308 case BRW_ARF_FLAG:
6309 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
6310 break;
6311 default:
6312 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
6313 break;
6314 }
6315 break;
6316 case IMM:
6317 unreachable("not reached");
6318 }
6319
6320 if (inst->dst.offset ||
6321 (inst->dst.file == VGRF &&
6322 alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
6323 const unsigned reg_size = (inst->dst.file == UNIFORM ? 4 : REG_SIZE);
6324 fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
6325 inst->dst.offset % reg_size);
6326 }
6327
6328 if (inst->dst.stride != 1)
6329 fprintf(file, "<%u>", inst->dst.stride);
6330 fprintf(file, ":%s, ", brw_reg_type_to_letters(inst->dst.type));
6331
6332 for (int i = 0; i < inst->sources; i++) {
6333 if (inst->src[i].negate)
6334 fprintf(file, "-");
6335 if (inst->src[i].abs)
6336 fprintf(file, "|");
6337 switch (inst->src[i].file) {
6338 case VGRF:
6339 fprintf(file, "vgrf%d", inst->src[i].nr);
6340 break;
6341 case FIXED_GRF:
6342 fprintf(file, "g%d", inst->src[i].nr);
6343 break;
6344 case MRF:
6345 fprintf(file, "***m%d***", inst->src[i].nr);
6346 break;
6347 case ATTR:
6348 fprintf(file, "attr%d", inst->src[i].nr);
6349 break;
6350 case UNIFORM:
6351 fprintf(file, "u%d", inst->src[i].nr);
6352 break;
6353 case BAD_FILE:
6354 fprintf(file, "(null)");
6355 break;
6356 case IMM:
6357 switch (inst->src[i].type) {
6358 case BRW_REGISTER_TYPE_F:
6359 fprintf(file, "%-gf", inst->src[i].f);
6360 break;
6361 case BRW_REGISTER_TYPE_DF:
6362 fprintf(file, "%fdf", inst->src[i].df);
6363 break;
6364 case BRW_REGISTER_TYPE_W:
6365 case BRW_REGISTER_TYPE_D:
6366 fprintf(file, "%dd", inst->src[i].d);
6367 break;
6368 case BRW_REGISTER_TYPE_UW:
6369 case BRW_REGISTER_TYPE_UD:
6370 fprintf(file, "%uu", inst->src[i].ud);
6371 break;
6372 case BRW_REGISTER_TYPE_Q:
6373 fprintf(file, "%" PRId64 "q", inst->src[i].d64);
6374 break;
6375 case BRW_REGISTER_TYPE_UQ:
6376 fprintf(file, "%" PRIu64 "uq", inst->src[i].u64);
6377 break;
6378 case BRW_REGISTER_TYPE_VF:
6379 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
6380 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
6381 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
6382 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
6383 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
6384 break;
6385 case BRW_REGISTER_TYPE_V:
6386 case BRW_REGISTER_TYPE_UV:
6387 fprintf(file, "%08x%s", inst->src[i].ud,
6388 inst->src[i].type == BRW_REGISTER_TYPE_V ? "V" : "UV");
6389 break;
6390 default:
6391 fprintf(file, "???");
6392 break;
6393 }
6394 break;
6395 case ARF:
6396 switch (inst->src[i].nr) {
6397 case BRW_ARF_NULL:
6398 fprintf(file, "null");
6399 break;
6400 case BRW_ARF_ADDRESS:
6401 fprintf(file, "a0.%d", inst->src[i].subnr);
6402 break;
6403 case BRW_ARF_ACCUMULATOR:
6404 fprintf(file, "acc%d", inst->src[i].subnr);
6405 break;
6406 case BRW_ARF_FLAG:
6407 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
6408 break;
6409 default:
6410 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
6411 break;
6412 }
6413 break;
6414 }
6415
6416 if (inst->src[i].offset ||
6417 (inst->src[i].file == VGRF &&
6418 alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
6419 const unsigned reg_size = (inst->src[i].file == UNIFORM ? 4 : REG_SIZE);
6420 fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
6421 inst->src[i].offset % reg_size);
6422 }
6423
6424 if (inst->src[i].abs)
6425 fprintf(file, "|");
6426
6427 if (inst->src[i].file != IMM) {
6428 unsigned stride;
6429 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
6430 unsigned hstride = inst->src[i].hstride;
6431 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
6432 } else {
6433 stride = inst->src[i].stride;
6434 }
6435 if (stride != 1)
6436 fprintf(file, "<%u>", stride);
6437
6438 fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
6439 }
6440
6441 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
6442 fprintf(file, ", ");
6443 }
6444
6445 fprintf(file, " ");
6446
6447 if (inst->force_writemask_all)
6448 fprintf(file, "NoMask ");
6449
6450 if (inst->exec_size != dispatch_width)
6451 fprintf(file, "group%d ", inst->group);
6452
6453 fprintf(file, "\n");
6454 }
6455
6456 void
6457 fs_visitor::setup_fs_payload_gen6()
6458 {
6459 assert(stage == MESA_SHADER_FRAGMENT);
6460 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
6461 const unsigned payload_width = MIN2(16, dispatch_width);
6462 assert(dispatch_width % payload_width == 0);
6463 assert(devinfo->gen >= 6);
6464
6465 prog_data->uses_src_depth = prog_data->uses_src_w =
6466 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
6467
6468 prog_data->uses_sample_mask =
6469 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
6470
6471 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
6472 *
6473 * "MSDISPMODE_PERSAMPLE is required in order to select
6474 * POSOFFSET_SAMPLE"
6475 *
6476 * So we can only really get sample positions if we are doing real
6477 * per-sample dispatch. If we need gl_SamplePosition and we don't have
6478 * persample dispatch, we hard-code it to 0.5.
6479 */
6480 prog_data->uses_pos_offset = prog_data->persample_dispatch &&
6481 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
6482
6483 /* R0: PS thread payload header. */
6484 payload.num_regs++;
6485
6486 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
6487 /* R1: masks, pixel X/Y coordinates. */
6488 payload.subspan_coord_reg[j] = payload.num_regs++;
6489 }
6490
6491 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
6492 /* R3-26: barycentric interpolation coordinates. These appear in the
6493 * same order that they appear in the brw_barycentric_mode enum. Each
6494 * set of coordinates occupies 2 registers if dispatch width == 8 and 4
6495 * registers if dispatch width == 16. Coordinates only appear if they
6496 * were enabled using the "Barycentric Interpolation Mode" bits in
6497 * WM_STATE.
6498 */
6499 for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
6500 if (prog_data->barycentric_interp_modes & (1 << i)) {
6501 payload.barycentric_coord_reg[i][j] = payload.num_regs;
6502 payload.num_regs += payload_width / 4;
6503 }
6504 }
6505
6506 /* R27-28: interpolated depth if uses source depth */
6507 if (prog_data->uses_src_depth) {
6508 payload.source_depth_reg[j] = payload.num_regs;
6509 payload.num_regs += payload_width / 8;
6510 }
6511
6512 /* R29-30: interpolated W set if GEN6_WM_USES_SOURCE_W. */
6513 if (prog_data->uses_src_w) {
6514 payload.source_w_reg[j] = payload.num_regs;
6515 payload.num_regs += payload_width / 8;
6516 }
6517
6518 /* R31: MSAA position offsets. */
6519 if (prog_data->uses_pos_offset) {
6520 payload.sample_pos_reg[j] = payload.num_regs;
6521 payload.num_regs++;
6522 }
6523
6524 /* R32-33: MSAA input coverage mask */
6525 if (prog_data->uses_sample_mask) {
6526 assert(devinfo->gen >= 7);
6527 payload.sample_mask_in_reg[j] = payload.num_regs;
6528 payload.num_regs += payload_width / 8;
6529 }
6530 }
6531
6532 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
6533 source_depth_to_render_target = true;
6534 }
6535 }
6536
6537 void
6538 fs_visitor::setup_vs_payload()
6539 {
6540 /* R0: thread header, R1: urb handles */
6541 payload.num_regs = 2;
6542 }
6543
6544 void
6545 fs_visitor::setup_gs_payload()
6546 {
6547 assert(stage == MESA_SHADER_GEOMETRY);
6548
6549 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
6550 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
6551
6552 /* R0: thread header, R1: output URB handles */
6553 payload.num_regs = 2;
6554
6555 if (gs_prog_data->include_primitive_id) {
6556 /* R2: Primitive ID 0..7 */
6557 payload.num_regs++;
6558 }
6559
6560 /* Always enable VUE handles so we can safely use pull model if needed.
6561 *
6562 * The push model for a GS uses a ton of register space even for trivial
6563 * scenarios with just a few inputs, so just make things easier and a bit
6564 * safer by always having pull model available.
6565 */
6566 gs_prog_data->base.include_vue_handles = true;
6567
6568 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
6569 payload.num_regs += nir->info.gs.vertices_in;
6570
6571 /* Use a maximum of 24 registers for push-model inputs. */
6572 const unsigned max_push_components = 24;
6573
6574 /* If pushing our inputs would take too many registers, reduce the URB read
6575 * length (which is in HWords, or 8 registers), and resort to pulling.
6576 *
6577 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
6578 * have to multiply by VerticesIn to obtain the total storage requirement.
6579 */
6580 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
6581 max_push_components) {
6582 vue_prog_data->urb_read_length =
6583 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
6584 }
6585 }
6586
6587 void
6588 fs_visitor::setup_cs_payload()
6589 {
6590 assert(devinfo->gen >= 7);
6591 payload.num_regs = 1;
6592 }
6593
6594 void
6595 fs_visitor::calculate_register_pressure()
6596 {
6597 invalidate_live_intervals();
6598 calculate_live_intervals();
6599
6600 unsigned num_instructions = 0;
6601 foreach_block(block, cfg)
6602 num_instructions += block->instructions.length();
6603
6604 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
6605
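   /* For each VGRF, add its size in registers to the pressure estimate at
    * every IP inside its live range.
    */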
6606 for (unsigned reg = 0; reg < alloc.count; reg++) {
6607 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
6608 regs_live_at_ip[ip] += alloc.sizes[reg];
6609 }
6610 }
6611
6612 void
6613 fs_visitor::optimize()
6614 {
6615 /* Start by validating the shader we currently have. */
6616 validate();
6617
6618 /* bld is the common builder object pointing at the end of the program we
6619 * used to translate it into i965 IR. For the optimization and lowering
6620 * passes coming next, any code added after the end of the program without
6621 * having explicitly called fs_builder::at() clearly points at a mistake.
6622 * Ideally optimization passes wouldn't be part of the visitor so they
6623 * wouldn't have access to bld at all, but they do, so just in case some
6624 * pass forgets to ask for a location explicitly set it to NULL here to
6625 * make it trip. The dispatch width is initialized to a bogus value to
6626 * make sure that optimizations set the execution controls explicitly to
6627 * match the code they are manipulating instead of relying on the defaults.
6628 */
6629 bld = fs_builder(this, 64);
6630
6631 assign_constant_locations();
6632 lower_constant_loads();
6633
6634 validate();
6635
6636 split_virtual_grfs();
6637 validate();
6638
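/* Run a single pass: execute it, dump the IR if the optimizer debug flag is
 * set and the pass made progress, validate the result, and fold the pass's
 * progress into the overall progress flag.  The expression evaluates to
 * whether this particular pass made progress.
 */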
6639 #define OPT(pass, args...) ({ \
6640 pass_num++; \
6641 bool this_progress = pass(args); \
6642 \
6643 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
6644 char filename[64]; \
6645 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
6646 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
6647 \
6648 backend_shader::dump_instructions(filename); \
6649 } \
6650 \
6651 validate(); \
6652 \
6653 progress = progress || this_progress; \
6654 this_progress; \
6655 })
6656
6657 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
6658 char filename[64];
6659 snprintf(filename, 64, "%s%d-%s-00-00-start",
6660 stage_abbrev, dispatch_width, nir->info.name);
6661
6662 backend_shader::dump_instructions(filename);
6663 }
6664
6665 bool progress = false;
6666 int iteration = 0;
6667 int pass_num = 0;
6668
6669 OPT(remove_extra_rounding_modes);
6670
6671 do {
6672 progress = false;
6673 pass_num = 0;
6674 iteration++;
6675
6676 OPT(remove_duplicate_mrf_writes);
6677
6678 OPT(opt_algebraic);
6679 OPT(opt_cse);
6680 OPT(opt_copy_propagation);
6681 OPT(opt_predicated_break, this);
6682 OPT(opt_cmod_propagation);
6683 OPT(dead_code_eliminate);
6684 OPT(opt_peephole_sel);
6685 OPT(dead_control_flow_eliminate, this);
6686 OPT(opt_register_renaming);
6687 OPT(opt_saturate_propagation);
6688 OPT(register_coalesce);
6689 OPT(compute_to_mrf);
6690 OPT(eliminate_find_live_channel);
6691
6692 OPT(compact_virtual_grfs);
6693 } while (progress);
6694
6695 /* Do this after cmod propagation has had every possible opportunity to
6696 * propagate results into SEL instructions.
6697 */
6698 if (OPT(opt_peephole_csel))
6699 OPT(dead_code_eliminate);
6700
6701 progress = false;
6702 pass_num = 0;
6703
6704 if (OPT(lower_pack)) {
6705 OPT(register_coalesce);
6706 OPT(dead_code_eliminate);
6707 }
6708
6709 OPT(lower_simd_width);
6710
6711 /* After SIMD lowering just in case we had to unroll the EOT send. */
6712 OPT(opt_sampler_eot);
6713
6714 OPT(lower_logical_sends);
6715
6716 if (progress) {
6717 OPT(opt_copy_propagation);
6718 /* Only run after logical send lowering because it's easier to implement
6719 * in terms of physical sends.
6720 */
6721 if (OPT(opt_zero_samples))
6722 OPT(opt_copy_propagation);
6723 /* Run after logical send lowering to give it a chance to CSE the
6724 * LOAD_PAYLOAD instructions created to construct the payloads of
6725 * e.g. texturing messages in cases where it wasn't possible to CSE the
6726 * whole logical instruction.
6727 */
6728 OPT(opt_cse);
6729 OPT(register_coalesce);
6730 OPT(compute_to_mrf);
6731 OPT(dead_code_eliminate);
6732 OPT(remove_duplicate_mrf_writes);
6733 OPT(opt_peephole_sel);
6734 }
6735
6736 OPT(opt_redundant_discard_jumps);
6737
6738 if (OPT(lower_load_payload)) {
6739 split_virtual_grfs();
6740 OPT(register_coalesce);
6741 OPT(lower_simd_width);
6742 OPT(compute_to_mrf);
6743 OPT(dead_code_eliminate);
6744 }
6745
6746 OPT(opt_combine_constants);
6747 OPT(lower_integer_multiplication);
6748
6749 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
6750 OPT(opt_cmod_propagation);
6751 OPT(opt_cse);
6752 OPT(opt_copy_propagation);
6753 OPT(dead_code_eliminate);
6754 }
6755
6756 if (OPT(lower_regioning)) {
6757 OPT(opt_copy_propagation);
6758 OPT(dead_code_eliminate);
6759 OPT(lower_simd_width);
6760 }
6761
6762 lower_uniform_pull_constant_loads();
6763
6764 validate();
6765 }
6766
6767 /**
6768  * Three-source instructions must have a GRF/MRF destination register.
6769 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
6770 */
6771 void
6772 fs_visitor::fixup_3src_null_dest()
6773 {
6774 bool progress = false;
6775
6776 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
6777 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
6778 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
6779 inst->dst.type);
6780 progress = true;
6781 }
6782 }
6783
6784 if (progress)
6785 invalidate_live_intervals();
6786 }
6787
6788 void
6789 fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
6790 {
6791 bool allocated_without_spills;
6792
6793 static const enum instruction_scheduler_mode pre_modes[] = {
6794 SCHEDULE_PRE,
6795 SCHEDULE_PRE_NON_LIFO,
6796 SCHEDULE_PRE_LIFO,
6797 };
6798
6799 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
6800
6801 /* Try each scheduling heuristic to see if it can successfully register
6802 * allocate without spilling. They should be ordered by decreasing
6803 * performance but increasing likelihood of allocating.
6804 */
6805 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
6806 schedule_instructions(pre_modes[i]);
6807
6808 if (0) {
6809 assign_regs_trivial();
6810 allocated_without_spills = true;
6811 } else {
6812 allocated_without_spills = assign_regs(false, spill_all);
6813 }
6814 if (allocated_without_spills)
6815 break;
6816 }
6817
6818 if (!allocated_without_spills) {
6819 if (!allow_spilling)
6820 fail("Failure to register allocate and spilling is not allowed.");
6821
6822 /* We assume that any spilling is worse than just dropping back to
6823 * SIMD8. There's probably actually some intermediate point where
6824 * SIMD16 with a couple of spills is still better.
6825 */
6826 if (dispatch_width > min_dispatch_width) {
6827 fail("Failure to register allocate. Reduce number of "
6828 "live scalar values to avoid this.");
6829 } else {
6830 compiler->shader_perf_log(log_data,
6831 "%s shader triggered register spilling. "
6832 "Try reducing the number of live scalar "
6833 "values to improve performance.\n",
6834 stage_name);
6835 }
6836
6837 /* Since we're out of heuristics, just go spill registers until we
6838 * get an allocation.
6839 */
6840 while (!assign_regs(true, spill_all)) {
6841 if (failed)
6842 break;
6843 }
6844 }
6845
6846 /* This must come after all optimization and register allocation, since
6847 * it inserts dead code that happens to have side effects, and it does
6848 * so based on the actual physical registers in use.
6849 */
6850 insert_gen4_send_dependency_workarounds();
6851
6852 if (failed)
6853 return;
6854
6855 opt_bank_conflicts();
6856
6857 schedule_instructions(SCHEDULE_POST);
6858
6859 if (last_scratch > 0) {
6860 MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
6861
6862 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
6863
6864 if (stage == MESA_SHADER_COMPUTE) {
6865 if (devinfo->is_haswell) {
6866 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
6867 * field documentation, Haswell supports a minimum of 2kB of
6868 * scratch space for compute shaders, unlike every other stage
6869 * and platform.
6870 */
6871 prog_data->total_scratch = MAX2(prog_data->total_scratch, 2048);
6872 } else if (devinfo->gen <= 7) {
6873 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
6874 * field documentation, platforms prior to Haswell measure scratch
6875 * size linearly with a range of [1kB, 12kB] and 1kB granularity.
6876 */
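            /* For example, last_scratch = 1500 bytes becomes
             * ALIGN(1500, 1024) = 2048 bytes here, well under the 12kB cap
             * asserted below.
             */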
6877 prog_data->total_scratch = ALIGN(last_scratch, 1024);
6878 max_scratch_size = 12 * 1024;
6879 }
6880 }
6881
6882 /* We currently only support up to 2MB of scratch space. If we
6883 * need to support more eventually, the documentation suggests
6884 * that we could allocate a larger buffer, and partition it out
6885 * ourselves. We'd just have to undo the hardware's address
6886 * calculation by subtracting (FFTID * Per Thread Scratch Space)
6887 * and then add FFTID * (Larger Per Thread Scratch Space).
6888 *
6889 * See 3D-Media-GPGPU Engine > Media GPGPU Pipeline >
6890 * Thread Group Tracking > Local Memory/Scratch Space.
6891 */
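      /* Illustrative numbers: with a hypothetical 2MB hardware field and a
       * 4MB actual allocation, a thread's scratch base would be rebased as
       * addr - FFTID * 2MB + FFTID * 4MB.
       */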
6892 assert(prog_data->total_scratch < max_scratch_size);
6893 }
6894 }
6895
6896 bool
6897 fs_visitor::run_vs()
6898 {
6899 assert(stage == MESA_SHADER_VERTEX);
6900
6901 setup_vs_payload();
6902
6903 if (shader_time_index >= 0)
6904 emit_shader_time_begin();
6905
6906 emit_nir_code();
6907
6908 if (failed)
6909 return false;
6910
6911 compute_clip_distance();
6912
6913 emit_urb_writes();
6914
6915 if (shader_time_index >= 0)
6916 emit_shader_time_end();
6917
6918 calculate_cfg();
6919
6920 optimize();
6921
6922 assign_curb_setup();
6923 assign_vs_urb_setup();
6924
6925 fixup_3src_null_dest();
6926 allocate_registers(8, true);
6927
6928 return !failed;
6929 }
6930
6931 bool
6932 fs_visitor::run_tcs_single_patch()
6933 {
6934 assert(stage == MESA_SHADER_TESS_CTRL);
6935
6936 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
6937
6938 /* r1-r4 contain the ICP handles. */
6939 payload.num_regs = 5;
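   /* That is r0 (the thread payload header) plus the four ICP handle
    * registers above.
    */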
6940
6941 if (shader_time_index >= 0)
6942 emit_shader_time_begin();
6943
6944 /* Initialize gl_InvocationID */
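   /* brw_imm_uv() below is a packed 4-bit-per-channel vector immediate, so
    * 0x76543210 gives each of the 8 SIMD channels its own index (0..7).
    */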
6945 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
6946 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
6947 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
6948 bld.MOV(channels_ud, channels_uw);
6949
6950 if (tcs_prog_data->instances == 1) {
6951 invocation_id = channels_ud;
6952 } else {
6953 const unsigned invocation_id_mask = devinfo->gen >= 11 ?
6954 INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
6955 const unsigned invocation_id_shift = devinfo->gen >= 11 ? 16 : 17;
6956
6957 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
6958
6959 /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
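      /* Shifting right by (invocation_id_shift - 3) extracts the instance
       * field and leaves it multiplied by 8 in a single operation.
       */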
6960 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
6961 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
6962 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
6963 brw_imm_ud(invocation_id_mask));
6964 bld.SHR(instance_times_8, t, brw_imm_ud(invocation_id_shift - 3));
6965
6966 bld.ADD(invocation_id, instance_times_8, channels_ud);
6967 }
6968
6969 /* Fix the dispatch mask */
6970 if (nir->info.tess.tcs_vertices_out % 8) {
6971 bld.CMP(bld.null_reg_ud(), invocation_id,
6972 brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
6973 bld.IF(BRW_PREDICATE_NORMAL);
6974 }
6975
6976 emit_nir_code();
6977
6978 if (nir->info.tess.tcs_vertices_out % 8) {
6979 bld.emit(BRW_OPCODE_ENDIF);
6980 }
6981
6982 /* Emit EOT write; set TR DS Cache bit */
6983 fs_reg srcs[3] = {
6984 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
6985 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
6986 fs_reg(brw_imm_ud(0)),
6987 };
6988 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
6989 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
6990
6991 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
6992 bld.null_reg_ud(), payload);
6993 inst->mlen = 3;
6994 inst->eot = true;
6995
6996 if (shader_time_index >= 0)
6997 emit_shader_time_end();
6998
6999 if (failed)
7000 return false;
7001
7002 calculate_cfg();
7003
7004 optimize();
7005
7006 assign_curb_setup();
7007 assign_tcs_single_patch_urb_setup();
7008
7009 fixup_3src_null_dest();
7010 allocate_registers(8, true);
7011
7012 return !failed;
7013 }
7014
7015 bool
7016 fs_visitor::run_tes()
7017 {
7018 assert(stage == MESA_SHADER_TESS_EVAL);
7019
7020 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
7021 payload.num_regs = 5;
7022
7023 if (shader_time_index >= 0)
7024 emit_shader_time_begin();
7025
7026 emit_nir_code();
7027
7028 if (failed)
7029 return false;
7030
7031 emit_urb_writes();
7032
7033 if (shader_time_index >= 0)
7034 emit_shader_time_end();
7035
7036 calculate_cfg();
7037
7038 optimize();
7039
7040 assign_curb_setup();
7041 assign_tes_urb_setup();
7042
7043 fixup_3src_null_dest();
7044 allocate_registers(8, true);
7045
7046 return !failed;
7047 }
7048
7049 bool
7050 fs_visitor::run_gs()
7051 {
7052 assert(stage == MESA_SHADER_GEOMETRY);
7053
7054 setup_gs_payload();
7055
7056 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
7057
7058 if (gs_compile->control_data_header_size_bits > 0) {
7059 /* Create a VGRF to store accumulated control data bits. */
7060 this->control_data_bits = vgrf(glsl_type::uint_type);
7061
7062 /* If we're outputting more than 32 control data bits, then EmitVertex()
7063 * will set control_data_bits to 0 after emitting the first vertex.
7064 * Otherwise, we need to initialize it to 0 here.
7065 */
7066 if (gs_compile->control_data_header_size_bits <= 32) {
7067 const fs_builder abld = bld.annotate("initialize control data bits");
7068 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
7069 }
7070 }
7071
7072 if (shader_time_index >= 0)
7073 emit_shader_time_begin();
7074
7075 emit_nir_code();
7076
7077 emit_gs_thread_end();
7078
7079 if (shader_time_index >= 0)
7080 emit_shader_time_end();
7081
7082 if (failed)
7083 return false;
7084
7085 calculate_cfg();
7086
7087 optimize();
7088
7089 assign_curb_setup();
7090 assign_gs_urb_setup();
7091
7092 fixup_3src_null_dest();
7093 allocate_registers(8, true);
7094
7095 return !failed;
7096 }
7097
7098 /* From the SKL PRM, Volume 16, Workarounds:
7099 *
7100 * 0877 3D Pixel Shader Hang possible when pixel shader dispatched with
7101 * only header phases (R0-R2)
7102 *
7103 * WA: Enable a non-header phase (e.g. push constant) when dispatch would
7104 * have been header only.
7105 *
7106 * Instead of enabling push constants, one can alternatively enable one of the
7107 * inputs. Here one simply chooses "layer", which shouldn't impose much
7108 * overhead.
7109 */
7110 static void
7111 gen9_ps_header_only_workaround(struct brw_wm_prog_data *wm_prog_data)
7112 {
7113 if (wm_prog_data->num_varying_inputs)
7114 return;
7115
7116 if (wm_prog_data->base.curb_read_length)
7117 return;
7118
7119 wm_prog_data->urb_setup[VARYING_SLOT_LAYER] = 0;
7120 wm_prog_data->num_varying_inputs = 1;
7121 }
7122
7123 bool
7124 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
7125 {
7126 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
7127 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
7128
7129 assert(stage == MESA_SHADER_FRAGMENT);
7130
7131 if (devinfo->gen >= 6)
7132 setup_fs_payload_gen6();
7133 else
7134 setup_fs_payload_gen4();
7135
7136 if (0) {
7137 emit_dummy_fs();
7138 } else if (do_rep_send) {
7139 assert(dispatch_width == 16);
7140 emit_repclear_shader();
7141 } else {
7142 if (shader_time_index >= 0)
7143 emit_shader_time_begin();
7144
7145 calculate_urb_setup();
7146 if (nir->info.inputs_read > 0 ||
7147 (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
7148 if (devinfo->gen < 6)
7149 emit_interpolation_setup_gen4();
7150 else
7151 emit_interpolation_setup_gen6();
7152 }
7153
7154 /* We handle discards by keeping track of the still-live pixels in f0.1.
7155 * Initialize it with the dispatched pixels.
7156 */
7157 if (wm_prog_data->uses_kill) {
7158 const fs_reg dispatch_mask =
7159 devinfo->gen >= 6 ? brw_vec1_grf(1, 7) : brw_vec1_grf(0, 0);
7160 bld.exec_all().group(1, 0)
7161 .MOV(retype(brw_flag_reg(0, 1), BRW_REGISTER_TYPE_UW),
7162 retype(dispatch_mask, BRW_REGISTER_TYPE_UW));
7163 }
7164
7165 emit_nir_code();
7166
7167 if (failed)
7168 return false;
7169
7170 if (wm_prog_data->uses_kill)
7171 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
7172
7173 if (wm_key->alpha_test_func)
7174 emit_alpha_test();
7175
7176 emit_fb_writes();
7177
7178 if (shader_time_index >= 0)
7179 emit_shader_time_end();
7180
7181 calculate_cfg();
7182
7183 optimize();
7184
7185 assign_curb_setup();
7186
7187 if (devinfo->gen >= 9)
7188 gen9_ps_header_only_workaround(wm_prog_data);
7189
7190 assign_urb_setup();
7191
7192 fixup_3src_null_dest();
7193 allocate_registers(8, allow_spilling);
7194
7195 if (failed)
7196 return false;
7197 }
7198
7199 return !failed;
7200 }
7201
7202 bool
7203 fs_visitor::run_cs(unsigned min_dispatch_width)
7204 {
7205 assert(stage == MESA_SHADER_COMPUTE);
7206 assert(dispatch_width >= min_dispatch_width);
7207
7208 setup_cs_payload();
7209
7210 if (shader_time_index >= 0)
7211 emit_shader_time_begin();
7212
7213 if (devinfo->is_haswell && prog_data->total_shared > 0) {
7214 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
7215 const fs_builder abld = bld.exec_all().group(1, 0);
7216 abld.MOV(retype(brw_sr0_reg(1), BRW_REGISTER_TYPE_UW),
7217 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
7218 }
7219
7220 emit_nir_code();
7221
7222 if (failed)
7223 return false;
7224
7225 emit_cs_terminate();
7226
7227 if (shader_time_index >= 0)
7228 emit_shader_time_end();
7229
7230 calculate_cfg();
7231
7232 optimize();
7233
7234 assign_curb_setup();
7235
7236 fixup_3src_null_dest();
7237 allocate_registers(min_dispatch_width, true);
7238
7239 if (failed)
7240 return false;
7241
7242 return !failed;
7243 }
7244
7245 /**
7246 * Return a bitfield where bit n is set if barycentric interpolation mode n
7247 * (see enum brw_barycentric_mode) is needed by the fragment shader.
7248 *
7249 * We examine the load_barycentric intrinsics rather than looking at input
7250 * variables so that we catch interpolateAtCentroid() messages too, which
7251 * also need the BRW_BARYCENTRIC_[NON]PERSPECTIVE_CENTROID mode set up.
7252 */
7253 static unsigned
7254 brw_compute_barycentric_interp_modes(const struct gen_device_info *devinfo,
7255 const nir_shader *shader)
7256 {
7257 unsigned barycentric_interp_modes = 0;
7258
7259 nir_foreach_function(f, shader) {
7260 if (!f->impl)
7261 continue;
7262
7263 nir_foreach_block(block, f->impl) {
7264 nir_foreach_instr(instr, block) {
7265 if (instr->type != nir_instr_type_intrinsic)
7266 continue;
7267
7268 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
7269 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
7270 continue;
7271
7272 /* Ignore WPOS; it doesn't require interpolation. */
7273 if (nir_intrinsic_base(intrin) == VARYING_SLOT_POS)
7274 continue;
7275
7276 intrin = nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
7277 enum glsl_interp_mode interp = (enum glsl_interp_mode)
7278 nir_intrinsic_interp_mode(intrin);
7279 nir_intrinsic_op bary_op = intrin->intrinsic;
7280 enum brw_barycentric_mode bary =
7281 brw_barycentric_mode(interp, bary_op);
7282
7283 barycentric_interp_modes |= 1 << bary;
7284
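            /* On the affected platforms the hardware's centroid
             * barycentrics are unreliable for pixels with no lit samples
             * (our reading of the workaround), so the matching pixel mode
             * is requested too and used as a fallback later.
             */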
7285 if (devinfo->needs_unlit_centroid_workaround &&
7286 bary_op == nir_intrinsic_load_barycentric_centroid)
7287 barycentric_interp_modes |= 1 << centroid_to_pixel(bary);
7288 }
7289 }
7290 }
7291
7292 return barycentric_interp_modes;
7293 }
7294
7295 static void
7296 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
7297 const nir_shader *shader)
7298 {
7299 prog_data->flat_inputs = 0;
7300
7301 nir_foreach_variable(var, &shader->inputs) {
7302 unsigned slots = glsl_count_attribute_slots(var->type, false);
7303 for (unsigned s = 0; s < slots; s++) {
7304 int input_index = prog_data->urb_setup[var->data.location + s];
7305
7306 if (input_index < 0)
7307 continue;
7308
7309 /* flat shading */
7310 if (var->data.interpolation == INTERP_MODE_FLAT)
7311 prog_data->flat_inputs |= 1 << input_index;
7312 }
7313 }
7314 }
7315
7316 static uint8_t
7317 computed_depth_mode(const nir_shader *shader)
7318 {
7319 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
7320 switch (shader->info.fs.depth_layout) {
7321 case FRAG_DEPTH_LAYOUT_NONE:
7322 case FRAG_DEPTH_LAYOUT_ANY:
7323 return BRW_PSCDEPTH_ON;
7324 case FRAG_DEPTH_LAYOUT_GREATER:
7325 return BRW_PSCDEPTH_ON_GE;
7326 case FRAG_DEPTH_LAYOUT_LESS:
7327 return BRW_PSCDEPTH_ON_LE;
7328 case FRAG_DEPTH_LAYOUT_UNCHANGED:
7329 return BRW_PSCDEPTH_OFF;
7330 }
7331 }
7332 return BRW_PSCDEPTH_OFF;
7333 }
7334
7335 /**
7336 * Move load_interpolated_input with simple (payload-based) barycentric modes
7337 * to the top of the program so we don't emit multiple PLNs for the same input.
7338 *
7339 * This works around CSE not being able to handle non-dominating cases
7340 * such as:
7341 *
7342 * if (...) {
7343 * interpolate input
7344 * } else {
7345 * interpolate the same exact input
7346 * }
7347 *
7348 * This should be replaced by global value numbering someday.
7349 */
7350 static bool
7351 move_interpolation_to_top(nir_shader *nir)
7352 {
7353 bool progress = false;
7354
7355 nir_foreach_function(f, nir) {
7356 if (!f->impl)
7357 continue;
7358
7359 nir_block *top = nir_start_block(f->impl);
7360 exec_node *cursor_node = NULL;
7361
7362 nir_foreach_block(block, f->impl) {
7363 if (block == top)
7364 continue;
7365
7366 nir_foreach_instr_safe(instr, block) {
7367 if (instr->type != nir_instr_type_intrinsic)
7368 continue;
7369
7370 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
7371 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
7372 continue;
7373 nir_intrinsic_instr *bary_intrinsic =
7374 nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
7375 nir_intrinsic_op op = bary_intrinsic->intrinsic;
7376
7377 /* Leave interpolateAtSample/Offset() where they are. */
7378 if (op == nir_intrinsic_load_barycentric_at_sample ||
7379 op == nir_intrinsic_load_barycentric_at_offset)
7380 continue;
7381
7382 nir_instr *move[3] = {
7383 &bary_intrinsic->instr,
7384 intrin->src[1].ssa->parent_instr,
7385 instr
7386 };
7387
7388 for (unsigned i = 0; i < ARRAY_SIZE(move); i++) {
7389 if (move[i]->block != top) {
7390 move[i]->block = top;
7391 exec_node_remove(&move[i]->node);
7392 if (cursor_node) {
7393 exec_node_insert_after(cursor_node, &move[i]->node);
7394 } else {
7395 exec_list_push_head(&top->instr_list, &move[i]->node);
7396 }
7397 cursor_node = &move[i]->node;
7398 progress = true;
7399 }
7400 }
7401 }
7402 }
7403 nir_metadata_preserve(f->impl, (nir_metadata)
7404 ((unsigned) nir_metadata_block_index |
7405 (unsigned) nir_metadata_dominance));
7406 }
7407
7408 return progress;
7409 }
7410
7411 /**
7412 * Demote per-sample barycentric intrinsics to centroid.
7413 *
7414 * Useful when rendering to a non-multisampled buffer.
7415 */
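/* e.g. a load_barycentric_at_sample intrinsic is rewritten to a
 * load_barycentric_centroid with the same interpolation mode, which is
 * equivalent when there is only one sample per pixel.
 */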
7416 static bool
7417 demote_sample_qualifiers(nir_shader *nir)
7418 {
7419 bool progress = false;
7420
7421 nir_foreach_function(f, nir) {
7422 if (!f->impl)
7423 continue;
7424
7425 nir_builder b;
7426 nir_builder_init(&b, f->impl);
7427
7428 nir_foreach_block(block, f->impl) {
7429 nir_foreach_instr_safe(instr, block) {
7430 if (instr->type != nir_instr_type_intrinsic)
7431 continue;
7432
7433 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
7434 if (intrin->intrinsic != nir_intrinsic_load_barycentric_sample &&
7435 intrin->intrinsic != nir_intrinsic_load_barycentric_at_sample)
7436 continue;
7437
7438 b.cursor = nir_before_instr(instr);
7439 nir_ssa_def *centroid =
7440 nir_load_barycentric(&b, nir_intrinsic_load_barycentric_centroid,
7441 nir_intrinsic_interp_mode(intrin));
7442 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
7443 nir_src_for_ssa(centroid));
7444 nir_instr_remove(instr);
7445 progress = true;
7446 }
7447 }
7448
7449 nir_metadata_preserve(f->impl, (nir_metadata)
7450 ((unsigned) nir_metadata_block_index |
7451 (unsigned) nir_metadata_dominance));
7452 }
7453
7454 return progress;
7455 }
7456
7457 /**
7458 * Pre-gen6, the register file of the EUs was shared between threads,
7459 * and each thread used some subset allocated on a 16-register block
7460 * granularity. The unit states wanted these block counts.
7461 */
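/* For example, reg_count = 48 gives ALIGN(48, 16) / 16 - 1 = 2, i.e. the
 * encoding for three 16-register blocks.
 */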
7462 static inline int
7463 brw_register_blocks(int reg_count)
7464 {
7465 return ALIGN(reg_count, 16) / 16 - 1;
7466 }
7467
7468 const unsigned *
7469 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
7470 void *mem_ctx,
7471 const struct brw_wm_prog_key *key,
7472 struct brw_wm_prog_data *prog_data,
7473 nir_shader *shader,
7474 struct gl_program *prog,
7475 int shader_time_index8, int shader_time_index16,
7476 int shader_time_index32, bool allow_spilling,
7477 bool use_rep_send, struct brw_vue_map *vue_map,
7478 char **error_str)
7479 {
7480 const struct gen_device_info *devinfo = compiler->devinfo;
7481
7482 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
7483 brw_nir_lower_fs_inputs(shader, devinfo, key);
7484 brw_nir_lower_fs_outputs(shader);
7485
7486 if (devinfo->gen < 6) {
7487 brw_setup_vue_interpolation(vue_map, shader, prog_data, devinfo);
7488 }
7489
7490 if (!key->multisample_fbo)
7491 NIR_PASS_V(shader, demote_sample_qualifiers);
7492 NIR_PASS_V(shader, move_interpolation_to_top);
7493 shader = brw_postprocess_nir(shader, compiler, true);
7494
7495 /* key->alpha_test_func means simulating alpha testing via discards,
7496 * so the shader definitely kills pixels.
7497 */
7498 prog_data->uses_kill = shader->info.fs.uses_discard ||
7499 key->alpha_test_func;
7500 prog_data->uses_omask = key->multisample_fbo &&
7501 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
7502 prog_data->computed_depth_mode = computed_depth_mode(shader);
7503 prog_data->computed_stencil =
7504 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
7505
7506 prog_data->persample_dispatch =
7507 key->multisample_fbo &&
7508 (key->persample_interp ||
7509 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
7510 SYSTEM_BIT_SAMPLE_POS)) ||
7511 shader->info.fs.uses_sample_qualifier ||
7512 shader->info.outputs_read);
7513
7514 prog_data->has_render_target_reads = shader->info.outputs_read != 0ull;
7515
7516 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
7517 prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
7518 prog_data->inner_coverage = shader->info.fs.inner_coverage;
7519
7520 prog_data->barycentric_interp_modes =
7521 brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
7522
7523 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
7524
7525 fs_visitor v8(compiler, log_data, mem_ctx, key,
7526 &prog_data->base, prog, shader, 8,
7527 shader_time_index8);
7528 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
7529 if (error_str)
7530 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
7531
7532 return NULL;
7533 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
7534 simd8_cfg = v8.cfg;
7535 prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
7536 prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
7537 }
7538
7539 if (v8.max_dispatch_width >= 16 &&
7540 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
7541 /* Try a SIMD16 compile */
7542 fs_visitor v16(compiler, log_data, mem_ctx, key,
7543 &prog_data->base, prog, shader, 16,
7544 shader_time_index16);
7545 v16.import_uniforms(&v8);
7546 if (!v16.run_fs(allow_spilling, use_rep_send)) {
7547 compiler->shader_perf_log(log_data,
7548 "SIMD16 shader failed to compile: %s",
7549 v16.fail_msg);
7550 } else {
7551 simd16_cfg = v16.cfg;
7552 prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
7553 prog_data->reg_blocks_16 = brw_register_blocks(v16.grf_used);
7554 }
7555 }
7556
7557 /* Currently, the compiler only supports SIMD32 on SNB+ */
7558 if (v8.max_dispatch_width >= 32 && !use_rep_send &&
7559 compiler->devinfo->gen >= 6 &&
7560 unlikely(INTEL_DEBUG & DEBUG_DO32)) {
7561 /* Try a SIMD32 compile */
7562 fs_visitor v32(compiler, log_data, mem_ctx, key,
7563 &prog_data->base, prog, shader, 32,
7564 shader_time_index32);
7565 v32.import_uniforms(&v8);
7566 if (!v32.run_fs(allow_spilling, false)) {
7567 compiler->shader_perf_log(log_data,
7568 "SIMD32 shader failed to compile: %s",
7569 v32.fail_msg);
7570 } else {
7571 simd32_cfg = v32.cfg;
7572 prog_data->dispatch_grf_start_reg_32 = v32.payload.num_regs;
7573 prog_data->reg_blocks_32 = brw_register_blocks(v32.grf_used);
7574 }
7575 }
7576
7577 /* When the caller requests a repclear shader, they want SIMD16-only */
7578 if (use_rep_send)
7579 simd8_cfg = NULL;
7580
7581 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
7582 * at the top to select the shader. We've never implemented that.
7583 * Instead, we just give them exactly one shader and we pick the widest one
7584 * available.
7585 */
7586 if (compiler->devinfo->gen < 5) {
7587 if (simd32_cfg || simd16_cfg)
7588 simd8_cfg = NULL;
7589 if (simd32_cfg)
7590 simd16_cfg = NULL;
7591 }
7592
7593 /* If computed depth is enabled, SNB only allows SIMD8. */
7594 if (compiler->devinfo->gen == 6 &&
7595 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF)
7596 assert(simd16_cfg == NULL && simd32_cfg == NULL);
7597
7598 if (compiler->devinfo->gen <= 5 && !simd8_cfg) {
7599 /* Iron Lake and earlier only have one Dispatch GRF start field. Make
7600 * the data available in the base prog data struct for convenience.
7601 */
7602 if (simd16_cfg) {
7603 prog_data->base.dispatch_grf_start_reg =
7604 prog_data->dispatch_grf_start_reg_16;
7605 } else if (simd32_cfg) {
7606 prog_data->base.dispatch_grf_start_reg =
7607 prog_data->dispatch_grf_start_reg_32;
7608 }
7609 }
7610
7611 if (prog_data->persample_dispatch) {
7612 /* Starting with SandyBridge (where we first get MSAA), the different
7613 * pixel dispatch combinations are grouped into classifications A
7614 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
7615 * generations, the only configurations supporting persample dispatch
7616 * are those in which only one dispatch width is enabled.
7617 */
7618 if (simd32_cfg || simd16_cfg)
7619 simd8_cfg = NULL;
7620 if (simd32_cfg)
7621 simd16_cfg = NULL;
7622 }
7623
7624 /* We have to compute the flat inputs after the visitor is finished running
7625 * because it relies on prog_data->urb_setup which is computed in
7626 * fs_visitor::calculate_urb_setup().
7627 */
7628 brw_compute_flat_inputs(prog_data, shader);
7629
7630 fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
7631 v8.promoted_constants, v8.runtime_check_aads_emit,
7632 MESA_SHADER_FRAGMENT);
7633
7634 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
7635 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
7636 shader->info.label ?
7637 shader->info.label : "unnamed",
7638 shader->info.name));
7639 }
7640
7641 if (simd8_cfg) {
7642 prog_data->dispatch_8 = true;
7643 g.generate_code(simd8_cfg, 8);
7644 }
7645
7646 if (simd16_cfg) {
7647 prog_data->dispatch_16 = true;
7648 prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
7649 }
7650
7651 if (simd32_cfg) {
7652 prog_data->dispatch_32 = true;
7653 prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32);
7654 }
7655
7656 return g.get_assembly();
7657 }
7658
7659 fs_reg *
7660 fs_visitor::emit_cs_work_group_id_setup()
7661 {
7662 assert(stage == MESA_SHADER_COMPUTE);
7663
7664 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
7665
7666 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
7667 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
7668 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
7669
7670 bld.MOV(*reg, r0_1);
7671 bld.MOV(offset(*reg, bld, 1), r0_6);
7672 bld.MOV(offset(*reg, bld, 2), r0_7);
7673
7674 return reg;
7675 }
7676
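/* Rounds a push constant block size in dwords up to whole registers: e.g.
 * 18 dwords become DIV_ROUND_UP(18, 8) = 3 registers, i.e. 96 bytes.
 */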
7677 static void
7678 fill_push_const_block_info(struct brw_push_const_block *block, unsigned dwords)
7679 {
7680 block->dwords = dwords;
7681 block->regs = DIV_ROUND_UP(dwords, 8);
7682 block->size = block->regs * 32;
7683 }
7684
7685 static void
7686 cs_fill_push_const_info(const struct gen_device_info *devinfo,
7687 struct brw_cs_prog_data *cs_prog_data)
7688 {
7689 const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
7690 int subgroup_id_index = get_subgroup_id_param_index(prog_data);
7691 bool cross_thread_supported = devinfo->gen > 7 || devinfo->is_haswell;
7692
7693 /* The thread ID should be stored in the last param dword */
7694 assert(subgroup_id_index == -1 ||
7695 subgroup_id_index == (int)prog_data->nr_params - 1);
7696
7697 unsigned cross_thread_dwords, per_thread_dwords;
7698 if (!cross_thread_supported) {
7699 cross_thread_dwords = 0u;
7700 per_thread_dwords = prog_data->nr_params;
7701 } else if (subgroup_id_index >= 0) {
7702 /* Fill all but the last register with cross-thread payload */
7703 cross_thread_dwords = 8 * (subgroup_id_index / 8);
7704 per_thread_dwords = prog_data->nr_params - cross_thread_dwords;
7705 assert(per_thread_dwords > 0 && per_thread_dwords <= 8);
7706 } else {
7707 /* Fill all data using cross-thread payload */
7708 cross_thread_dwords = prog_data->nr_params;
7709 per_thread_dwords = 0u;
7710 }
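   /* Worked example (hypothetical counts): nr_params = 21 with the
    * subgroup ID at index 20 gives cross_thread_dwords = 8 * (20 / 8) = 16
    * and per_thread_dwords = 21 - 16 = 5.
    */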
7711
7712 fill_push_const_block_info(&cs_prog_data->push.cross_thread, cross_thread_dwords);
7713 fill_push_const_block_info(&cs_prog_data->push.per_thread, per_thread_dwords);
7714
7715 unsigned total_dwords =
7716 (cs_prog_data->push.per_thread.size * cs_prog_data->threads +
7717 cs_prog_data->push.cross_thread.size) / 4;
7718 fill_push_const_block_info(&cs_prog_data->push.total, total_dwords);
7719
7720 assert(cs_prog_data->push.cross_thread.dwords % 8 == 0 ||
7721 cs_prog_data->push.per_thread.size == 0);
7722 assert(cs_prog_data->push.cross_thread.dwords +
7723 cs_prog_data->push.per_thread.dwords ==
7724 prog_data->nr_params);
7725 }
7726
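/* For example, a 10x10x1 workgroup (100 invocations) at SIMD16 needs
 * (100 + 16 - 1) / 16 = 7 threads.
 */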
7727 static void
7728 cs_set_simd_size(struct brw_cs_prog_data *cs_prog_data, unsigned size)
7729 {
7730 cs_prog_data->simd_size = size;
7731 unsigned group_size = cs_prog_data->local_size[0] *
7732 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
7733 cs_prog_data->threads = (group_size + size - 1) / size;
7734 }
7735
7736 static nir_shader *
7737 compile_cs_to_nir(const struct brw_compiler *compiler,
7738 void *mem_ctx,
7739 const struct brw_cs_prog_key *key,
7740 const nir_shader *src_shader,
7741 unsigned dispatch_width)
7742 {
7743 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
7744 shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, true);
7745 brw_nir_lower_cs_intrinsics(shader, dispatch_width);
7746 return brw_postprocess_nir(shader, compiler, true);
7747 }
7748
7749 const unsigned *
7750 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
7751 void *mem_ctx,
7752 const struct brw_cs_prog_key *key,
7753 struct brw_cs_prog_data *prog_data,
7754 const nir_shader *src_shader,
7755 int shader_time_index,
7756 char **error_str)
7757 {
7758 prog_data->local_size[0] = src_shader->info.cs.local_size[0];
7759 prog_data->local_size[1] = src_shader->info.cs.local_size[1];
7760 prog_data->local_size[2] = src_shader->info.cs.local_size[2];
7761 unsigned local_workgroup_size =
7762 src_shader->info.cs.local_size[0] * src_shader->info.cs.local_size[1] *
7763 src_shader->info.cs.local_size[2];
7764
7765 unsigned min_dispatch_width =
7766 DIV_ROUND_UP(local_workgroup_size, compiler->devinfo->max_cs_threads);
7767 min_dispatch_width = MAX2(8, min_dispatch_width);
7768 min_dispatch_width = util_next_power_of_two(min_dispatch_width);
7769 assert(min_dispatch_width <= 32);
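   /* Illustrative numbers: a 1024-invocation workgroup on hardware with,
    * say, 64 CS threads needs DIV_ROUND_UP(1024, 64) = 16 lanes per thread,
    * so min_dispatch_width becomes 16 and SIMD8 is never attempted.
    */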
7770
7771 fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
7772 cfg_t *cfg = NULL;
7773 const char *fail_msg = NULL;
7774 unsigned promoted_constants = 0;
7775
7776 /* Now the main event: Visit the shader IR and generate our CS IR for it.
7777 */
7778 if (min_dispatch_width <= 8) {
7779 nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
7780 src_shader, 8);
7781 v8 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
7782 NULL, /* Never used in core profile */
7783 nir8, 8, shader_time_index);
7784 if (!v8->run_cs(min_dispatch_width)) {
7785 fail_msg = v8->fail_msg;
7786 } else {
7787 /* We should always be able to do SIMD32 for compute shaders */
7788 assert(v8->max_dispatch_width >= 32);
7789
7790 cfg = v8->cfg;
7791 cs_set_simd_size(prog_data, 8);
7792 cs_fill_push_const_info(compiler->devinfo, prog_data);
7793 promoted_constants = v8->promoted_constants;
7794 }
7795 }
7796
7797 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
7798 !fail_msg && min_dispatch_width <= 16) {
7799 /* Try a SIMD16 compile */
7800 nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
7801 src_shader, 16);
7802 v16 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
7803 NULL, /* Never used in core profile */
7804 nir16, 16, shader_time_index);
7805 if (v8)
7806 v16->import_uniforms(v8);
7807
7808 if (!v16->run_cs(min_dispatch_width)) {
7809 compiler->shader_perf_log(log_data,
7810 "SIMD16 shader failed to compile: %s",
7811 v16->fail_msg);
7812 if (!cfg) {
7813 fail_msg =
7814 "Couldn't generate SIMD16 program and not "
7815 "enough threads for SIMD8";
7816 }
7817 } else {
7818 /* We should always be able to do SIMD32 for compute shaders */
7819 assert(v16->max_dispatch_width >= 32);
7820
7821 cfg = v16->cfg;
7822 cs_set_simd_size(prog_data, 16);
7823 cs_fill_push_const_info(compiler->devinfo, prog_data);
7824 promoted_constants = v16->promoted_constants;
7825 }
7826 }
7827
7828 /* We should always be able to do SIMD32 for compute shaders */
7829 assert(!v16 || v16->max_dispatch_width >= 32);
7830
7831 if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
7832 /* Try a SIMD32 compile */
7833 nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
7834 src_shader, 32);
7835 v32 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
7836 NULL, /* Never used in core profile */
7837 nir32, 32, shader_time_index);
7838 if (v8)
7839 v32->import_uniforms(v8);
7840 else if (v16)
7841 v32->import_uniforms(v16);
7842
7843 if (!v32->run_cs(min_dispatch_width)) {
7844 compiler->shader_perf_log(log_data,
7845 "SIMD32 shader failed to compile: %s",
7846 v32->fail_msg);
7847 if (!cfg) {
7848 fail_msg =
7849 "Couldn't generate SIMD32 program and not "
7850 "enough threads for SIMD16";
7851 }
7852 } else {
7853 cfg = v32->cfg;
7854 cs_set_simd_size(prog_data, 32);
7855 cs_fill_push_const_info(compiler->devinfo, prog_data);
7856 promoted_constants = v32->promoted_constants;
7857 }
7858 }
7859
7860 const unsigned *ret = NULL;
7861 if (unlikely(cfg == NULL)) {
7862 assert(fail_msg);
7863 if (error_str)
7864 *error_str = ralloc_strdup(mem_ctx, fail_msg);
7865 } else {
7866 fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
7867 promoted_constants, false, MESA_SHADER_COMPUTE);
7868 if (INTEL_DEBUG & DEBUG_CS) {
7869 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
7870 src_shader->info.label ?
7871 src_shader->info.label : "unnamed",
7872 src_shader->info.name);
7873 g.enable_debug(name);
7874 }
7875
7876 g.generate_code(cfg, prog_data->simd_size);
7877
7878 ret = g.get_assembly();
7879 }
7880
7881 delete v8;
7882 delete v16;
7883 delete v32;
7884
7885 return ret;
7886 }
7887
7888 /**
7889 * Test the dispatch mask packing assumptions of
7890 * brw_stage_has_packed_dispatch(). Call this from e.g. the top of
7891 * fs_visitor::emit_nir_code() to cause a GPU hang if any shader invocation is
7892 * executed with an unexpected dispatch mask.
7893 */
7894 static UNUSED void
7895 brw_fs_test_dispatch_packing(const fs_builder &bld)
7896 {
7897 const gl_shader_stage stage = bld.shader->stage;
7898
7899 if (brw_stage_has_packed_dispatch(bld.shader->devinfo, stage,
7900 bld.shader->stage_prog_data)) {
7901 const fs_builder ubld = bld.exec_all().group(1, 0);
7902 const fs_reg tmp = component(bld.vgrf(BRW_REGISTER_TYPE_UD), 0);
7903 const fs_reg mask = (stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
7904 brw_dmask_reg());
7905
7906 ubld.ADD(tmp, mask, brw_imm_ud(1));
7907 ubld.AND(tmp, mask, tmp);
7908
7909 /* This will loop forever if the dispatch mask doesn't have the expected
7910 * form '2^n-1', in which case tmp will be non-zero.
7911 */
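      /* e.g. a fully packed SIMD16 mask is 0xffff: 0xffff + 1 = 0x10000 and
       * 0x10000 & 0xffff = 0, so the loop exits immediately; a mask with
       * holes leaves tmp non-zero and the loop spins forever.
       */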
7912 bld.emit(BRW_OPCODE_DO);
7913 bld.CMP(bld.null_reg_ud(), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
7914 set_predicate(BRW_PREDICATE_NORMAL, bld.emit(BRW_OPCODE_WHILE));
7915 }
7916 }