intel/fs/gen11+: Handle ROR/ROL in lower_simd_width().
[mesa.git] / src/intel/compiler/brw_fs.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs.cpp
 *
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */

#include "main/macros.h"
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_dead_control_flow.h"
#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "program/prog_parameter.h"
#include "util/u_math.h"

using namespace brw;

static unsigned get_lowered_simd_width(const struct gen_device_info *devinfo,
                                       const fs_inst *inst);

void
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
              const fs_reg *src, unsigned sources)
{
   memset((void*)this, 0, sizeof(*this));

   this->src = new fs_reg[MAX2(sources, 3)];
   for (unsigned i = 0; i < sources; i++)
      this->src[i] = src[i];

   this->opcode = opcode;
   this->dst = dst;
   this->sources = sources;
   this->exec_size = exec_size;
   this->base_mrf = -1;

   assert(dst.file != IMM && dst.file != UNIFORM);

   assert(this->exec_size != 0);

   this->conditional_mod = BRW_CONDITIONAL_NONE;

   /* This will be the case for almost all instructions. */
   switch (dst.file) {
   case VGRF:
   case ARF:
   case FIXED_GRF:
   case MRF:
   case ATTR:
      this->size_written = dst.component_size(exec_size);
      break;
   case BAD_FILE:
      this->size_written = 0;
      break;
   case IMM:
   case UNIFORM:
      unreachable("Invalid destination register file");
   }

   this->writes_accumulator = false;
}

fs_inst::fs_inst()
{
   init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
{
   init(opcode, exec_size, reg_undef, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
{
   init(opcode, exec_size, dst, NULL, 0);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0)
{
   const fs_reg src[1] = { src0 };
   init(opcode, exec_size, dst, src, 1);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1)
{
   const fs_reg src[2] = { src0, src1 };
   init(opcode, exec_size, dst, src, 2);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
{
   const fs_reg src[3] = { src0, src1, src2 };
   init(opcode, exec_size, dst, src, 3);
}

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
                 const fs_reg src[], unsigned sources)
{
   init(opcode, exec_width, dst, src, sources);
}

fs_inst::fs_inst(const fs_inst &that)
{
   memcpy((void*)this, &that, sizeof(that));

   this->src = new fs_reg[MAX2(that.sources, 3)];

   for (unsigned i = 0; i < that.sources; i++)
      this->src[i] = that.src[i];
}

fs_inst::~fs_inst()
{
   delete[] this->src;
}

void
fs_inst::resize_sources(uint8_t num_sources)
{
   if (this->sources != num_sources) {
      fs_reg *src = new fs_reg[MAX2(num_sources, 3)];

      for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
         src[i] = this->src[i];

      delete[] this->src;
      this->src = src;
      this->sources = num_sources;
   }
}

void
fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
                                       const fs_reg &dst,
                                       const fs_reg &surf_index,
                                       const fs_reg &varying_offset,
                                       uint32_t const_offset)
{
   /* We have our constant surface use a pitch of 4 bytes, so our index can
    * be any component of a vector, and then we load 4 contiguous
    * components starting from that.
    *
    * We break down the const_offset to a portion added to the variable offset
    * and a portion done using fs_reg::offset, which means that if you have
    * GLSL using something like "uniform vec4 a[20]; gl_FragColor = a[i]",
    * we'll temporarily generate 4 vec4 loads from offset i * 4, and CSE can
    * later notice that those loads are all the same and eliminate the
    * redundant ones.
    */
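   /* For example, with const_offset == 20 and a float destination, the
    * vec4-aligned 16 bytes are folded into vec4_offset below, and the
    * shuffle at the end picks component (20 & 0xf) / 4 == 1 of the loaded
    * vec4.
    */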
   fs_reg vec4_offset = vgrf(glsl_type::uint_type);
   bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));

   /* The pull load message will load a vec4 (16 bytes). If we are loading
    * a double this means we are only loading 2 elements worth of data.
    * We also want to use a 32-bit data type for the dst of the load operation
    * so other parts of the driver don't get confused about the size of the
    * result.
    */
   fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
   fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
                            vec4_result, surf_index, vec4_offset);
   inst->size_written = 4 * vec4_result.component_size(inst->exec_size);

   shuffle_from_32bit_read(bld, dst, vec4_result,
                           (const_offset & 0xf) / type_sz(dst.type), 1);
}

/**
 * A helper for MOV generation for fixing up broken hardware SEND dependency
 * handling.
 */
void
fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
{
   /* The caller always wants uncompressed to emit the minimal extra
    * dependencies, and to avoid having to deal with aligning its regs to 2.
    */
   const fs_builder ubld = bld.annotate("send dependency resolve")
                              .half(0);

   ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
}

bool
fs_inst::is_send_from_grf() const
{
   switch (opcode) {
   case SHADER_OPCODE_SEND:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case SHADER_OPCODE_INTERLOCK:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_BARRIER:
      return true;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return src[1].file == VGRF;
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_READ:
      return src[0].file == VGRF;
   default:
      if (is_tex())
         return src[0].file == VGRF;

      return false;
   }
}

bool
fs_inst::is_control_source(unsigned arg) const
{
   switch (opcode) {
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
      return arg == 0;

   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_SHUFFLE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
      return arg == 1;

   case SHADER_OPCODE_MOV_INDIRECT:
   case SHADER_OPCODE_CLUSTER_BROADCAST:
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_UMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
      return arg == 1 || arg == 2;

   case SHADER_OPCODE_SEND:
      return arg == 0 || arg == 1;

   default:
      return false;
   }
}

bool
fs_inst::is_payload(unsigned arg) const
{
   switch (opcode) {
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_FB_READ:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case SHADER_OPCODE_INTERLOCK:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_BARRIER:
      return arg == 0;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return arg == 1;

   case SHADER_OPCODE_SEND:
      return arg == 2 || arg == 3;

   default:
      if (is_tex())
         return arg == 0;
      else
         return false;
   }
}

/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use. For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * - SIMD16 compressed instructions with certain regioning (see below).
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
fs_inst::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      /* Multiple partial writes to the destination */
      return true;
   case SHADER_OPCODE_SHUFFLE:
      /* This instruction returns an arbitrary channel from the source and
       * gets split into smaller instructions in the generator. It's possible
       * that one of the instructions will read from a channel corresponding
       * to an earlier instruction.
       */
   case SHADER_OPCODE_SEL_EXEC:
      /* This is implemented as
       *
       * mov(16)      g4<1>D      0D            { align1 WE_all 1H };
       * mov(16)      g4<1>D      g5<8,8,1>D    { align1 1H }
       *
       * Because the source is only read in the second instruction, the first
       * may stomp all over it.
       */
      return true;
   case SHADER_OPCODE_QUAD_SWIZZLE:
      switch (src[1].ud) {
      case BRW_SWIZZLE_XXXX:
      case BRW_SWIZZLE_YYYY:
      case BRW_SWIZZLE_ZZZZ:
      case BRW_SWIZZLE_WWWW:
      case BRW_SWIZZLE_XXZZ:
      case BRW_SWIZZLE_YYWW:
      case BRW_SWIZZLE_XYXY:
      case BRW_SWIZZLE_ZWZW:
         /* These can be implemented as a single Align1 region on all
          * platforms, so there's never a hazard between source and
          * destination. C.f. fs_generator::generate_quad_swizzle().
          */
         return false;
      default:
         return !is_uniform(src[0]);
      }
   default:
      /* The SIMD16 compressed instruction
       *
       * add(16)      g4<1>F      g4<8,8,1>F   g6<8,8,1>F
       *
       * is actually decoded in hardware as:
       *
       * add(8)       g4<1>F      g4<8,8,1>F   g6<8,8,1>F
       * add(8)       g5<1>F      g5<8,8,1>F   g7<8,8,1>F
       *
       * Which is safe. However, if we have uniform accesses
       * happening, we get into trouble:
       *
       * add(8)       g4<1>F      g4<0,1,0>F   g6<8,8,1>F
       * add(8)       g5<1>F      g4<0,1,0>F   g7<8,8,1>F
       *
       * Now our destination for the first instruction overwrote the
       * second instruction's src0, and we get garbage for those 8
       * pixels. There's a similar issue for the pre-gen6
       * pixel_x/pixel_y, which are registers of 16-bit values and thus
       * would get stomped by the first decode as well.
       */
      if (exec_size == 16) {
         for (int i = 0; i < sources; i++) {
            if (src[i].file == VGRF && (src[i].stride == 0 ||
                                        src[i].type == BRW_REGISTER_TYPE_UW ||
                                        src[i].type == BRW_REGISTER_TYPE_W ||
                                        src[i].type == BRW_REGISTER_TYPE_UB ||
                                        src[i].type == BRW_REGISTER_TYPE_B)) {
               return true;
            }
         }
      }
      return false;
   }
}

bool
fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
{
   if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   fs_reg reg = this->src[0];
   if (reg.file != VGRF || reg.offset != 0 || reg.stride != 1)
      return false;

   if (grf_alloc.sizes[reg.nr] * REG_SIZE != this->size_written)
      return false;

   for (int i = 0; i < this->sources; i++) {
      reg.type = this->src[i].type;
      if (!this->src[i].equals(reg))
         return false;

      if (i < this->header_size) {
         reg.offset += REG_SIZE;
      } else {
         reg = horiz_offset(reg, this->exec_size);
      }
   }

   return true;
}

bool
fs_inst::can_do_source_mods(const struct gen_device_info *devinfo) const
{
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   /* From GEN:BUG:1604601757:
    *
    * "When multiplying a DW and any lower precision integer, source modifier
    *  is not supported."
    */
   if (devinfo->gen >= 12 && (opcode == BRW_OPCODE_MUL ||
                              opcode == BRW_OPCODE_MAD)) {
      const brw_reg_type exec_type = get_exec_type(this);
      const unsigned min_type_sz = opcode == BRW_OPCODE_MAD ?
         MIN2(type_sz(src[1].type), type_sz(src[2].type)) :
         MIN2(type_sz(src[0].type), type_sz(src[1].type));

      if (brw_reg_type_is_integer(exec_type) &&
          type_sz(exec_type) >= 4 &&
          type_sz(exec_type) != min_type_sz)
         return false;
   }

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

bool
fs_inst::can_do_cmod()
{
   if (!backend_instruction::can_do_cmod())
      return false;

   /* The accumulator result appears to get used for the conditional modifier
    * generation. When negating a UD value, there is a 33rd bit generated for
    * the sign in the accumulator value, so now you can't check, for example,
    * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
    */
   for (unsigned i = 0; i < sources; i++) {
      if (type_is_unsigned_int(src[i].type) && src[i].negate)
         return false;
   }

   return true;
}

bool
fs_inst::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}

void
fs_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   type = BRW_REGISTER_TYPE_UD;
   stride = 1;
}

/** Generic unset register constructor. */
fs_reg::fs_reg()
{
   init();
   this->file = BAD_FILE;
}

fs_reg::fs_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->stride = 1;
   if (this->file == IMM &&
       (this->type != BRW_REGISTER_TYPE_V &&
        this->type != BRW_REGISTER_TYPE_UV &&
        this->type != BRW_REGISTER_TYPE_VF)) {
      this->stride = 0;
   }
}

bool
fs_reg::equals(const fs_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           stride == r.stride);
}

bool
fs_reg::negative_equals(const fs_reg &r) const
{
   return (this->backend_reg::negative_equals(r) &&
           stride == r.stride);
}

bool
fs_reg::is_contiguous() const
{
   return stride == 1;
}

unsigned
fs_reg::component_size(unsigned width) const
{
   const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
                            hstride == 0 ? 0 :
                            1 << (hstride - 1));
   return MAX2(width * stride, 1) * type_sz(type);
}
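
/* e.g. component_size(8) for a float VGRF with stride 2 is
 * MAX2(8 * 2, 1) * 4 == 64 bytes, i.e. two 32-byte GRFs.
 */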

extern "C" int
type_size_scalar(const struct glsl_type *type, bool bindless)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_FLOAT16:
      return DIV_ROUND_UP(type->components(), 2);
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
      return DIV_ROUND_UP(type->components(), 4);
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
      return type->components() * 2;
   case GLSL_TYPE_ARRAY:
      return type_size_scalar(type->fields.array, bindless) * type->length;
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size_scalar(type->fields.structure[i].type, bindless);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
      if (bindless)
         return type->components() * 2;
      /* fallthrough */
   case GLSL_TYPE_ATOMIC_UINT:
      /* Samplers, atomics, and images take up no register space, since
       * they're baked in at link time.
       */
      return 0;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }

   return 0;
}
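
/* e.g. a vec4 counts as 4 scalar slots, a dvec3 as 6, and
 * struct { vec3 a; float b; } as 4 (slots are 32-bit units per channel).
 */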

/**
 * Create a MOV to read the timestamp register.
 */
fs_reg
fs_visitor::get_timestamp(const fs_builder &bld)
{
   assert(devinfo->gen >= 7);

   fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                          BRW_ARF_TIMESTAMP,
                                          0),
                             BRW_REGISTER_TYPE_UD));

   fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* We want to read the 3 fields we care about even if it's not enabled in
    * the dispatch.
    */
   bld.group(4, 0).exec_all().MOV(dst, ts);

   return dst;
}

void
fs_visitor::emit_shader_time_begin()
{
   /* We want only the low 32 bits of the timestamp. Since it's running
    * at the GPU clock rate of ~1.2GHz, it will roll over every ~3 seconds,
    * which is plenty of time for our purposes. It is identical across the
    * EUs, but since it's tracking GPU core speed it will increment at a
    * varying rate as render P-states change.
    */
   shader_start_time = component(
      get_timestamp(bld.annotate("shader time start")), 0);
}

void
fs_visitor::emit_shader_time_end()
{
   /* Insert our code just before the final SEND with EOT. */
   exec_node *end = this->instructions.get_tail();
   assert(end && ((fs_inst *) end)->eot);
   const fs_builder ibld = bld.annotate("shader time end")
                              .exec_all().at(NULL, end);
   const fs_reg timestamp = get_timestamp(ibld);

   /* We only use the low 32 bits of the timestamp - see
    * emit_shader_time_begin().
    *
    * We could also check if render P-states have changed (or anything
    * else that might disrupt timing) by setting smear to 2 and checking if
    * that field is != 0.
    */
   const fs_reg shader_end_time = component(timestamp, 0);

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   const fs_reg reset = component(timestamp, 2);
   set_condmod(BRW_CONDITIONAL_Z,
               ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
   ibld.IF(BRW_PREDICATE_NORMAL);

   fs_reg start = shader_start_time;
   start.negate = true;
   const fs_reg diff = component(fs_reg(VGRF, alloc.allocate(1),
                                        BRW_REGISTER_TYPE_UD),
                                 0);
   const fs_builder cbld = ibld.group(1, 0);
   cbld.ADD(diff, start, shader_end_time);

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles. Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   cbld.ADD(diff, diff, brw_imm_ud(-2u));
   SHADER_TIME_ADD(cbld, 0, diff);
   SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ELSE);
   SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
   ibld.emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
                            int shader_time_subindex,
                            fs_reg value)
{
   int index = shader_time_index * 3 + shader_time_subindex;
   struct brw_reg offset = brw_imm_d(index * BRW_SHADER_TIME_STRIDE);

   fs_reg payload;
   if (dispatch_width == 8)
      payload = vgrf(glsl_type::uvec2_type);
   else
      payload = vgrf(glsl_type::uint_type);

   bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
}

void
fs_visitor::vfail(const char *format, va_list va)
{
   char *msg;

   if (failed)
      return;

   failed = true;

   msg = ralloc_vasprintf(mem_ctx, format, va);
   msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);

   this->fail_msg = msg;

   if (debug_enabled) {
      fprintf(stderr, "%s", msg);
   }
}

void
fs_visitor::fail(const char *format, ...)
{
   va_list va;

   va_start(va, format);
   vfail(format, va);
   va_end(va);
}

/**
 * Mark this program as impossible to compile with dispatch width greater
 * than n.
 *
 * During the SIMD8 compile (which happens first), we can detect and flag
 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
 * SIMD16+ compile altogether.
 *
 * During a compile of dispatch width greater than n (if one happens anyway),
 * this just calls fail().
 */
void
fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
{
   if (dispatch_width > n) {
      fail("%s", msg);
   } else {
      max_dispatch_width = n;
      compiler->shader_perf_log(log_data,
                                "Shader dispatch width limited to SIMD%d: %s",
                                n, msg);
   }
}

/**
 * Returns true if the instruction has a flag that means it won't
 * update an entire destination register.
 *
 * For example, dead code elimination and live variable analysis want to know
 * when a write to a variable screens off any preceding values that were in
 * it.
 */
bool
fs_inst::is_partial_write() const
{
   return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
           (this->exec_size * type_sz(this->dst.type)) < 32 ||
           !this->dst.is_contiguous() ||
           this->dst.offset % REG_SIZE != 0);
}
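
/* e.g. a SIMD8 write to a 16-bit (W/UW/HF) destination covers only
 * 8 * 2 == 16 bytes of the 32-byte GRF, so it counts as a partial write
 * even when unpredicated.
 */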

unsigned
fs_inst::components_read(unsigned i) const
{
   /* Return zero if the source is not present. */
   if (src[i].file == BAD_FILE)
      return 0;

   switch (opcode) {
   case FS_OPCODE_LINTERP:
      if (i == 0)
         return 2;
      else
         return 1;

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:
      assert(i == 0);
      return 2;

   case FS_OPCODE_FB_WRITE_LOGICAL:
      assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
      /* First/second FB write color. */
      if (i < 2)
         return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
      else
         return 1;

   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
             src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
      /* Texture coordinates. */
      if (i == TEX_LOGICAL_SRC_COORDINATE)
         return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
      /* Texture derivatives. */
      else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
               opcode == SHADER_OPCODE_TXD_LOGICAL)
         return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
      /* Texture offset. */
      else if (i == TEX_LOGICAL_SRC_TG4_OFFSET)
         return 2;
      /* MCS */
      else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
         return 2;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM);
      /* Surface coordinates. */
      if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
      /* Surface operation source (ignored for reads). */
      else if (i == SURFACE_LOGICAL_SRC_DATA)
         return 0;
      else
         return 1;

   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
             src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
      /* Surface coordinates. */
      if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
      /* Surface operation source. */
      else if (i == SURFACE_LOGICAL_SRC_DATA)
         return src[SURFACE_LOGICAL_SRC_IMM_ARG].ud;
      else
         return 1;

   case SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL:
      assert(src[2].file == IMM);
      return 1;

   case SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL:
      assert(src[2].file == IMM);
      return i == 1 ? src[2].ud : 1;

   case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
      assert(src[2].file == IMM);
      if (i == 1) {
         /* Data source */
         const unsigned op = src[2].ud;
         switch (op) {
         case BRW_AOP_INC:
         case BRW_AOP_DEC:
         case BRW_AOP_PREDEC:
            return 0;
         case BRW_AOP_CMPWR:
            return 2;
         default:
            return 1;
         }
      } else {
         return 1;
      }

   case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      assert(src[2].file == IMM);
      if (i == 1) {
         /* Data source */
         const unsigned op = src[2].ud;
         return op == BRW_AOP_FCMPWR ? 2 : 1;
      } else {
         return 1;
      }

   case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
   case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
      /* Scattered logical opcodes use the following params:
       * src[0] Surface coordinates
       * src[1] Surface operation source (ignored for reads)
       * src[2] Surface
       * src[3] IMM with the dimension count (always 1)
       * src[4] IMM with the bit size of the scattered read/write: 8, 16 or 32
       */
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
             src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
      return i == SURFACE_LOGICAL_SRC_DATA ? 0 : 1;

   case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
   case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
             src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
      return 1;

   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
             src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
      const unsigned op = src[SURFACE_LOGICAL_SRC_IMM_ARG].ud;
      /* Surface coordinates. */
      if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
      /* Surface operation source. */
      else if (i == SURFACE_LOGICAL_SRC_DATA && op == BRW_AOP_CMPWR)
         return 2;
      else if (i == SURFACE_LOGICAL_SRC_DATA &&
               (op == BRW_AOP_INC || op == BRW_AOP_DEC || op == BRW_AOP_PREDEC))
         return 0;
      else
         return 1;
   }
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return (i == 0 ? 2 : 1);

   case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL: {
      assert(src[SURFACE_LOGICAL_SRC_IMM_DIMS].file == IMM &&
             src[SURFACE_LOGICAL_SRC_IMM_ARG].file == IMM);
      const unsigned op = src[SURFACE_LOGICAL_SRC_IMM_ARG].ud;
      /* Surface coordinates. */
      if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
      /* Surface operation source. */
      else if (i == SURFACE_LOGICAL_SRC_DATA && op == BRW_AOP_FCMPWR)
         return 2;
      else
         return 1;
   }

   default:
      return 1;
   }
}

unsigned
fs_inst::size_read(int arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SEND:
      if (arg == 2) {
         return mlen * REG_SIZE;
      } else if (arg == 3) {
         return ex_mlen * REG_SIZE;
      }
      break;

   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_REP_FB_WRITE:
      if (arg == 0) {
         if (base_mrf >= 0)
            return src[0].file == BAD_FILE ? 0 : 2 * REG_SIZE;
         else
            return mlen * REG_SIZE;
      }
      break;

   case FS_OPCODE_FB_READ:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
   case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
   case SHADER_OPCODE_URB_READ_SIMD8:
   case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;

   case FS_OPCODE_SET_SAMPLE_ID:
      if (arg == 1)
         return 1;
      break;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      /* The payload is actually stored in src1 */
      if (arg == 1)
         return mlen * REG_SIZE;
      break;

   case FS_OPCODE_LINTERP:
      if (arg == 1)
         return 16;
      break;

   case SHADER_OPCODE_LOAD_PAYLOAD:
      if (arg < this->header_size)
         return REG_SIZE;
      break;

   case CS_OPCODE_CS_TERMINATE:
   case SHADER_OPCODE_BARRIER:
      return REG_SIZE;

   case SHADER_OPCODE_MOV_INDIRECT:
      if (arg == 0) {
         assert(src[2].file == IMM);
         return src[2].ud;
      }
      break;

   default:
      if (is_tex() && arg == 0 && src[0].file == VGRF)
         return mlen * REG_SIZE;
      break;
   }

   switch (src[arg].file) {
   case UNIFORM:
   case IMM:
      return components_read(arg) * type_sz(src[arg].type);
   case BAD_FILE:
   case ARF:
   case FIXED_GRF:
   case VGRF:
   case ATTR:
      return components_read(arg) * src[arg].component_size(exec_size);
   case MRF:
      unreachable("MRF registers are not allowed as sources");
   }
   return 0;
}

namespace {
   unsigned
   predicate_width(brw_predicate predicate)
   {
      switch (predicate) {
      case BRW_PREDICATE_NONE:            return 1;
      case BRW_PREDICATE_NORMAL:          return 1;
      case BRW_PREDICATE_ALIGN1_ANY2H:    return 2;
      case BRW_PREDICATE_ALIGN1_ALL2H:    return 2;
      case BRW_PREDICATE_ALIGN1_ANY4H:    return 4;
      case BRW_PREDICATE_ALIGN1_ALL4H:    return 4;
      case BRW_PREDICATE_ALIGN1_ANY8H:    return 8;
      case BRW_PREDICATE_ALIGN1_ALL8H:    return 8;
      case BRW_PREDICATE_ALIGN1_ANY16H:   return 16;
      case BRW_PREDICATE_ALIGN1_ALL16H:   return 16;
      case BRW_PREDICATE_ALIGN1_ANY32H:   return 32;
      case BRW_PREDICATE_ALIGN1_ALL32H:   return 32;
      default: unreachable("Unsupported predicate");
      }
   }

   /* Return the subset of flag registers that an instruction could
    * potentially read or write based on the execution controls and flag
    * subregister number of the instruction.
    */
   unsigned
   flag_mask(const fs_inst *inst, unsigned width)
   {
      assert(util_is_power_of_two_nonzero(width));
      const unsigned start = (inst->flag_subreg * 16 + inst->group) &
                             ~(width - 1);
      const unsigned end = start + ALIGN(inst->exec_size, width);
      return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
   }
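
   /* e.g. width == 16 for an exec_size 16 instruction with flag_subreg == 1
    * and group == 0 gives start == 16 and end == 32, so the result is
    * ((1 << 4) - 1) & ~((1 << 2) - 1) == 0xc: both halves of f1.
    */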

   unsigned
   bit_mask(unsigned n)
   {
      return (n >= CHAR_BIT * sizeof(bit_mask(n)) ? ~0u : (1u << n) - 1);
   }
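
   /* e.g. bit_mask(3) == 0x7; the n >= 32 check avoids the undefined
    * behavior of shifting a 32-bit value by its full width.
    */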

   unsigned
   flag_mask(const fs_reg &r, unsigned sz)
   {
      if (r.file == ARF) {
         const unsigned start = (r.nr - BRW_ARF_FLAG) * 4 + r.subnr;
         const unsigned end = start + sz;
         return bit_mask(end) & ~bit_mask(start);
      } else {
         return 0;
      }
   }
}

unsigned
fs_inst::flags_read(const gen_device_info *devinfo) const
{
   if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
       predicate == BRW_PREDICATE_ALIGN1_ALLV) {
      /* The vertical predication modes combine corresponding bits from
       * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
       */
      const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
      return flag_mask(this, 1) << shift | flag_mask(this, 1);
   } else if (predicate) {
      return flag_mask(this, predicate_width(predicate));
   } else {
      unsigned mask = 0;
      for (int i = 0; i < sources; i++) {
         mask |= flag_mask(src[i], size_read(i));
      }
      return mask;
   }
}
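
/* e.g. a SIMD8 instruction with ANYV predication and flag_subreg == 0 on
 * Gen7 reads both f0.0 and f1.0: flag_mask(this, 1) is 0x1, so the result
 * is 0x1 << 4 | 0x1 == 0x11.
 */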

unsigned
fs_inst::flags_written() const
{
   if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
                            opcode != BRW_OPCODE_CSEL &&
                            opcode != BRW_OPCODE_IF &&
                            opcode != BRW_OPCODE_WHILE)) ||
       opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL ||
       opcode == FS_OPCODE_FB_WRITE) {
      return flag_mask(this, 1);
   } else {
      return flag_mask(dst, size_written);
   }
}

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(const fs_inst *inst) const
{
   if (inst->mlen == 0)
      return 0;

   if (inst->base_mrf == -1)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      return 2 * dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_SAMPLEINFO:
      return 1;
   case FS_OPCODE_FB_WRITE:
   case FS_OPCODE_REP_FB_WRITE:
      return inst->src[0].file == BAD_FILE ? 0 : 2;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 1;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
      return inst->mlen;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->mlen;
   default:
      unreachable("not reached");
   }
}
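
/* e.g. a SIMD16 POW has two source operands, one MRF per 8 channels each,
 * so it implies 2 * 16 / 8 == 4 MRF writes.
 */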

fs_reg
fs_visitor::vgrf(const glsl_type *const type)
{
   int reg_width = dispatch_width / 8;
   return fs_reg(VGRF,
                 alloc.allocate(type_size_scalar(type, false) * reg_width),
                 brw_type_for_base_type(type));
}

fs_reg::fs_reg(enum brw_reg_file file, int nr)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = BRW_REGISTER_TYPE_F;
   this->stride = (file == UNIFORM ? 0 : 1);
}

fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
{
   init();
   this->file = file;
   this->nr = nr;
   this->type = type;
   this->stride = (file == UNIFORM ? 0 : 1);
}

/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
 * This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(fs_visitor *v)
{
   this->push_constant_loc = v->push_constant_loc;
   this->pull_constant_loc = v->pull_constant_loc;
   this->uniforms = v->uniforms;
   this->subgroup_id = v->subgroup_id;
}

void
fs_visitor::emit_fragcoord_interpolation(fs_reg wpos)
{
   assert(stage == MESA_SHADER_FRAGMENT);

   /* gl_FragCoord.x */
   bld.MOV(wpos, this->pixel_x);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.y */
   bld.MOV(wpos, this->pixel_y);
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.z */
   if (devinfo->gen >= 6) {
      bld.MOV(wpos, fetch_payload_reg(bld, payload.source_depth_reg));
   } else {
      bld.emit(FS_OPCODE_LINTERP, wpos,
               this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
               component(interp_reg(VARYING_SLOT_POS, 2), 0));
   }
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   bld.MOV(wpos, this->wpos_w);
}

enum brw_barycentric_mode
brw_barycentric_mode(enum glsl_interp_mode mode, nir_intrinsic_op op)
{
   /* Barycentric modes don't make sense for flat inputs. */
   assert(mode != INTERP_MODE_FLAT);

   unsigned bary;
   switch (op) {
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_at_offset:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_PIXEL;
      break;
   case nir_intrinsic_load_barycentric_centroid:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_CENTROID;
      break;
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_at_sample:
      bary = BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE;
      break;
   default:
      unreachable("invalid intrinsic");
   }

   if (mode == INTERP_MODE_NOPERSPECTIVE)
      bary += 3;

   return (enum brw_barycentric_mode) bary;
}
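
/* e.g. nir_intrinsic_load_barycentric_centroid with
 * INTERP_MODE_NOPERSPECTIVE yields BRW_BARYCENTRIC_PERSPECTIVE_CENTROID + 3,
 * i.e. BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID.
 */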

/**
 * Turn one of the two CENTROID barycentric modes into PIXEL mode.
 */
static enum brw_barycentric_mode
centroid_to_pixel(enum brw_barycentric_mode bary)
{
   assert(bary == BRW_BARYCENTRIC_PERSPECTIVE_CENTROID ||
          bary == BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);
   return (enum brw_barycentric_mode) ((unsigned) bary - 1);
}
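
/* This relies on the enum layout: each CENTROID mode directly follows its
 * PIXEL counterpart, so subtracting one maps CENTROID to PIXEL in both the
 * perspective and noperspective groups.
 */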

fs_reg *
fs_visitor::emit_frontfacing_interpolation()
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));

   if (devinfo->gen >= 12) {
      fs_reg g1 = fs_reg(retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_W));

      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_W);
      bld.ASR(tmp, g1, brw_imm_d(15));
      bld.NOT(*reg, tmp);
   } else if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (~0/true or 0/false).
       *
       * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
       * this task in only one instruction:
       *  - a negation source modifier will flip the bit; and
       *  - a W -> D type conversion will sign extend the bit into the high
       *    word of the destination.
       *
       * An ASR 15 fills the low word of the destination.
       */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
      g0.negate = true;

      bld.ASR(*reg, g0, brw_imm_d(15));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (1/true or 0/false).
       *
       * Like in the above case, since the bit is the MSB of g1.6:UD we can use
       * the negation source modifier to flip it. Unfortunately the SHR
       * instruction only operates on UD (or D with an abs source modifier)
       * sources without negation.
       *
       * Instead, use ASR (which will give ~0/true or 0/false).
       */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
      g1_6.negate = true;

      bld.ASR(*reg, g1_6, brw_imm_d(31));
   }

   return reg;
}

void
fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   assert(dst.type == BRW_REGISTER_TYPE_F);

   if (wm_prog_data->persample_dispatch) {
      /* Convert int_sample_pos to floating point */
      bld.MOV(dst, int_sample_pos);
      /* Scale to the range [0, 1] */
      bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
   } else {
      /* From the ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SamplePosition will always be
       *  (0.5, 0.5)."
       */
      bld.MOV(dst, brw_imm_f(0.5f));
   }
}

fs_reg *
fs_visitor::emit_samplepos_setup()
{
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample position");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
   fs_reg pos = *reg;
   fs_reg int_sample_x = vgrf(glsl_type::int_type);
   fs_reg int_sample_y = vgrf(glsl_type::int_type);

   /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
    * mode will be enabled.
    *
    * From the Ivy Bridge PRM, volume 2 part 1, page 344:
    * R31.1:0         Position Offset X/Y for Slot[3:0]
    * R31.3:2         Position Offset X/Y for Slot[7:4]
    * .....
    *
    * The X, Y sample positions come in as bytes in thread payload. So, read
    * the positions using vstride=16, width=8, hstride=2.
    */
   const fs_reg sample_pos_reg =
      fetch_payload_reg(abld, payload.sample_pos_reg, BRW_REGISTER_TYPE_W);

   /* Compute gl_SamplePosition.x */
   abld.MOV(int_sample_x, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 0));
   compute_sample_position(offset(pos, abld, 0), int_sample_x);

   /* Compute gl_SamplePosition.y */
   abld.MOV(int_sample_y, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 1));
   compute_sample_position(offset(pos, abld, 1), int_sample_y);
   return reg;
}

fs_reg *
fs_visitor::emit_sampleid_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample id");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uint_type));

   if (!key->multisample_fbo) {
      /* As per the GL_ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SampleID will always be zero."
       */
      abld.MOV(*reg, brw_imm_d(0));
   } else if (devinfo->gen >= 8) {
      /* Sample ID comes in as 4-bit numbers in g1.0:
       *
       *    15:12 Slot 3 SampleID (only used in SIMD16)
       *     11:8 Slot 2 SampleID (only used in SIMD16)
       *      7:4 Slot 1 SampleID
       *      3:0 Slot 0 SampleID
       *
       * Each slot corresponds to four channels, so we want to replicate each
       * half-byte value to 4 channels in a row:
       *
       *    dst+0:    .7    .6    .5    .4    .3    .2    .1    .0
       *             7:4   7:4   7:4   7:4   3:0   3:0   3:0   3:0
       *
       *    dst+1:    .7    .6    .5    .4    .3    .2    .1    .0  (if SIMD16)
       *           15:12 15:12 15:12 15:12  11:8  11:8  11:8  11:8
       *
       * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
       * channels to read the first byte (7:0), and the second group of 8
       * channels to read the second byte (15:8). Then, we shift right by
       * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
       * values into place. Finally, we AND with 0xf to keep the low nibble.
       *
       *    shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
       *    and(16) dst<1>D tmp<8,8,1>W  0xf:W
       *
       * TODO: These payload bits exist on Gen7 too, but they appear to always
       *       be zero, so this code fails to work. We should find out why.
       */
      const fs_reg tmp = abld.vgrf(BRW_REGISTER_TYPE_UW);

      for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
         const fs_builder hbld = abld.group(MIN2(16, dispatch_width), i);
         hbld.SHR(offset(tmp, hbld, i),
                  stride(retype(brw_vec1_grf(1 + i, 0), BRW_REGISTER_TYPE_UB),
                         1, 8, 0),
                  brw_imm_v(0x44440000));
      }

      abld.AND(*reg, tmp, brw_imm_w(0xf));
   } else {
      const fs_reg t1 = component(abld.vgrf(BRW_REGISTER_TYPE_UD), 0);
      const fs_reg t2 = abld.vgrf(BRW_REGISTER_TYPE_UW);

      /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
       * 8x multisampling, subspan 0 will represent sample N (where N
       * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
       * 7. We can find the value of N by looking at R0.0 bits 7:6
       * ("Starting Sample Pair Index (SSPI)") and multiplying by two
       * (since samples are always delivered in pairs). That is, we
       * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
       * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
       * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
       * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
       * populating a temporary variable with the sequence (0, 1, 2, 3),
       * and then reading from it using vstride=1, width=4, hstride=0.
       * These computations hold good for 4x multisampling as well.
       *
       * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
       * the first four slots are sample 0 of subspan 0; the next four
       * are sample 1 of subspan 0; the third group is sample 0 of
       * subspan 1, and finally sample 1 of subspan 1.
       */

      /* SKL+ has an extra bit for the Starting Sample Pair Index to
       * accommodate 16x MSAA.
       */
      abld.exec_all().group(1, 0)
          .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
               brw_imm_ud(0xc0));
      abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));

      /* This works for SIMD8-SIMD16. It also works for SIMD32 but only if we
       * can assume 4x MSAA. Disallow it on IVB+
       *
       * FINISHME: One day, we could come up with a way to do this that
       * actually works on gen7.
       */
      if (devinfo->gen >= 7)
         limit_dispatch_width(16, "gl_SampleId is unsupported in SIMD32 on gen7");
      abld.exec_all().group(8, 0).MOV(t2, brw_imm_v(0x32103210));

      /* This special instruction takes care of setting vstride=1,
       * width=4, hstride=0 of t2 during an ADD instruction.
       */
      abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
   }

   return reg;
}

fs_reg *
fs_visitor::emit_samplemaskin_setup()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   assert(devinfo->gen >= 6);

   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   fs_reg coverage_mask =
      fetch_payload_reg(bld, payload.sample_mask_in_reg, BRW_REGISTER_TYPE_D);

   if (wm_prog_data->persample_dispatch) {
      /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
       * and a mask representing which sample is being processed by the
       * current shader invocation.
       *
       * From the OES_sample_variables specification:
       * "When per-sample shading is active due to the use of a fragment input
       *  qualified by "sample" or due to the use of the gl_SampleID or
       *  gl_SamplePosition variables, only the bit for the current sample is
       *  set in gl_SampleMaskIn."
       */
      const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");

      if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
         nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();

      fs_reg one = vgrf(glsl_type::int_type);
      fs_reg enabled_mask = vgrf(glsl_type::int_type);
      abld.MOV(one, brw_imm_d(1));
      abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
      abld.AND(*reg, enabled_mask, coverage_mask);
   } else {
      /* In per-pixel mode, the coverage mask is sufficient. */
      *reg = coverage_mask;
   }
   return reg;
}

fs_reg
fs_visitor::resolve_source_modifiers(const fs_reg &src)
{
   if (!src.abs && !src.negate)
      return src;

   fs_reg temp = bld.vgrf(src.type);
   bld.MOV(temp, src);

   return temp;
}

void
fs_visitor::emit_discard_jump()
{
   assert(brw_wm_prog_data(this->prog_data)->uses_kill);

   /* For performance, after a discard, jump to the end of the
    * shader if all relevant channels have been discarded.
    */
   fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
   discard_jump->flag_subreg = 1;

   discard_jump->predicate = BRW_PREDICATE_ALIGN1_ANY4H;
   discard_jump->predicate_inverse = true;
}

void
fs_visitor::emit_gs_thread_end()
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   if (gs_compile->control_data_header_size_bits > 0) {
      emit_gs_control_data_bits(this->final_gs_vertex_count);
   }

   const fs_builder abld = bld.annotate("thread end");
   fs_inst *inst;

   if (gs_prog_data->static_vertex_count != -1) {
      foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
         if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
             prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
            prev->eot = true;

            /* Delete now dead instructions. */
            foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
               if (dead == prev)
                  break;
               dead->remove();
            }
            return;
         } else if (prev->is_control_flow() || prev->has_side_effects()) {
            break;
         }
      }
      fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
      inst->mlen = 1;
   } else {
      fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
      sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
      sources[1] = this->final_gs_vertex_count;
      abld.LOAD_PAYLOAD(payload, sources, 2, 2);
      inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->mlen = 2;
   }
   inst->eot = true;
   inst->offset = 0;
}

void
fs_visitor::assign_curb_setup()
{
   unsigned uniform_push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);

   unsigned ubo_push_length = 0;
   unsigned ubo_push_start[4];
   for (int i = 0; i < 4; i++) {
      ubo_push_start[i] = 8 * (ubo_push_length + uniform_push_length);
      ubo_push_length += stage_prog_data->ubo_ranges[i].length;
   }

   prog_data->curb_read_length = uniform_push_length + ubo_push_length;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == UNIFORM) {
            int uniform_nr = inst->src[i].nr + inst->src[i].offset / 4;
            int constant_nr;
            if (inst->src[i].nr >= UBO_START) {
               /* constant_nr is in 32-bit units, the rest are in bytes */
               constant_nr = ubo_push_start[inst->src[i].nr - UBO_START] +
                             inst->src[i].offset / 4;
            } else if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
               constant_nr = push_constant_loc[uniform_nr];
            } else {
               /* Section 5.11 of the OpenGL 4.1 spec says:
                * "Out-of-bounds reads return undefined values, which include
                *  values from other variables of the active program or zero."
                * Just return the first push constant.
                */
               constant_nr = 0;
            }

            struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);
            brw_reg.abs = inst->src[i].abs;
            brw_reg.negate = inst->src[i].negate;

            assert(inst->src[i].stride == 0);
            inst->src[i] = byte_offset(
               retype(brw_reg, inst->src[i].type),
               inst->src[i].offset % 4);
         }
      }
   }

   /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
   this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
}

static void
calculate_urb_setup(const struct gen_device_info *devinfo,
                    const struct brw_wm_prog_key *key,
                    struct brw_wm_prog_data *prog_data,
                    const nir_shader *nir)
{
   memset(prog_data->urb_setup, -1,
          sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (devinfo->gen >= 6) {
      if (util_bitcount64(nir->info.inputs_read &
                          BRW_FS_VARYING_INPUT_MASK) <= 16) {
         /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
          * first 16 varying inputs, so we can put them wherever we want.
          * Just put them in order.
          *
          * This is useful because it means that (a) inputs not used by the
          * fragment shader won't take up valuable register space, and (b) we
          * won't have to recompile the fragment shader if it gets paired with
          * a different vertex (or geometry) shader.
          */
         for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
            if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                BITFIELD64_BIT(i)) {
               prog_data->urb_setup[i] = urb_next++;
            }
         }
      } else {
         /* We have enough input varyings that the SF/SBE pipeline stage can't
          * arbitrarily rearrange them to suit our whim; we have to put them
          * in an order that matches the output of the previous pipeline stage
          * (geometry or vertex shader).
          */
         struct brw_vue_map prev_stage_vue_map;
         brw_compute_vue_map(devinfo, &prev_stage_vue_map,
                             key->input_slots_valid,
                             nir->info.separate_shader);

         int first_slot =
            brw_compute_first_urb_slot_required(nir->info.inputs_read,
                                                &prev_stage_vue_map);

         assert(prev_stage_vue_map.num_slots <= first_slot + 32);
         for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
              slot++) {
            int varying = prev_stage_vue_map.slot_to_varying[slot];
            if (varying != BRW_VARYING_SLOT_PAD &&
                (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
                 BITFIELD64_BIT(varying))) {
               prog_data->urb_setup[varying] = slot - first_slot;
            }
         }
         urb_next = prev_stage_vue_map.num_slots - first_slot;
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VARYING_SLOT_PSIZ)
            continue;

         if (key->input_slots_valid & BITFIELD64_BIT(i)) {
            /* The back color slot is skipped when the front color is
             * also written to. In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader. So the register number must always be
             * incremented, mapped or not.
             */
            if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
               prog_data->urb_setup[i] = urb_next;
            urb_next++;
         }
      }

      /*
       * It's an FS-only attribute, and we did the interpolation for this
       * attribute in the SF thread. So count it here, too.
       *
       * See compile_sf_prog() for more info.
       */
      if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
         prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
1789 }
1790
1791 prog_data->num_varying_inputs = urb_next;
1792 }
1793
1794 void
1795 fs_visitor::assign_urb_setup()
1796 {
1797 assert(stage == MESA_SHADER_FRAGMENT);
1798 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1799
1800 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1801
1802 /* Offset all the urb_setup[] indices by the actual position of the
1803 * setup regs, now that the location of the constants has been chosen.
1804 */
1805 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1806 for (int i = 0; i < inst->sources; i++) {
1807 if (inst->src[i].file == ATTR) {
1808 /* ATTR regs in the FS are in units of logical scalar inputs each
1809 * of which consumes half of a GRF register.
1810 */
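/* A worked example with made-up numbers: assuming urb_start == 2, a
 * source with nr == 3 and offset == 0 lands in GRF 2 + 3/2 == 3, at
 * byte offset (3 % 2) * (REG_SIZE / 2) == 16 into that GRF.
 */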
1811 assert(inst->src[i].offset < REG_SIZE / 2);
1812 const unsigned grf = urb_start + inst->src[i].nr / 2;
1813 const unsigned offset = (inst->src[i].nr % 2) * (REG_SIZE / 2) +
1814 inst->src[i].offset;
1815 const unsigned width = inst->src[i].stride == 0 ?
1816 1 : MIN2(inst->exec_size, 8);
1817 struct brw_reg reg = stride(
1818 byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1819 offset),
1820 width * inst->src[i].stride,
1821 width, inst->src[i].stride);
1822 reg.abs = inst->src[i].abs;
1823 reg.negate = inst->src[i].negate;
1824 inst->src[i] = reg;
1825 }
1826 }
1827 }
1828
1829 /* Each attribute is 4 setup channels, each of which is half a reg. */
1830 this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
1831 }
1832
1833 void
1834 fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
1835 {
1836 for (int i = 0; i < inst->sources; i++) {
1837 if (inst->src[i].file == ATTR) {
1838 int grf = payload.num_regs +
1839 prog_data->curb_read_length +
1840 inst->src[i].nr +
1841 inst->src[i].offset / REG_SIZE;
1842
1843 /* As explained in brw_reg_from_fs_reg, from the Haswell PRM:
1844 *
1845 * VertStride must be used to cross GRF register boundaries. This
1846 * rule implies that elements within a 'Width' cannot cross GRF
1847 * boundaries.
1848 *
1849 * So, for registers that are large enough, we have to split the exec
1850 * size in two and trust the compression state to sort it out.
1851 */
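/* For example (illustrative numbers only): a SIMD16 source of type F
 * with stride 1 covers 16 * 1 * 4 == 64 bytes, i.e. two GRFs, so
 * exec_size below is halved to 8 and the resulting region is <8;8,1>,
 * leaving the second GRF to the compression state.
 */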
1852 unsigned total_size = inst->exec_size *
1853 inst->src[i].stride *
1854 type_sz(inst->src[i].type);
1855
1856 assert(total_size <= 2 * REG_SIZE);
1857 const unsigned exec_size =
1858 (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;
1859
1860 unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
1861 struct brw_reg reg =
1862 stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1863 inst->src[i].offset % REG_SIZE),
1864 exec_size * inst->src[i].stride,
1865 width, inst->src[i].stride);
1866 reg.abs = inst->src[i].abs;
1867 reg.negate = inst->src[i].negate;
1868
1869 inst->src[i] = reg;
1870 }
1871 }
1872 }
1873
1874 void
1875 fs_visitor::assign_vs_urb_setup()
1876 {
1877 struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
1878
1879 assert(stage == MESA_SHADER_VERTEX);
1880
1881 /* Each attribute is 4 regs. */
1882 this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;
1883
1884 assert(vs_prog_data->base.urb_read_length <= 15);
1885
1886 /* Rewrite all ATTR file references to the hw grf that they land in. */
1887 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1888 convert_attr_sources_to_hw_regs(inst);
1889 }
1890 }
1891
1892 void
1893 fs_visitor::assign_tcs_urb_setup()
1894 {
1895 assert(stage == MESA_SHADER_TESS_CTRL);
1896
1897 /* Rewrite all ATTR file references to HW_REGs. */
1898 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1899 convert_attr_sources_to_hw_regs(inst);
1900 }
1901 }
1902
1903 void
1904 fs_visitor::assign_tes_urb_setup()
1905 {
1906 assert(stage == MESA_SHADER_TESS_EVAL);
1907
1908 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1909
1910 first_non_payload_grf += 8 * vue_prog_data->urb_read_length;
1911
1912 /* Rewrite all ATTR file references to HW_REGs. */
1913 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1914 convert_attr_sources_to_hw_regs(inst);
1915 }
1916 }
1917
1918 void
1919 fs_visitor::assign_gs_urb_setup()
1920 {
1921 assert(stage == MESA_SHADER_GEOMETRY);
1922
1923 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
1924
1925 first_non_payload_grf +=
1926 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
1927
1928 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1929 /* Rewrite all ATTR file references to GRFs. */
1930 convert_attr_sources_to_hw_regs(inst);
1931 }
1932 }
1933
1934
1935 /**
1936 * Split large virtual GRFs into separate components if we can.
1937 *
1938 * This is mostly duplicated with what brw_fs_vector_splitting does,
1939 * but that's really conservative because it's afraid of doing
1940 * splitting that doesn't result in real progress after the rest of
1941 * the optimization phases, which would cause infinite looping in
1942 * optimization. We can do it once here, safely. This also has the
1943 * opportunity to split interpolated values, or maybe even uniforms,
1944 * which we don't have at the IR level.
1945 *
1946 * We want to split, because virtual GRFs are what we register
1947 * allocate and spill (due to contiguousness requirements for some
1948 * instructions), and they're what we naturally generate in the
1949 * codegen process, but most virtual GRFs don't actually need to be
1950 * contiguous sets of GRFs. If we split, we'll end up with reduced
1951 * live intervals and better dead code elimination and coalescing.
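 *
 * As a small illustrative sketch (hypothetical register sizes): if a
 * 4-GRF virtual GRF is only ever read and written one GRF at a time,
 * all three interior split points survive and it splits into four
 * 1-GRF registers; a single SIMD16 write covering its third and fourth
 * GRFs would instead clear the split point between those two slots,
 * yielding pieces of sizes 1, 1 and 2.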
1952 */
1953 void
1954 fs_visitor::split_virtual_grfs()
1955 {
1956 /* Compact the register file so we eliminate dead vgrfs. This
1957 * pass only defines split points for live registers, so dead
1958 * registers that are too large would hit assertions later.
1959 */
1960 compact_virtual_grfs();
1961
1962 int num_vars = this->alloc.count;
1963
1964 /* Count the total number of registers */
1965 int reg_count = 0;
1966 int vgrf_to_reg[num_vars];
1967 for (int i = 0; i < num_vars; i++) {
1968 vgrf_to_reg[i] = reg_count;
1969 reg_count += alloc.sizes[i];
1970 }
1971
1972 /* An array of "split points". For each register slot, this indicates
1973 * if this slot can be separated from the previous slot. Every time an
1974 * instruction uses multiple elements of a register (as a source or
1975 * destination), we mark the used slots as inseparable. Then we go
1976 * through and split the registers into the smallest pieces we can.
1977 */
1978 bool *split_points = new bool[reg_count];
1979 memset(split_points, 0, reg_count * sizeof(*split_points));
1980
1981 /* Mark all used registers as fully splittable */
1982 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1983 if (inst->dst.file == VGRF) {
1984 int reg = vgrf_to_reg[inst->dst.nr];
1985 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1986 split_points[reg + j] = true;
1987 }
1988
1989 for (int i = 0; i < inst->sources; i++) {
1990 if (inst->src[i].file == VGRF) {
1991 int reg = vgrf_to_reg[inst->src[i].nr];
1992 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1993 split_points[reg + j] = true;
1994 }
1995 }
1996 }
1997
1998 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1999 /* We fix up undef instructions later */
2000 if (inst->opcode == SHADER_OPCODE_UNDEF) {
2001 /* UNDEF instructions are currently only used to undef entire
2002 * registers. We need this invariant later when we split them.
2003 */
2004 assert(inst->dst.file == VGRF);
2005 assert(inst->dst.offset == 0);
2006 assert(inst->size_written == alloc.sizes[inst->dst.nr] * REG_SIZE);
2007 continue;
2008 }
2009
2010 if (inst->dst.file == VGRF) {
2011 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
2012 for (unsigned j = 1; j < regs_written(inst); j++)
2013 split_points[reg + j] = false;
2014 }
2015 for (int i = 0; i < inst->sources; i++) {
2016 if (inst->src[i].file == VGRF) {
2017 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
2018 for (unsigned j = 1; j < regs_read(inst, i); j++)
2019 split_points[reg + j] = false;
2020 }
2021 }
2022 }
2023
2024 int *new_virtual_grf = new int[reg_count];
2025 int *new_reg_offset = new int[reg_count];
2026
2027 int reg = 0;
2028 for (int i = 0; i < num_vars; i++) {
2029 /* As a quick sanity check, the first slot should never be a split point. */
2030 assert(split_points[reg] == false);
2031
2032 /* j = 0 case */
2033 new_reg_offset[reg] = 0;
2034 reg++;
2035 int offset = 1;
2036
2037 /* j > 0 case */
2038 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
2039 /* If this is a split point, reset the offset to 0 and allocate a
2040 * new virtual GRF for the previous 'offset' registers.
2041 */
2042 if (split_points[reg]) {
2043 assert(offset <= MAX_VGRF_SIZE);
2044 int grf = alloc.allocate(offset);
2045 for (int k = reg - offset; k < reg; k++)
2046 new_virtual_grf[k] = grf;
2047 offset = 0;
2048 }
2049 new_reg_offset[reg] = offset;
2050 offset++;
2051 reg++;
2052 }
2053
2054 /* The last one gets the original register number */
2055 assert(offset <= MAX_VGRF_SIZE);
2056 alloc.sizes[i] = offset;
2057 for (int k = reg - offset; k < reg; k++)
2058 new_virtual_grf[k] = i;
2059 }
2060 assert(reg == reg_count);
2061
2062 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2063 if (inst->opcode == SHADER_OPCODE_UNDEF) {
2064 const fs_builder ibld(this, block, inst);
2065 assert(inst->size_written % REG_SIZE == 0);
2066 unsigned reg_offset = 0;
2067 while (reg_offset < inst->size_written / REG_SIZE) {
2068 reg = vgrf_to_reg[inst->dst.nr] + reg_offset;
2069 ibld.UNDEF(fs_reg(VGRF, new_virtual_grf[reg], inst->dst.type));
2070 reg_offset += alloc.sizes[new_virtual_grf[reg]];
2071 }
2072 inst->remove(block);
2073 continue;
2074 }
2075
2076 if (inst->dst.file == VGRF) {
2077 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.offset / REG_SIZE;
2078 inst->dst.nr = new_virtual_grf[reg];
2079 inst->dst.offset = new_reg_offset[reg] * REG_SIZE +
2080 inst->dst.offset % REG_SIZE;
2081 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
2082 }
2083 for (int i = 0; i < inst->sources; i++) {
2084 if (inst->src[i].file == VGRF) {
2085 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].offset / REG_SIZE;
2086 inst->src[i].nr = new_virtual_grf[reg];
2087 inst->src[i].offset = new_reg_offset[reg] * REG_SIZE +
2088 inst->src[i].offset % REG_SIZE;
2089 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
2090 }
2091 }
2092 }
2093 invalidate_live_intervals();
2094
2095 delete[] split_points;
2096 delete[] new_virtual_grf;
2097 delete[] new_reg_offset;
2098 }
2099
2100 /**
2101 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
2102 *
2103 * During code generation, we create tons of temporary variables, many of
2104 * which get immediately killed and are never used again. Yet, in later
2105 * optimization and analysis passes, such as compute_live_intervals, we need
2106 * to loop over all the virtual GRFs. Compacting them can save a lot of
2107 * overhead.
2108 */
2109 bool
2110 fs_visitor::compact_virtual_grfs()
2111 {
2112 bool progress = false;
2113 int *remap_table = new int[this->alloc.count];
2114 memset(remap_table, -1, this->alloc.count * sizeof(int));
2115
2116 /* Mark which virtual GRFs are used. */
2117 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
2118 if (inst->dst.file == VGRF)
2119 remap_table[inst->dst.nr] = 0;
2120
2121 for (int i = 0; i < inst->sources; i++) {
2122 if (inst->src[i].file == VGRF)
2123 remap_table[inst->src[i].nr] = 0;
2124 }
2125 }
2126
2127 /* Compact the GRF arrays. */
2128 int new_index = 0;
2129 for (unsigned i = 0; i < this->alloc.count; i++) {
2130 if (remap_table[i] == -1) {
2131 /* We just found an unused register. This means that we are
2132 * actually going to compact something.
2133 */
2134 progress = true;
2135 } else {
2136 remap_table[i] = new_index;
2137 alloc.sizes[new_index] = alloc.sizes[i];
2138 invalidate_live_intervals();
2139 ++new_index;
2140 }
2141 }
2142
2143 this->alloc.count = new_index;
2144
2145 /* Patch all the instructions to use the newly renumbered registers */
2146 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2147 if (inst->dst.file == VGRF)
2148 inst->dst.nr = remap_table[inst->dst.nr];
2149
2150 for (int i = 0; i < inst->sources; i++) {
2151 if (inst->src[i].file == VGRF)
2152 inst->src[i].nr = remap_table[inst->src[i].nr];
2153 }
2154 }
2155
2156 /* Patch all the references to delta_xy, since they're used in register
2157 * allocation. If they're unused, switch them to BAD_FILE so we don't
2158 * think some random VGRF is delta_xy.
2159 */
2160 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2161 if (delta_xy[i].file == VGRF) {
2162 if (remap_table[delta_xy[i].nr] != -1) {
2163 delta_xy[i].nr = remap_table[delta_xy[i].nr];
2164 } else {
2165 delta_xy[i].file = BAD_FILE;
2166 }
2167 }
2168 }
2169
2170 delete[] remap_table;
2171
2172 return progress;
2173 }
2174
2175 static int
2176 get_subgroup_id_param_index(const brw_stage_prog_data *prog_data)
2177 {
2178 if (prog_data->nr_params == 0)
2179 return -1;
2180
2181 /* The local thread id is always the last parameter in the list */
2182 uint32_t last_param = prog_data->param[prog_data->nr_params - 1];
2183 if (last_param == BRW_PARAM_BUILTIN_SUBGROUP_ID)
2184 return prog_data->nr_params - 1;
2185
2186 return -1;
2187 }
2188
2189 /**
2190 * Struct for handling complex alignments.
2191 *
2192 * A complex alignment is stored as a multiplier and an offset. A value is
2193 * considered to be aligned if it is {offset} larger than a multiple of {mul}.
2194 * For instance, with an alignment of {8, 2}, cplx_align_apply would do the
2195 * following:
2196 *
2197 * N | cplx_align_apply({8, 2}, N)
2198 * ----+-----------------------------
2199 * 4 | 10
2200 * 6 | 10
2201 * 8 | 10
2202 * 10 | 10
2203 * 12 | 18
2204 * 14 | 18
2205 * 16 | 18
2206 */
2207 struct cplx_align {
2208 unsigned mul:4;
2209 unsigned offset:4;
2210 };
2211
2212 #define CPLX_ALIGN_MAX_MUL 8
2213
2214 static void
2215 cplx_align_assert_sane(struct cplx_align a)
2216 {
2217 assert(a.mul > 0 && util_is_power_of_two_nonzero(a.mul));
2218 assert(a.offset < a.mul);
2219 }
2220
2221 /**
2222 * Combines two alignments to produce a least common multiple of sorts.
2223 *
2224 * The returned alignment is the smallest (in terms of multiplier) such that
2225 * anything aligned to both a and b will be aligned to the new alignment.
2226 * This function will assert-fail if a and b are not compatible, i.e. if the
2227 * offset parameters are such that no common alignment is possible.
2228 */
2229 static struct cplx_align
2230 cplx_align_combine(struct cplx_align a, struct cplx_align b)
2231 {
2232 cplx_align_assert_sane(a);
2233 cplx_align_assert_sane(b);
2234
2235 /* Assert that the alignments agree. */
2236 assert((a.offset & (b.mul - 1)) == (b.offset & (a.mul - 1)));
2237
2238 return a.mul > b.mul ? a : b;
2239 }
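/* A hedged example of the combine rule (values invented for
 * illustration): combining {4, 0} with {8, 4} passes the compatibility
 * assert since (0 & 7) == (4 & 3) == 0, and returns {8, 4}, the larger
 * multiplier; anything that is 4 modulo 8 is also 0 modulo 4.
 */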
2240
2241 /**
2242 * Apply a complex alignment
2243 *
2244 * This function will return the smallest number greater than or equal to
2245 * offset that is aligned to align.
2246 */
2247 static unsigned
2248 cplx_align_apply(struct cplx_align align, unsigned offset)
2249 {
2250 return ALIGN(offset - align.offset, align.mul) + align.offset;
2251 }
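/* Working one entry of the table in the cplx_align comment by hand:
 * with align == {8, 2} and offset == 12 we get
 * ALIGN(12 - 2, 8) + 2 == 16 + 2 == 18, the smallest value >= 12 that
 * is 2 larger than a multiple of 8.
 */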
2252
2253 #define UNIFORM_SLOT_SIZE 4
2254
2255 struct uniform_slot_info {
2256 /** True if the given uniform slot is live */
2257 unsigned is_live:1;
2258
2259 /** True if this slot and the next slot must remain contiguous */
2260 unsigned contiguous:1;
2261
2262 struct cplx_align align;
2263 };
2264
2265 static void
2266 mark_uniform_slots_read(struct uniform_slot_info *slots,
2267 unsigned num_slots, unsigned alignment)
2268 {
2269 assert(alignment > 0 && util_is_power_of_two_nonzero(alignment));
2270 assert(alignment <= CPLX_ALIGN_MAX_MUL);
2271
2272 /* We can't align a slot to anything less than the slot size */
2273 alignment = MAX2(alignment, UNIFORM_SLOT_SIZE);
2274
2275 struct cplx_align align = {alignment, 0};
2276 cplx_align_assert_sane(align);
2277
2278 for (unsigned i = 0; i < num_slots; i++) {
2279 slots[i].is_live = true;
2280 if (i < num_slots - 1)
2281 slots[i].contiguous = true;
2282
2283 align.offset = (i * UNIFORM_SLOT_SIZE) & (align.mul - 1);
2284 if (slots[i].align.mul == 0) {
2285 slots[i].align = align;
2286 } else {
2287 slots[i].align = cplx_align_combine(slots[i].align, align);
2288 }
2289 }
2290 }
2291
2292 /**
2293 * Assign UNIFORM file registers to either push constants or pull constants.
2294 *
2295 * We allow a fragment shader to use more than the specified minimum
2296 * value of the maximum number of fragment shader uniform components
2297 * (64). If there are too many of these, they'd fill up all of register
2298 * space. So, this will push some of them out to the pull constant
2299 * buffer and update the program to load them.
2300 */
2301 void
2302 fs_visitor::assign_constant_locations()
2303 {
2304 /* Only the first compile gets to decide on locations. */
2305 if (push_constant_loc) {
2306 assert(pull_constant_loc);
2307 return;
2308 }
2309
2310 if (compiler->compact_params) {
2311 struct uniform_slot_info slots[uniforms];
2312 memset(slots, 0, sizeof(slots));
2313
2314 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2315 for (int i = 0 ; i < inst->sources; i++) {
2316 if (inst->src[i].file != UNIFORM)
2317 continue;
2318
2319 /* NIR tightly packs things so the uniform number might not be
2320 * aligned (if we have a double right after a float, for
2321 * instance). This is fine because the process of re-arranging
2322 * them will ensure that things are properly aligned. The offset
2323 * into that uniform, however, must be aligned.
2324 *
2325 * In Vulkan, we have explicit offsets but everything is crammed
2326 * into a single "variable" so inst->src[i].nr will always be 0.
2327 * Everything will be properly aligned relative to that one base.
2328 */
2329 assert(inst->src[i].offset % type_sz(inst->src[i].type) == 0);
2330
2331 unsigned u = inst->src[i].nr +
2332 inst->src[i].offset / UNIFORM_SLOT_SIZE;
2333
2334 if (u >= uniforms)
2335 continue;
2336
2337 unsigned slots_read;
2338 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
2339 slots_read = DIV_ROUND_UP(inst->src[2].ud, UNIFORM_SLOT_SIZE);
2340 } else {
2341 unsigned bytes_read = inst->components_read(i) *
2342 type_sz(inst->src[i].type);
2343 slots_read = DIV_ROUND_UP(bytes_read, UNIFORM_SLOT_SIZE);
2344 }
2345
2346 assert(u + slots_read <= uniforms);
2347 mark_uniform_slots_read(&slots[u], slots_read,
2348 type_sz(inst->src[i].type));
2349 }
2350 }
2351
2352 int subgroup_id_index = get_subgroup_id_param_index(stage_prog_data);
2353
2354 /* Only allow 16 registers (128 uniform components) as push constants.
2355 *
2356 * Just demote the end of the list. We could probably do better
2357 * here, demoting things that are rarely used in the program first.
2358 *
2359 * If changing this value, note the limitation about total_regs in
2360 * brw_curbe.c.
2361 */
2362 unsigned int max_push_components = 16 * 8;
2363 if (subgroup_id_index >= 0)
2364 max_push_components--; /* Save a slot for the thread ID */
2365
2366 /* We push small arrays, but no bigger than 16 floats. This is big
2367 * enough for a vec4 but hopefully not large enough to push out other
2368 * stuff. We should probably use a better heuristic at some point.
2369 */
2370 const unsigned int max_chunk_size = 16;
2371
2372 unsigned int num_push_constants = 0;
2373 unsigned int num_pull_constants = 0;
2374
2375 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2376 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2377
2378 /* Default to -1 meaning no location */
2379 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2380 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2381
2382 int chunk_start = -1;
2383 struct cplx_align align;
2384 for (unsigned u = 0; u < uniforms; u++) {
2385 if (!slots[u].is_live) {
2386 assert(chunk_start == -1);
2387 continue;
2388 }
2389
2390 /* Skip subgroup_id_index to put it in the last push register. */
2391 if (subgroup_id_index == (int)u)
2392 continue;
2393
2394 if (chunk_start == -1) {
2395 chunk_start = u;
2396 align = slots[u].align;
2397 } else {
2398 /* Offset into the chunk */
2399 unsigned chunk_offset = (u - chunk_start) * UNIFORM_SLOT_SIZE;
2400
2401 /* Shift the slot alignment down by the chunk offset so it is
2402 * comparable with the base chunk alignment.
2403 */
2404 struct cplx_align slot_align = slots[u].align;
2405 slot_align.offset =
2406 (slot_align.offset - chunk_offset) & (align.mul - 1);
2407
2408 align = cplx_align_combine(align, slot_align);
2409 }
2410
2411 /* Sanity check the alignment */
2412 cplx_align_assert_sane(align);
2413
2414 if (slots[u].contiguous)
2415 continue;
2416
2417 /* Adjust the alignment to be in terms of slots, not bytes */
2418 assert((align.mul & (UNIFORM_SLOT_SIZE - 1)) == 0);
2419 assert((align.offset & (UNIFORM_SLOT_SIZE - 1)) == 0);
2420 align.mul /= UNIFORM_SLOT_SIZE;
2421 align.offset /= UNIFORM_SLOT_SIZE;
2422
2423 unsigned push_start_align = cplx_align_apply(align, num_push_constants);
2424 unsigned chunk_size = u - chunk_start + 1;
2425 if ((!compiler->supports_pull_constants && u < UBO_START) ||
2426 (chunk_size < max_chunk_size &&
2427 push_start_align + chunk_size <= max_push_components)) {
2428 /* Align up the number of push constants */
2429 num_push_constants = push_start_align;
2430 for (unsigned i = 0; i < chunk_size; i++)
2431 push_constant_loc[chunk_start + i] = num_push_constants++;
2432 } else {
2433 /* We need to pull this one */
2434 num_pull_constants = cplx_align_apply(align, num_pull_constants);
2435 for (unsigned i = 0; i < chunk_size; i++)
2436 pull_constant_loc[chunk_start + i] = num_pull_constants++;
2437 }
2438
2439 /* Reset the chunk and start again */
2440 chunk_start = -1;
2441 }
2442
2443 /* Add the CS local thread ID uniform at the end of the push constants */
2444 if (subgroup_id_index >= 0)
2445 push_constant_loc[subgroup_id_index] = num_push_constants++;
2446
2447 /* As the uniforms are going to be reordered, stash the old array and
2448 * create two new arrays for push/pull params.
2449 */
2450 uint32_t *param = stage_prog_data->param;
2451 stage_prog_data->nr_params = num_push_constants;
2452 if (num_push_constants) {
2453 stage_prog_data->param = rzalloc_array(mem_ctx, uint32_t,
2454 num_push_constants);
2455 } else {
2456 stage_prog_data->param = NULL;
2457 }
2458 assert(stage_prog_data->nr_pull_params == 0);
2459 assert(stage_prog_data->pull_param == NULL);
2460 if (num_pull_constants > 0) {
2461 stage_prog_data->nr_pull_params = num_pull_constants;
2462 stage_prog_data->pull_param = rzalloc_array(mem_ctx, uint32_t,
2463 num_pull_constants);
2464 }
2465
2466 /* Up until now, the param[] array has been indexed by reg + offset
2467 * of UNIFORM registers. Move pull constants into pull_param[] and
2468 * condense param[] to only contain the uniforms we chose to push.
2469 *
2470 * NOTE: Because we are condensing the params[] array, we know that
2471 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2472 * having to make a copy.
2473 */
2474 for (unsigned int i = 0; i < uniforms; i++) {
2475 uint32_t value = param[i];
2476 if (pull_constant_loc[i] != -1) {
2477 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2478 } else if (push_constant_loc[i] != -1) {
2479 stage_prog_data->param[push_constant_loc[i]] = value;
2480 }
2481 }
2482 ralloc_free(param);
2483 } else {
2484 /* If we don't want to compact anything, just set up dummy push/pull
2485 * arrays. All the rest of the compiler cares about are these arrays.
2486 */
2487 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2488 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2489
2490 for (unsigned u = 0; u < uniforms; u++)
2491 push_constant_loc[u] = u;
2492
2493 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2494 }
2495
2496 /* Now that we know how many regular uniforms we'll push, reduce the
2497 * UBO push ranges so we don't exceed the 3DSTATE_CONSTANT limits.
2498 */
2499 unsigned push_length = DIV_ROUND_UP(stage_prog_data->nr_params, 8);
2500 for (int i = 0; i < 4; i++) {
2501 struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
2502
2503 if (push_length + range->length > 64)
2504 range->length = 64 - push_length;
2505
2506 push_length += range->length;
2507 }
2508 assert(push_length <= 64);
2509 }
2510
2511 bool
2512 fs_visitor::get_pull_locs(const fs_reg &src,
2513 unsigned *out_surf_index,
2514 unsigned *out_pull_index)
2515 {
2516 assert(src.file == UNIFORM);
2517
2518 if (src.nr >= UBO_START) {
2519 const struct brw_ubo_range *range =
2520 &prog_data->ubo_ranges[src.nr - UBO_START];
2521
2522 /* If this access is in our (reduced) range, use the push data. */
2523 if (src.offset / 32 < range->length)
2524 return false;
2525
2526 *out_surf_index = prog_data->binding_table.ubo_start + range->block;
2527 *out_pull_index = (32 * range->start + src.offset) / 4;
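/* Note that range->start and range->length are in 32-byte units while
 * the pull index is in dwords; e.g., with hypothetical values
 * start == 2 and src.offset == 72, the pull index is
 * (64 + 72) / 4 == 34.
 */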
2528
2529 prog_data->has_ubo_pull = true;
2530 return true;
2531 }
2532
2533 const unsigned location = src.nr + src.offset / 4;
2534
2535 if (location < uniforms && pull_constant_loc[location] != -1) {
2536 /* A regular uniform that was demoted to a pull constant */
2537 *out_surf_index = stage_prog_data->binding_table.pull_constants_start;
2538 *out_pull_index = pull_constant_loc[location];
2539
2540 prog_data->has_ubo_pull = true;
2541 return true;
2542 }
2543
2544 return false;
2545 }
2546
2547 /**
2548 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2549 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2550 */
2551 void
2552 fs_visitor::lower_constant_loads()
2553 {
2554 unsigned index, pull_index;
2555
2556 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2557 /* Set up the annotation tracking for newly generated instructions. */
2558 const fs_builder ibld(this, block, inst);
2559
2560 for (int i = 0; i < inst->sources; i++) {
2561 if (inst->src[i].file != UNIFORM)
2562 continue;
2563
2564 /* We'll handle this case later */
2565 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2566 continue;
2567
2568 if (!get_pull_locs(inst->src[i], &index, &pull_index))
2569 continue;
2570
2571 assert(inst->src[i].stride == 0);
2572
2573 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
2574 const fs_builder ubld = ibld.exec_all().group(block_sz / 4, 0);
2575 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
2576 const unsigned base = pull_index * 4;
2577
2578 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2579 dst, brw_imm_ud(index), brw_imm_ud(base & ~(block_sz - 1)));
2580
2581 /* Rewrite the instruction to use the temporary VGRF. */
2582 inst->src[i].file = VGRF;
2583 inst->src[i].nr = dst.nr;
2584 inst->src[i].offset = (base & (block_sz - 1)) +
2585 inst->src[i].offset % 4;
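/* A sketch of the math above with made-up numbers: pull_index == 37
 * gives base == 148, so the load fetches the cacheline at byte 128
 * and the source is rewritten to read at offset 148 & 63 == 20 into
 * the fetched VGRF.
 */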
2586 }
2587
2588 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2589 inst->src[0].file == UNIFORM) {
2590
2591 if (!get_pull_locs(inst->src[0], &index, &pull_index))
2592 continue;
2593
2594 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2595 brw_imm_ud(index),
2596 inst->src[1],
2597 pull_index * 4);
2598 inst->remove(block);
2599 }
2600 }
2601 invalidate_live_intervals();
2602 }
2603
2604 bool
2605 fs_visitor::opt_algebraic()
2606 {
2607 bool progress = false;
2608
2609 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2610 switch (inst->opcode) {
2611 case BRW_OPCODE_MOV:
2612 if (!devinfo->has_64bit_types &&
2613 (inst->dst.type == BRW_REGISTER_TYPE_DF ||
2614 inst->dst.type == BRW_REGISTER_TYPE_UQ ||
2615 inst->dst.type == BRW_REGISTER_TYPE_Q)) {
2616 assert(inst->dst.type == inst->src[0].type);
2617 assert(!inst->saturate);
2618 assert(!inst->src[0].abs);
2619 assert(!inst->src[0].negate);
2620 const brw::fs_builder ibld(this, block, inst);
2621
2622 if (inst->src[0].file == IMM) {
2623 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2624 brw_imm_ud(inst->src[0].u64 >> 32));
2625 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2626 brw_imm_ud(inst->src[0].u64));
2627 } else {
2628 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2629 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1));
2630 ibld.MOV(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2631 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0));
2632 }
2633
2634 inst->remove(block);
2635 progress = true;
2636 }
2637
2638 if ((inst->conditional_mod == BRW_CONDITIONAL_Z ||
2639 inst->conditional_mod == BRW_CONDITIONAL_NZ) &&
2640 inst->dst.is_null() &&
2641 (inst->src[0].abs || inst->src[0].negate)) {
2642 inst->src[0].abs = false;
2643 inst->src[0].negate = false;
2644 progress = true;
2645 break;
2646 }
2647
2648 if (inst->src[0].file != IMM)
2649 break;
2650
2651 if (inst->saturate) {
2652 /* Full mixed-type saturates don't happen. However, we can end up
2653 * with things like:
2654 *
2655 * mov.sat(8) g21<1>DF -1F
2656 *
2657 * Other mixed-size-but-same-base-type cases may also be possible.
2658 */
2659 if (inst->dst.type != inst->src[0].type &&
2660 inst->dst.type != BRW_REGISTER_TYPE_DF &&
2661 inst->src[0].type != BRW_REGISTER_TYPE_F)
2662 assert(!"unimplemented: saturate mixed types");
2663
2664 if (brw_saturate_immediate(inst->src[0].type,
2665 &inst->src[0].as_brw_reg())) {
2666 inst->saturate = false;
2667 progress = true;
2668 }
2669 }
2670 break;
2671
2672 case BRW_OPCODE_MUL:
2673 if (inst->src[1].file != IMM)
2674 continue;
2675
2676 /* a * 1.0 = a */
2677 if (inst->src[1].is_one()) {
2678 inst->opcode = BRW_OPCODE_MOV;
2679 inst->src[1] = reg_undef;
2680 progress = true;
2681 break;
2682 }
2683
2684 /* a * -1.0 = -a */
2685 if (inst->src[1].is_negative_one()) {
2686 inst->opcode = BRW_OPCODE_MOV;
2687 inst->src[0].negate = !inst->src[0].negate;
2688 inst->src[1] = reg_undef;
2689 progress = true;
2690 break;
2691 }
2692
2693 if (inst->src[0].file == IMM) {
2694 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2695 inst->opcode = BRW_OPCODE_MOV;
2696 inst->src[0].f *= inst->src[1].f;
2697 inst->src[1] = reg_undef;
2698 progress = true;
2699 break;
2700 }
2701 break;
2702 case BRW_OPCODE_ADD:
2703 if (inst->src[1].file != IMM)
2704 continue;
2705
2706 if (inst->src[0].file == IMM) {
2707 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2708 inst->opcode = BRW_OPCODE_MOV;
2709 inst->src[0].f += inst->src[1].f;
2710 inst->src[1] = reg_undef;
2711 progress = true;
2712 break;
2713 }
2714 break;
2715 case BRW_OPCODE_OR:
2716 if (inst->src[0].equals(inst->src[1]) ||
2717 inst->src[1].is_zero()) {
2718 /* On Gen8+, the OR instruction can have a source modifier that
2719 * performs logical not on the operand. Cases of 'OR r0, ~r1, 0'
2720 * or 'OR r0, ~r1, ~r1' should become a NOT instead of a MOV.
2721 */
2722 if (inst->src[0].negate) {
2723 inst->opcode = BRW_OPCODE_NOT;
2724 inst->src[0].negate = false;
2725 } else {
2726 inst->opcode = BRW_OPCODE_MOV;
2727 }
2728 inst->src[1] = reg_undef;
2729 progress = true;
2730 break;
2731 }
2732 break;
2733 case BRW_OPCODE_CMP:
2734 if ((inst->conditional_mod == BRW_CONDITIONAL_Z ||
2735 inst->conditional_mod == BRW_CONDITIONAL_NZ) &&
2736 inst->src[1].is_zero() &&
2737 (inst->src[0].abs || inst->src[0].negate)) {
2738 inst->src[0].abs = false;
2739 inst->src[0].negate = false;
2740 progress = true;
2741 break;
2742 }
2743 break;
2744 case BRW_OPCODE_SEL:
2745 if (!devinfo->has_64bit_types &&
2746 (inst->dst.type == BRW_REGISTER_TYPE_DF ||
2747 inst->dst.type == BRW_REGISTER_TYPE_UQ ||
2748 inst->dst.type == BRW_REGISTER_TYPE_Q)) {
2749 assert(inst->dst.type == inst->src[0].type);
2750 assert(!inst->saturate);
2751 assert(!inst->src[0].abs && !inst->src[0].negate);
2752 assert(!inst->src[1].abs && !inst->src[1].negate);
2753 const brw::fs_builder ibld(this, block, inst);
2754
2755 set_predicate(inst->predicate,
2756 ibld.SEL(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 0),
2757 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
2758 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0)));
2759 set_predicate(inst->predicate,
2760 ibld.SEL(subscript(inst->dst, BRW_REGISTER_TYPE_UD, 1),
2761 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1),
2762 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 1)));
2763
2764 inst->remove(block);
2765 progress = true;
2766 }
2767 if (inst->src[0].equals(inst->src[1])) {
2768 inst->opcode = BRW_OPCODE_MOV;
2769 inst->src[1] = reg_undef;
2770 inst->predicate = BRW_PREDICATE_NONE;
2771 inst->predicate_inverse = false;
2772 progress = true;
2773 } else if (inst->saturate && inst->src[1].file == IMM) {
2774 switch (inst->conditional_mod) {
2775 case BRW_CONDITIONAL_LE:
2776 case BRW_CONDITIONAL_L:
2777 switch (inst->src[1].type) {
2778 case BRW_REGISTER_TYPE_F:
2779 if (inst->src[1].f >= 1.0f) {
2780 inst->opcode = BRW_OPCODE_MOV;
2781 inst->src[1] = reg_undef;
2782 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2783 progress = true;
2784 }
2785 break;
2786 default:
2787 break;
2788 }
2789 break;
2790 case BRW_CONDITIONAL_GE:
2791 case BRW_CONDITIONAL_G:
2792 switch (inst->src[1].type) {
2793 case BRW_REGISTER_TYPE_F:
2794 if (inst->src[1].f <= 0.0f) {
2795 inst->opcode = BRW_OPCODE_MOV;
2796 inst->src[1] = reg_undef;
2797 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2798 progress = true;
2799 }
2800 break;
2801 default:
2802 break;
2803 }
2804 default:
2805 break;
2806 }
2807 }
2808 break;
2809 case BRW_OPCODE_MAD:
2810 if (inst->src[0].type != BRW_REGISTER_TYPE_F ||
2811 inst->src[1].type != BRW_REGISTER_TYPE_F ||
2812 inst->src[2].type != BRW_REGISTER_TYPE_F)
2813 break;
2814 if (inst->src[1].is_one()) {
2815 inst->opcode = BRW_OPCODE_ADD;
2816 inst->src[1] = inst->src[2];
2817 inst->src[2] = reg_undef;
2818 progress = true;
2819 } else if (inst->src[2].is_one()) {
2820 inst->opcode = BRW_OPCODE_ADD;
2821 inst->src[2] = reg_undef;
2822 progress = true;
2823 }
2824 break;
2825 case SHADER_OPCODE_BROADCAST:
2826 if (is_uniform(inst->src[0])) {
2827 inst->opcode = BRW_OPCODE_MOV;
2828 inst->sources = 1;
2829 inst->force_writemask_all = true;
2830 progress = true;
2831 } else if (inst->src[1].file == IMM) {
2832 inst->opcode = BRW_OPCODE_MOV;
2833 /* It's possible that the selected component will be too large and
2834 * overflow the register. This can happen if someone does a
2835 * readInvocation() from GLSL or SPIR-V and provides an OOB
2836 * invocationIndex. If this happens and we somehow manage
2837 * to constant fold it in and get here, then component() may cause
2838 * us to start reading outside of the VGRF which will lead to an
2839 * assert later. Instead, just let it wrap around if it goes over
2840 * exec_size.
2841 */
2842 const unsigned comp = inst->src[1].ud & (inst->exec_size - 1);
2843 inst->src[0] = component(inst->src[0], comp);
2844 inst->sources = 1;
2845 inst->force_writemask_all = true;
2846 progress = true;
2847 }
2848 break;
2849
2850 case SHADER_OPCODE_SHUFFLE:
2851 if (is_uniform(inst->src[0])) {
2852 inst->opcode = BRW_OPCODE_MOV;
2853 inst->sources = 1;
2854 progress = true;
2855 } else if (inst->src[1].file == IMM) {
2856 inst->opcode = BRW_OPCODE_MOV;
2857 inst->src[0] = component(inst->src[0],
2858 inst->src[1].ud);
2859 inst->sources = 1;
2860 progress = true;
2861 }
2862 break;
2863
2864 default:
2865 break;
2866 }
2867
2868 /* Swap if src[0] is immediate. */
2869 if (progress && inst->is_commutative()) {
2870 if (inst->src[0].file == IMM) {
2871 fs_reg tmp = inst->src[1];
2872 inst->src[1] = inst->src[0];
2873 inst->src[0] = tmp;
2874 }
2875 }
2876 }
2877 return progress;
2878 }
2879
2880 /**
2881 * Optimize sample messages that have constant zero values for the trailing
2882 * texture coordinates. We can just reduce the message length for these
2883 * instructions instead of reserving a register for it. Trailing parameters
2884 * that aren't sent default to zero anyway. This will cause the dead code
2885 * eliminator to remove the MOV instruction that would otherwise be emitted to
2886 * set up the zero value.
2887 */
2888 bool
2889 fs_visitor::opt_zero_samples()
2890 {
2891 /* Gen4 infers the texturing opcode based on the message length so we can't
2892 * change it.
2893 */
2894 if (devinfo->gen < 5)
2895 return false;
2896
2897 bool progress = false;
2898
2899 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2900 if (!inst->is_tex())
2901 continue;
2902
2903 fs_inst *load_payload = (fs_inst *) inst->prev;
2904
2905 if (load_payload->is_head_sentinel() ||
2906 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2907 continue;
2908
2909 /* We don't want to remove the message header or the first parameter.
2910 * Removing the first parameter is not allowed, see the Haswell PRM
2911 * volume 7, page 149:
2912 *
2913 * "Parameter 0 is required except for the sampleinfo message, which
2914 * has no parameter 0"
2915 */
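/* To unpack the index math below with illustrative numbers: for a
 * SIMD8 message (exec_size / 8 == 1) with header_size == 1 and
 * mlen == 5, the candidate source is
 * load_payload->src[(5 - 1) / 1 + 1 - 1] == src[4], i.e. the last
 * parameter, and each trimmed parameter shaves one register off mlen.
 */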
2916 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2917 load_payload->src[(inst->mlen - inst->header_size) /
2918 (inst->exec_size / 8) +
2919 inst->header_size - 1].is_zero()) {
2920 inst->mlen -= inst->exec_size / 8;
2921 progress = true;
2922 }
2923 }
2924
2925 if (progress)
2926 invalidate_live_intervals();
2927
2928 return progress;
2929 }
2930
2931 /**
2932 * Optimize sample messages which are followed by the final RT write.
2933 *
2934 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2935 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2936 * final texturing results copied to the framebuffer write payload and modify
2937 * them to write to the framebuffer directly.
2938 */
2939 bool
2940 fs_visitor::opt_sampler_eot()
2941 {
2942 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2943
2944 if (stage != MESA_SHADER_FRAGMENT || dispatch_width > 16)
2945 return false;
2946
2947 if (devinfo->gen != 9 && !devinfo->is_cherryview)
2948 return false;
2949
2950 /* FINISHME: It should be possible to implement this optimization when there
2951 * are multiple drawbuffers.
2952 */
2953 if (key->nr_color_regions != 1)
2954 return false;
2955
2956 /* Requires emitting a bunch of saturating MOV instructions during logical
2957 * send lowering to clamp the color payload, which the sampler unit isn't
2958 * going to do for us.
2959 */
2960 if (key->clamp_fragment_color)
2961 return false;
2962
2963 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2964 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2965 fs_inst *fb_write = (fs_inst *)block->end();
2966 assert(fb_write->eot);
2967 assert(fb_write->opcode == FS_OPCODE_FB_WRITE_LOGICAL);
2968
2969 /* There wasn't one; nothing to do. */
2970 if (unlikely(fb_write->prev->is_head_sentinel()))
2971 return false;
2972
2973 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2974
2975 /* 3D Sampler » Messages » Message Format
2976 *
2977 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2978 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2979 */
2980 if (tex_inst->opcode != SHADER_OPCODE_TEX_LOGICAL &&
2981 tex_inst->opcode != SHADER_OPCODE_TXD_LOGICAL &&
2982 tex_inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
2983 tex_inst->opcode != SHADER_OPCODE_TXL_LOGICAL &&
2984 tex_inst->opcode != FS_OPCODE_TXB_LOGICAL &&
2985 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL &&
2986 tex_inst->opcode != SHADER_OPCODE_TXF_CMS_W_LOGICAL &&
2987 tex_inst->opcode != SHADER_OPCODE_TXF_UMS_LOGICAL)
2988 return false;
2989
2990 /* XXX - This shouldn't be necessary. */
2991 if (tex_inst->prev->is_head_sentinel())
2992 return false;
2993
2994 /* Check that the FB write sources are fully initialized by the single
2995 * texturing instruction.
2996 */
2997 for (unsigned i = 0; i < FB_WRITE_LOGICAL_NUM_SRCS; i++) {
2998 if (i == FB_WRITE_LOGICAL_SRC_COLOR0) {
2999 if (!fb_write->src[i].equals(tex_inst->dst) ||
3000 fb_write->size_read(i) != tex_inst->size_written)
3001 return false;
3002 } else if (i != FB_WRITE_LOGICAL_SRC_COMPONENTS) {
3003 if (fb_write->src[i].file != BAD_FILE)
3004 return false;
3005 }
3006 }
3007
3008 assert(!tex_inst->eot); /* We can't get here twice */
3009 assert((tex_inst->offset & (0xff << 24)) == 0);
3010
3011 const fs_builder ibld(this, block, tex_inst);
3012
3013 tex_inst->offset |= fb_write->target << 24;
3014 tex_inst->eot = true;
3015 tex_inst->dst = ibld.null_reg_ud();
3016 tex_inst->size_written = 0;
3017 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
3018
3019 /* Marking EOT is sufficient; lower_logical_sends() will notice the EOT
3020 * flag and submit a header together with the sampler message as required
3021 * by the hardware.
3022 */
3023 invalidate_live_intervals();
3024 return true;
3025 }
3026
3027 bool
3028 fs_visitor::opt_register_renaming()
3029 {
3030 bool progress = false;
3031 int depth = 0;
3032
3033 unsigned remap[alloc.count];
3034 memset(remap, ~0u, sizeof(unsigned) * alloc.count);
3035
3036 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3037 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
3038 depth++;
3039 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
3040 inst->opcode == BRW_OPCODE_WHILE) {
3041 depth--;
3042 }
3043
3044 /* Rewrite instruction sources. */
3045 for (int i = 0; i < inst->sources; i++) {
3046 if (inst->src[i].file == VGRF &&
3047 remap[inst->src[i].nr] != ~0u &&
3048 remap[inst->src[i].nr] != inst->src[i].nr) {
3049 inst->src[i].nr = remap[inst->src[i].nr];
3050 progress = true;
3051 }
3052 }
3053
3054 const unsigned dst = inst->dst.nr;
3055
3056 if (depth == 0 &&
3057 inst->dst.file == VGRF &&
3058 alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
3059 !inst->is_partial_write()) {
3060 if (remap[dst] == ~0u) {
3061 remap[dst] = dst;
3062 } else {
3063 remap[dst] = alloc.allocate(regs_written(inst));
3064 inst->dst.nr = remap[dst];
3065 progress = true;
3066 }
3067 } else if (inst->dst.file == VGRF &&
3068 remap[dst] != ~0u &&
3069 remap[dst] != dst) {
3070 inst->dst.nr = remap[dst];
3071 progress = true;
3072 }
3073 }
3074
3075 if (progress) {
3076 invalidate_live_intervals();
3077
3078 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
3079 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != ~0u) {
3080 delta_xy[i].nr = remap[delta_xy[i].nr];
3081 }
3082 }
3083 }
3084
3085 return progress;
3086 }
3087
3088 /**
3089 * Remove redundant or useless discard jumps.
3090 *
3091 * For example, we can eliminate jumps in the following sequence:
3092 *
3093 * discard-jump (redundant with the next jump)
3094 * discard-jump (useless; jumps to the next instruction)
3095 * placeholder-halt
3096 */
3097 bool
3098 fs_visitor::opt_redundant_discard_jumps()
3099 {
3100 bool progress = false;
3101
3102 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
3103
3104 fs_inst *placeholder_halt = NULL;
3105 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
3106 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
3107 placeholder_halt = inst;
3108 break;
3109 }
3110 }
3111
3112 if (!placeholder_halt)
3113 return false;
3114
3115 /* Delete any discard jumps immediately before the placeholder halt. */
3116 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
3117 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
3118 prev = (fs_inst *) placeholder_halt->prev) {
3119 prev->remove(last_bblock);
3120 progress = true;
3121 }
3122
3123 if (progress)
3124 invalidate_live_intervals();
3125
3126 return progress;
3127 }
3128
3129 /**
3130 * Compute a bitmask with GRF granularity with a bit set for each GRF starting
3131 * from \p r.offset which overlaps the region starting at \p s.offset and
3132 * spanning \p ds bytes.
3133 */
3134 static inline unsigned
3135 mask_relative_to(const fs_reg &r, const fs_reg &s, unsigned ds)
3136 {
3137 const int rel_offset = reg_offset(s) - reg_offset(r);
3138 const int shift = rel_offset / REG_SIZE;
3139 const unsigned n = DIV_ROUND_UP(rel_offset % REG_SIZE + ds, REG_SIZE);
3140 assert(reg_space(r) == reg_space(s) &&
3141 shift >= 0 && shift < int(8 * sizeof(unsigned)));
3142 return ((1 << n) - 1) << shift;
3143 }
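/* A quick worked example with invented offsets: if s begins 40 bytes
 * after r and spans ds == 40 bytes (bytes 40..79 relative to r), then
 * shift == 1, n == DIV_ROUND_UP(8 + 40, 32) == 2, and the returned
 * mask is 0b110: the second and third GRFs of r's region overlap.
 */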
3144
3145 bool
3146 fs_visitor::compute_to_mrf()
3147 {
3148 bool progress = false;
3149 int next_ip = 0;
3150
3151 /* No MRFs on Gen >= 7. */
3152 if (devinfo->gen >= 7)
3153 return false;
3154
3155 calculate_live_intervals();
3156
3157 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3158 int ip = next_ip;
3159 next_ip++;
3160
3161 if (inst->opcode != BRW_OPCODE_MOV ||
3162 inst->is_partial_write() ||
3163 inst->dst.file != MRF || inst->src[0].file != VGRF ||
3164 inst->dst.type != inst->src[0].type ||
3165 inst->src[0].abs || inst->src[0].negate ||
3166 !inst->src[0].is_contiguous() ||
3167 inst->src[0].offset % REG_SIZE != 0)
3168 continue;
3169
3170 /* Can't compute-to-MRF this GRF if someone else was going to
3171 * read it later.
3172 */
3173 if (this->virtual_grf_end[inst->src[0].nr] > ip)
3174 continue;
3175
3176 /* Found a move of a GRF to a MRF. Let's see if we can go rewrite the
3177 * things that computed the value of all GRFs of the source region. The
3178 * regs_left bitset keeps track of the registers we haven't yet found a
3179 * generating instruction for.
3180 */
3181 unsigned regs_left = (1 << regs_read(inst, 0)) - 1;
3182
3183 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3184 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3185 inst->src[0], inst->size_read(0))) {
3186 /* Found the last instruction to write the reg we want to
3187 * turn into a compute-to-MRF.
3188 */
3189
3190 /* If this one instruction didn't populate all the
3191 * channels, bail. We might be able to rewrite everything
3192 * that writes that reg, but it would require smarter
3193 * tracking.
3194 */
3195 if (scan_inst->is_partial_write())
3196 break;
3197
3198 /* Handling things not fully contained in the source of the copy
3199 * would need us to understand coalescing out more than one MOV at
3200 * a time.
3201 */
3202 if (!region_contained_in(scan_inst->dst, scan_inst->size_written,
3203 inst->src[0], inst->size_read(0)))
3204 break;
3205
3206 /* SEND instructions can't have MRF as a destination. */
3207 if (scan_inst->mlen)
3208 break;
3209
3210 if (devinfo->gen == 6) {
3211 /* gen6 math instructions must have the destination be
3212 * GRF, so no compute-to-MRF for them.
3213 */
3214 if (scan_inst->is_math()) {
3215 break;
3216 }
3217 }
3218
3219 /* Clear the bits for any registers this instruction overwrites. */
3220 regs_left &= ~mask_relative_to(
3221 inst->src[0], scan_inst->dst, scan_inst->size_written);
3222 if (!regs_left)
3223 break;
3224 }
3225
3226 /* We don't handle control flow here. Most computation of
3227 * values that end up in MRFs happens shortly before the MRF
3228 * write anyway.
3229 */
3230 if (block->start() == scan_inst)
3231 break;
3232
3233 /* You can't read from an MRF, so if someone else reads our
3234 * MRF's source GRF that we wanted to rewrite, that stops us.
3235 */
3236 bool interfered = false;
3237 for (int i = 0; i < scan_inst->sources; i++) {
3238 if (regions_overlap(scan_inst->src[i], scan_inst->size_read(i),
3239 inst->src[0], inst->size_read(0))) {
3240 interfered = true;
3241 }
3242 }
3243 if (interfered)
3244 break;
3245
3246 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3247 inst->dst, inst->size_written)) {
3248 /* If somebody else writes our MRF here, we can't
3249 * compute-to-MRF before that.
3250 */
3251 break;
3252 }
3253
3254 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1 &&
3255 regions_overlap(fs_reg(MRF, scan_inst->base_mrf), scan_inst->mlen * REG_SIZE,
3256 inst->dst, inst->size_written)) {
3257 /* Found a SEND instruction, which means that there are
3258 * live values in MRFs from base_mrf to base_mrf +
3259 * scan_inst->mlen - 1. Don't go pushing our MRF write up
3260 * above it.
3261 */
3262 break;
3263 }
3264 }
3265
3266 if (regs_left)
3267 continue;
3268
3269 /* Found all generating instructions of our MRF's source value, so it
3270 * should be safe to rewrite them to point to the MRF directly.
3271 */
3272 regs_left = (1 << regs_read(inst, 0)) - 1;
3273
3274 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3275 if (regions_overlap(scan_inst->dst, scan_inst->size_written,
3276 inst->src[0], inst->size_read(0))) {
3277 /* Clear the bits for any registers this instruction overwrites. */
3278 regs_left &= ~mask_relative_to(
3279 inst->src[0], scan_inst->dst, scan_inst->size_written);
3280
3281 const unsigned rel_offset = reg_offset(scan_inst->dst) -
3282 reg_offset(inst->src[0]);
3283
3284 if (inst->dst.nr & BRW_MRF_COMPR4) {
3285 /* Apply the same address transformation done by the hardware
3286 * for COMPR4 MRF writes.
3287 */
3288 assert(rel_offset < 2 * REG_SIZE);
3289 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE * 4;
3290
3291 /* Clear the COMPR4 bit if the generating instruction is not
3292 * compressed.
3293 */
3294 if (scan_inst->size_written < 2 * REG_SIZE)
3295 scan_inst->dst.nr &= ~BRW_MRF_COMPR4;
3296
3297 } else {
3298 /* Calculate the MRF number the result of this instruction is
3299 * ultimately written to.
3300 */
3301 scan_inst->dst.nr = inst->dst.nr + rel_offset / REG_SIZE;
3302 }
3303
3304 scan_inst->dst.file = MRF;
3305 scan_inst->dst.offset = inst->dst.offset + rel_offset % REG_SIZE;
3306 scan_inst->saturate |= inst->saturate;
3307 if (!regs_left)
3308 break;
3309 }
3310 }
3311
3312 assert(!regs_left);
3313 inst->remove(block);
3314 progress = true;
3315 }
3316
3317 if (progress)
3318 invalidate_live_intervals();
3319
3320 return progress;
3321 }
3322
3323 /**
3324 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
3325 * flow. We could probably do better here with some form of divergence
3326 * analysis.
3327 */
3328 bool
3329 fs_visitor::eliminate_find_live_channel()
3330 {
3331 bool progress = false;
3332 unsigned depth = 0;
3333
3334 if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
3335 /* The optimization below assumes that channel zero is live on thread
3336 * dispatch, which may not be the case if the fixed function dispatches
3337 * threads sparsely.
3338 */
3339 return false;
3340 }
3341
3342 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3343 switch (inst->opcode) {
3344 case BRW_OPCODE_IF:
3345 case BRW_OPCODE_DO:
3346 depth++;
3347 break;
3348
3349 case BRW_OPCODE_ENDIF:
3350 case BRW_OPCODE_WHILE:
3351 depth--;
3352 break;
3353
3354 case FS_OPCODE_DISCARD_JUMP:
3355 /* This can potentially make control flow non-uniform until the end
3356 * of the program.
3357 */
3358 return progress;
3359
3360 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
3361 if (depth == 0) {
3362 inst->opcode = BRW_OPCODE_MOV;
3363 inst->src[0] = brw_imm_ud(0u);
3364 inst->sources = 1;
3365 inst->force_writemask_all = true;
3366 progress = true;
3367 }
3368 break;
3369
3370 default:
3371 break;
3372 }
3373 }
3374
3375 return progress;
3376 }
3377
3378 /**
3379 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
3380 * instructions to FS_OPCODE_REP_FB_WRITE.
3381 */
3382 void
3383 fs_visitor::emit_repclear_shader()
3384 {
3385 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3386 int base_mrf = 0;
3387 int color_mrf = base_mrf + 2;
3388 fs_inst *mov;
3389
3390 if (uniforms > 0) {
3391 mov = bld.exec_all().group(4, 0)
3392 .MOV(brw_message_reg(color_mrf),
3393 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
3394 } else {
3395 struct brw_reg reg =
3396 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
3397 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
3398 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
3399
3400 mov = bld.exec_all().group(4, 0)
3401 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
3402 }
3403
3404 fs_inst *write = NULL;
3405 if (key->nr_color_regions == 1) {
3406 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3407 write->saturate = key->clamp_fragment_color;
3408 write->base_mrf = color_mrf;
3409 write->target = 0;
3410 write->header_size = 0;
3411 write->mlen = 1;
3412 } else {
3413 assume(key->nr_color_regions > 0);
3414
3415 struct brw_reg header =
3416 retype(brw_message_reg(base_mrf), BRW_REGISTER_TYPE_UD);
3417 bld.exec_all().group(16, 0)
3418 .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3419
3420 for (int i = 0; i < key->nr_color_regions; ++i) {
3421 if (i > 0) {
3422 bld.exec_all().group(1, 0)
3423 .MOV(component(header, 2), brw_imm_ud(i));
3424 }
3425
3426 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3427 write->saturate = key->clamp_fragment_color;
3428 write->base_mrf = base_mrf;
3429 write->target = i;
3430 write->header_size = 2;
3431 write->mlen = 3;
3432 }
3433 }
3434 write->eot = true;
3435 write->last_rt = true;
3436
3437 calculate_cfg();
3438
3439 assign_constant_locations();
3440 assign_curb_setup();
3441
3442 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
3443 if (uniforms > 0) {
3444 assert(mov->src[0].file == FIXED_GRF);
3445 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
3446 }
3447
3448 lower_scoreboard();
3449 }
3450
3451 /**
3452 * Walks through basic blocks, looking for repeated MRF writes and
3453 * removing the later ones.
3454 */
3455 bool
3456 fs_visitor::remove_duplicate_mrf_writes()
3457 {
3458 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
3459 bool progress = false;
3460
3461 /* Need to update the MRF tracking for compressed instructions. */
3462 if (dispatch_width >= 16)
3463 return false;
3464
3465 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3466
3467 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3468 if (inst->is_control_flow()) {
3469 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3470 }
3471
3472 if (inst->opcode == BRW_OPCODE_MOV &&
3473 inst->dst.file == MRF) {
3474 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
3475 if (prev_inst && prev_inst->opcode == BRW_OPCODE_MOV &&
3476 inst->dst.equals(prev_inst->dst) &&
3477 inst->src[0].equals(prev_inst->src[0]) &&
3478 inst->saturate == prev_inst->saturate &&
3479 inst->predicate == prev_inst->predicate &&
3480 inst->conditional_mod == prev_inst->conditional_mod &&
3481 inst->exec_size == prev_inst->exec_size) {
3482 inst->remove(block);
3483 progress = true;
3484 continue;
3485 }
3486 }
3487
3488 /* Clear out the last-write records for MRFs that were overwritten. */
3489 if (inst->dst.file == MRF) {
3490 last_mrf_move[inst->dst.nr] = NULL;
3491 }
3492
3493 if (inst->mlen > 0 && inst->base_mrf != -1) {
3494 /* Found a SEND instruction, which will include two or fewer
3495 * implied MRF writes. We could do better here.
3496 */
3497 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3498 last_mrf_move[inst->base_mrf + i] = NULL;
3499 }
3500 }
3501
3502 /* Clear out any MRF move records whose sources got overwritten. */
3503 for (unsigned i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
3504 if (last_mrf_move[i] &&
3505 regions_overlap(inst->dst, inst->size_written,
3506 last_mrf_move[i]->src[0],
3507 last_mrf_move[i]->size_read(0))) {
3508 last_mrf_move[i] = NULL;
3509 }
3510 }
3511
3512 if (inst->opcode == BRW_OPCODE_MOV &&
3513 inst->dst.file == MRF &&
3514 inst->src[0].file != ARF &&
3515 !inst->is_partial_write()) {
3516 last_mrf_move[inst->dst.nr] = inst;
3517 }
3518 }
3519
3520 if (progress)
3521 invalidate_live_intervals();
3522
3523 return progress;
3524 }
3525
3526 /**
3527 * The rounding mode is emitted with each conversion instruction, but on
3528 * the hardware it is actually a piece of state. So once it has been set,
3529 * we don't need to set it again for subsequent conversions.
3530 *
3531 * This is useful for vector/matrix conversions, as setting the
3532 * mode once is enough for the whole vector/matrix.
3533 */
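/* An illustrative, made-up sequence (assuming the shader requests no
 * default rounding mode, so prev_mode starts out unspecified):
 *
 *    rnd_mode RTNE      <- kept, the mode changes from unspecified
 *    mov(8) g10:HF g2:F
 *    rnd_mode RTNE      <- removed by this pass, the mode is unchanged
 *    mov(8) g11:HF g3:F
 *    rnd_mode RTZ       <- kept, the mode actually changes
 */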
3534 bool
3535 fs_visitor::remove_extra_rounding_modes()
3536 {
3537 bool progress = false;
3538 unsigned execution_mode = this->nir->info.float_controls_execution_mode;
3539
3540 brw_rnd_mode base_mode = BRW_RND_MODE_UNSPECIFIED;
3541 if ((FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
3542 FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
3543 FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64) &
3544 execution_mode)
3545 base_mode = BRW_RND_MODE_RTNE;
3546 if ((FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 |
3547 FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 |
3548 FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64) &
3549 execution_mode)
3550 base_mode = BRW_RND_MODE_RTZ;
3551
3552 foreach_block (block, cfg) {
3553 brw_rnd_mode prev_mode = base_mode;
3554
3555 foreach_inst_in_block_safe (fs_inst, inst, block) {
3556 if (inst->opcode == SHADER_OPCODE_RND_MODE) {
3557 assert(inst->src[0].file == BRW_IMMEDIATE_VALUE);
3558 const brw_rnd_mode mode = (brw_rnd_mode) inst->src[0].d;
3559 if (mode == prev_mode) {
3560 inst->remove(block);
3561 progress = true;
3562 } else {
3563 prev_mode = mode;
3564 }
3565 }
3566 }
3567 }
3568
3569 if (progress)
3570 invalidate_live_intervals();
3571
3572 return progress;
3573 }
3574
3575 static void
3576 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3577 {
3578 /* Clear the flag for registers that actually got read (as expected). */
3579 for (int i = 0; i < inst->sources; i++) {
3580 int grf;
3581 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3582 grf = inst->src[i].nr;
3583 } else {
3584 continue;
3585 }
3586
3587 if (grf >= first_grf &&
3588 grf < first_grf + grf_len) {
3589 deps[grf - first_grf] = false;
3590 if (inst->exec_size == 16)
3591 deps[grf - first_grf + 1] = false;
3592 }
3593 }
3594 }
3595
3596 /**
3597 * Implements this workaround for the original 965:
3598 *
3599 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3600 * check for post destination dependencies on this instruction, software
3601 * must ensure that there is no destination hazard for the case of ‘write
3602 * followed by a posted write’ shown in the following example.
3603 *
3604 * 1. mov r3 0
3605 * 2. send r3.xy <rest of send instruction>
3606 * 3. mov r2 r3
3607 *
3608 * Due to no post-destination dependency check on the ‘send’, the above
3609 * code sequence could have two instructions (1 and 2) in flight at the
3610 * same time that both consider ‘r3’ as the target of their final writes.
3611 */
3612 void
3613 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3614 fs_inst *inst)
3615 {
3616 int write_len = regs_written(inst);
3617 int first_write_grf = inst->dst.nr;
3618 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3619 assert(write_len < (int)sizeof(needs_dep) - 1);
3620
3621 memset(needs_dep, false, sizeof(needs_dep));
3622 memset(needs_dep, true, write_len);
3623
3624 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3625
3626 /* Walk backwards looking for writes to registers we're writing which
3627 * aren't read since being written. If we hit the start of the program,
3628 * we assume that there are no outstanding dependencies on entry to the
3629 * program.
3630 */
3631 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3632 /* If we hit control flow, assume that there *are* outstanding
3633 * dependencies, and force their cleanup before our instruction.
3634 */
3635 if (block->start() == scan_inst && block->num != 0) {
3636 for (int i = 0; i < write_len; i++) {
3637 if (needs_dep[i])
3638 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3639 first_write_grf + i);
3640 }
3641 return;
3642 }
3643
3644 /* We insert our reads as late as possible on the assumption that any
3645 * instruction but a MOV that might have left us an outstanding
3646 * dependency has more latency than a MOV.
3647 */
3648 if (scan_inst->dst.file == VGRF) {
3649 for (unsigned i = 0; i < regs_written(scan_inst); i++) {
3650 int reg = scan_inst->dst.nr + i;
3651
3652 if (reg >= first_write_grf &&
3653 reg < first_write_grf + write_len &&
3654 needs_dep[reg - first_write_grf]) {
3655 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3656 needs_dep[reg - first_write_grf] = false;
3657 if (scan_inst->exec_size == 16)
3658 needs_dep[reg - first_write_grf + 1] = false;
3659 }
3660 }
3661 }
3662
3663 /* Clear the flag for registers that actually got read (as expected). */
3664 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3665
3666 /* Continue the loop only if we haven't resolved all the dependencies */
3667 int i;
3668 for (i = 0; i < write_len; i++) {
3669 if (needs_dep[i])
3670 break;
3671 }
3672 if (i == write_len)
3673 return;
3674 }
3675 }
3676
3677 /**
3678 * Implements this workaround for the original 965:
3679 *
3680 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3681 * used as a destination register until after it has been sourced by an
3682 * instruction with a different destination register.
3683 */
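/* A sketch of the fixup this pass performs (register numbers
 * hypothetical): given
 *
 *    1. send r3 <rest of send instruction>
 *    2. mov  r3 r4       // r3 reused as a destination before being read
 *
 * a dependency-resolve MOV that sources r3 (e.g. a MOV of r3 to the
 * null register) is inserted between the two, satisfying the errata:
 *
 *    1. send r3 <rest of send instruction>
 *    2. mov  null r3     // DEP_RESOLVE_MOV
 *    3. mov  r3 r4
 */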
3684 void
3685 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3686 {
3687 int write_len = regs_written(inst);
3688 unsigned first_write_grf = inst->dst.nr;
3689 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3690 assert(write_len < (int)sizeof(needs_dep) - 1);
3691
3692 memset(needs_dep, false, sizeof(needs_dep));
3693 memset(needs_dep, true, write_len);
3694 /* Walk forwards looking for writes to registers we're writing which aren't
3695 * read before being written.
3696 */
3697 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3698 /* If we hit control flow, force resolve all remaining dependencies. */
3699 if (block->end() == scan_inst && block->num != cfg->num_blocks - 1) {
3700 for (int i = 0; i < write_len; i++) {
3701 if (needs_dep[i])
3702 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3703 first_write_grf + i);
3704 }
3705 return;
3706 }
3707
3708 /* Clear the flag for registers that actually got read (as expected). */
3709 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3710
3711 /* We insert our reads as late as possible since they're reading the
3712 * result of a SEND, which has massive latency.
3713 */
3714 if (scan_inst->dst.file == VGRF &&
3715 scan_inst->dst.nr >= first_write_grf &&
3716 scan_inst->dst.nr < first_write_grf + write_len &&
3717 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3718 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3719 scan_inst->dst.nr);
3720 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3721 }
3722
3723 /* Continue the loop only if we haven't resolved all the dependencies */
3724 int i;
3725 for (i = 0; i < write_len; i++) {
3726 if (needs_dep[i])
3727 break;
3728 }
3729 if (i == write_len)
3730 return;
3731 }
3732 }
3733
3734 void
3735 fs_visitor::insert_gen4_send_dependency_workarounds()
3736 {
3737 if (devinfo->gen != 4 || devinfo->is_g4x)
3738 return;
3739
3740 bool progress = false;
3741
3742 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3743 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3744 insert_gen4_pre_send_dependency_workarounds(block, inst);
3745 insert_gen4_post_send_dependency_workarounds(block, inst);
3746 progress = true;
3747 }
3748 }
3749
3750 if (progress)
3751 invalidate_live_intervals();
3752 }
3753
3754 /**
3755 * Turns the generic expression-style uniform pull constant load instruction
3756 * into a hardware-specific series of instructions for loading a pull
3757 * constant.
3758 *
3759 * The expression style allows the CSE pass before this to optimize out
3760 * repeated loads from the same offset, and gives the pre-register-allocation
3761 * scheduling full flexibility, while the conversion to native instructions
3762 * allows the post-register-allocation scheduler the best information
3763 * possible.
3764 *
3765 * Note that execution masking for setting up pull constant loads is special:
3766 * the channels that need to be written are unrelated to the current execution
3767 * mask, since a later instruction will use one of the result channels as a
3768 * source operand for all 8 or 16 of its channels.
3769 */
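/* A sketch of the gen7+ lowering below (register and offset values
 * hypothetical): a pull load from constant byte offset 48 becomes
 *
 *    mov(8) payload<1>UD  g0<8,8,1>UD     // header copied from g0
 *    mov(1) payload.2<1>UD  3ud           // 48 / 16, in 16-byte units
 *    uniform_pull_constant_load_gen7 dst, surface, payload
 *
 * with mlen == 1 and header_size == 1.
 */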
3770 void
3771 fs_visitor::lower_uniform_pull_constant_loads()
3772 {
3773 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3774 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3775 continue;
3776
3777 if (devinfo->gen >= 7) {
3778 const fs_builder ubld = fs_builder(this, block, inst).exec_all();
3779 const fs_reg payload = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD);
3780
3781 ubld.group(8, 0).MOV(payload,
3782 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
3783 ubld.group(1, 0).MOV(component(payload, 2),
3784 brw_imm_ud(inst->src[1].ud / 16));
3785
3786 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3787 inst->src[1] = payload;
3788 inst->header_size = 1;
3789 inst->mlen = 1;
3790
3791 invalidate_live_intervals();
3792 } else {
3793 /* Before register allocation, we didn't tell the scheduler about the
3794 * MRF we use. We know it's safe to use this MRF because nothing
3795 * else does except for register spill/unspill, which generates and
3796 * uses its MRF within a single IR instruction.
3797 */
3798 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3799 inst->mlen = 1;
3800 }
3801 }
3802 }
3803
3804 bool
3805 fs_visitor::lower_load_payload()
3806 {
3807 bool progress = false;
3808
3809 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3810 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3811 continue;
3812
3813 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3814 assert(inst->saturate == false);
3815 fs_reg dst = inst->dst;
3816
3817 /* Get rid of COMPR4. We'll add it back in if we need it */
3818 if (dst.file == MRF)
3819 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3820
3821 const fs_builder ibld(this, block, inst);
3822 const fs_builder hbld = ibld.exec_all().group(8, 0);
3823
3824 for (uint8_t i = 0; i < inst->header_size; i++) {
3825 if (inst->src[i].file != BAD_FILE) {
3826 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3827 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3828 hbld.MOV(mov_dst, mov_src);
3829 }
3830 dst = offset(dst, hbld, 1);
3831 }
3832
3833 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3834 inst->exec_size > 8) {
3835 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3836 * a straightforward copy. Instead, the result of the
3837 * LOAD_PAYLOAD is treated as interleaved and the first four
3838 * non-header sources are unpacked as:
3839 *
3840 * m + 0: r0
3841 * m + 1: g0
3842 * m + 2: b0
3843 * m + 3: a0
3844 * m + 4: r1
3845 * m + 5: g1
3846 * m + 6: b1
3847 * m + 7: a1
3848 *
3849 * This is used for gen <= 5 fb writes.
3850 */
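/* A sketch with hypothetical registers: with COMPR4 a single
 *
 *    mov(16) m(1|COMPR4)<1>F  g10<8,8,1>F
 *
 * writes the first half to m1 and the second half to m5; the
 * else-branch below fakes the same layout with two SIMD8 MOVs when the
 * platform lacks COMPR4.
 */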
3851 assert(inst->exec_size == 16);
3852 assert(inst->header_size + 4 <= inst->sources);
3853 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3854 if (inst->src[i].file != BAD_FILE) {
3855 if (devinfo->has_compr4) {
3856 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3857 compr4_dst.nr |= BRW_MRF_COMPR4;
3858 ibld.MOV(compr4_dst, inst->src[i]);
3859 } else {
3860 /* Platform doesn't have COMPR4. We have to fake it */
3861 fs_reg mov_dst = retype(dst, inst->src[i].type);
3862 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3863 mov_dst.nr += 4;
3864 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3865 }
3866 }
3867
3868 dst.nr++;
3869 }
3870
3871 /* The loop above only ever incremented us through the first set
3872 * of 4 registers. However, thanks to the magic of COMPR4, we
3873 * actually wrote to the first 8 registers, so we need to take
3874 * that into account now.
3875 */
3876 dst.nr += 4;
3877
3878 /* The COMPR4 code took care of the first 4 sources. We'll let
3879 * the regular path handle any remaining sources. Yes, we are
3880 * modifying the instruction but we're about to delete it so
3881 * this really doesn't hurt anything.
3882 */
3883 inst->header_size += 4;
3884 }
3885
3886 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3887 if (inst->src[i].file != BAD_FILE) {
3888 dst.type = inst->src[i].type;
3889 ibld.MOV(dst, inst->src[i]);
3890 } else {
3891 dst.type = BRW_REGISTER_TYPE_UD;
3892 }
3893 dst = offset(dst, ibld, 1);
3894 }
3895
3896 inst->remove(block);
3897 progress = true;
3898 }
3899
3900 if (progress)
3901 invalidate_live_intervals();
3902
3903 return progress;
3904 }
3905
3906 void
3907 fs_visitor::lower_mul_dword_inst(fs_inst *inst, bblock_t *block)
3908 {
3909 const fs_builder ibld(this, block, inst);
3910
3911 const bool ud = (inst->src[1].type == BRW_REGISTER_TYPE_UD);
3912 if (inst->src[1].file == IMM &&
3913 (( ud && inst->src[1].ud <= UINT16_MAX) ||
3914 (!ud && inst->src[1].d <= INT16_MAX && inst->src[1].d >= INT16_MIN))) {
3915 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3916 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3917 * src1 are used.
3918 *
3919 * If multiplying by an immediate value that fits in 16-bits, do a
3920 * single MUL instruction with that value in the proper location.
3921 */
3922 if (devinfo->gen < 7) {
3923 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8), inst->dst.type);
3924 ibld.MOV(imm, inst->src[1]);
3925 ibld.MUL(inst->dst, imm, inst->src[0]);
3926 } else {
3927 ibld.MUL(inst->dst, inst->src[0],
3928 ud ? brw_imm_uw(inst->src[1].ud)
3929 : brw_imm_w(inst->src[1].d));
3930 }
3931 } else {
3932 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3933 * do 32-bit integer multiplication in one instruction, but instead
3934 * must do a sequence (which actually calculates a 64-bit result):
3935 *
3936 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3937 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3938 * mov(8) g2<1>D acc0<8,8,1>D
3939 *
3940 * But on Gen > 6, the ability to use second accumulator register
3941 * (acc1) for non-float data types was removed, preventing a simple
3942 * implementation in SIMD16. A 16-channel result can be calculated by
3943 * executing the three instructions twice in SIMD8, once with quarter
3944 * control of 1Q for the first eight channels and again with 2Q for
3945 * the second eight channels.
3946 *
3947 * Which accumulator register is implicitly accessed (by AccWrEnable
3948 * for instance) is determined by the quarter control. Unfortunately
3949 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3950 * implicit accumulator access by an instruction with 2Q will access
3951 * acc1 regardless of whether the data type is usable in acc1.
3952 *
3953 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3954 * integer data types.
3955 *
3956 * Since we only want the low 32-bits of the result, we can do two
3957 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3958 * adjust the high result and add them (like the mach is doing):
3959 *
3960 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3961 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3962 * shl(8) g9<1>D g8<8,8,1>D 16D
3963 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3964 *
3965 * We avoid the shl instruction by realizing that we only want to add
3966 * the low 16-bits of the "high" result to the high 16-bits of the
3967 * "low" result and using proper regioning on the add:
3968 *
3969 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3970 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3971 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3972 *
3973 * Since it does not use the (single) accumulator register, we can
3974 * schedule multi-component multiplications much better.
3975 */
3976
3977 bool needs_mov = false;
3978 fs_reg orig_dst = inst->dst;
3979
3980 /* Get a new VGRF for the "low" 32x16-bit multiplication result if
3981 * reusing the original destination is impossible due to hardware
3982 * restrictions, source/destination overlap, or it being the null
3983 * register.
3984 */
3985 fs_reg low = inst->dst;
3986 if (orig_dst.is_null() || orig_dst.file == MRF ||
3987 regions_overlap(inst->dst, inst->size_written,
3988 inst->src[0], inst->size_read(0)) ||
3989 regions_overlap(inst->dst, inst->size_written,
3990 inst->src[1], inst->size_read(1)) ||
3991 inst->dst.stride >= 4) {
3992 needs_mov = true;
3993 low = fs_reg(VGRF, alloc.allocate(regs_written(inst)),
3994 inst->dst.type);
3995 }
3996
3997 /* Get a new VGRF but keep the same stride as inst->dst */
3998 fs_reg high(VGRF, alloc.allocate(regs_written(inst)), inst->dst.type);
3999 high.stride = inst->dst.stride;
4000 high.offset = inst->dst.offset % REG_SIZE;
4001
4002 if (devinfo->gen >= 7) {
4003 if (inst->src[1].abs)
4004 lower_src_modifiers(this, block, inst, 1);
4005
4006 if (inst->src[1].file == IMM) {
4007 ibld.MUL(low, inst->src[0],
4008 brw_imm_uw(inst->src[1].ud & 0xffff));
4009 ibld.MUL(high, inst->src[0],
4010 brw_imm_uw(inst->src[1].ud >> 16));
4011 } else {
4012 ibld.MUL(low, inst->src[0],
4013 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
4014 ibld.MUL(high, inst->src[0],
4015 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 1));
4016 }
4017 } else {
4018 if (inst->src[0].abs)
4019 lower_src_modifiers(this, block, inst, 0);
4020
4021 ibld.MUL(low, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 0),
4022 inst->src[1]);
4023 ibld.MUL(high, subscript(inst->src[0], BRW_REGISTER_TYPE_UW, 1),
4024 inst->src[1]);
4025 }
4026
4027 ibld.ADD(subscript(low, BRW_REGISTER_TYPE_UW, 1),
4028 subscript(low, BRW_REGISTER_TYPE_UW, 1),
4029 subscript(high, BRW_REGISTER_TYPE_UW, 0));
4030
4031 if (needs_mov || inst->conditional_mod)
4032 set_condmod(inst->conditional_mod, ibld.MOV(orig_dst, low));
4033 }
4034 }
4035
4036 void
4037 fs_visitor::lower_mul_qword_inst(fs_inst *inst, bblock_t *block)
4038 {
4039 const fs_builder ibld(this, block, inst);
4040
4041 /* Considering two 64-bit integers ab and cd where each letter         ab
4042  * corresponds to 32 bits, we get a 128-bit result WXYZ. We          * cd
4043  * only need to provide the YZ part of the result.                -------
4044  *                                                                     BD
4045  * Only BD needs to be 64 bits. For AD and BC we only care        +  AD
4046  * about the lower 32 bits (since they are part of the upper    +  BC
4047  * 32 bits of our result). AC is not needed since it starts     + AC
4048  * on the 65th bit of the result.                                 -------
4049  *                                                                   WXYZ
4050  */
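/* In scalar terms the sequence below computes (a sketch; a,b and c,d
 * are the high,low dwords of src0 and src1):
 *
 *    uint64_t bd = (uint64_t)b * d;      // full 64-bit low product
 *    uint32_t ad = a * d, bc = b * c;    // low 32 bits are enough
 *    uint64_t yz = bd + ((uint64_t)(ad + bc) << 32);
 */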
4051 unsigned int q_regs = regs_written(inst);
4052 unsigned int d_regs = (q_regs + 1) / 2;
4053
4054 fs_reg bd(VGRF, alloc.allocate(q_regs), BRW_REGISTER_TYPE_UQ);
4055 fs_reg ad(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
4056 fs_reg bc(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
4057
4058 /* Here we need the full 64 bit result for 32b * 32b. */
4059 if (devinfo->has_integer_dword_mul) {
4060 ibld.MUL(bd, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
4061 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
4062 } else {
4063 fs_reg bd_high(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
4064 fs_reg bd_low(VGRF, alloc.allocate(d_regs), BRW_REGISTER_TYPE_UD);
4065 fs_reg acc = retype(brw_acc_reg(inst->exec_size), BRW_REGISTER_TYPE_UD);
4066
4067 fs_inst *mul = ibld.MUL(acc,
4068 subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
4069 subscript(inst->src[1], BRW_REGISTER_TYPE_UW, 0));
4070 mul->writes_accumulator = true;
4071
4072 ibld.MACH(bd_high, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
4073 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
4074 ibld.MOV(bd_low, acc);
4075
4076 ibld.MOV(subscript(bd, BRW_REGISTER_TYPE_UD, 0), bd_low);
4077 ibld.MOV(subscript(bd, BRW_REGISTER_TYPE_UD, 1), bd_high);
4078 }
4079
4080 ibld.MUL(ad, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 1),
4081 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 0));
4082 ibld.MUL(bc, subscript(inst->src[0], BRW_REGISTER_TYPE_UD, 0),
4083 subscript(inst->src[1], BRW_REGISTER_TYPE_UD, 1));
4084
4085 ibld.ADD(ad, ad, bc);
4086 ibld.ADD(subscript(bd, BRW_REGISTER_TYPE_UD, 1),
4087 subscript(bd, BRW_REGISTER_TYPE_UD, 1), ad);
4088
4089 ibld.MOV(inst->dst, bd);
4090 }
4091
4092 void
4093 fs_visitor::lower_mulh_inst(fs_inst *inst, bblock_t *block)
4094 {
4095 const fs_builder ibld(this, block, inst);
4096
4097 /* According to the BDW+ BSpec page for the "Multiply Accumulate
4098 * High" instruction:
4099 *
4100 * "An added preliminary mov is required for source modification on
4101 * src1:
4102 * mov (8) r3.0<1>:d -r3<8;8,1>:d
4103 * mul (8) acc0:d r2.0<8;8,1>:d r3.0<16;8,2>:uw
4104 * mach (8) r5.0<1>:d r2.0<8;8,1>:d r3.0<8;8,1>:d"
4105 */
4106 if (devinfo->gen >= 8 && (inst->src[1].negate || inst->src[1].abs))
4107 lower_src_modifiers(this, block, inst, 1);
4108
4109 /* Should have been lowered to 8-wide. */
4110 assert(inst->exec_size <= get_lowered_simd_width(devinfo, inst));
4111 const fs_reg acc = retype(brw_acc_reg(inst->exec_size), inst->dst.type);
4112 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
4113 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
4114
4115 if (devinfo->gen >= 8) {
4116 /* Before Gen8, integer multiplies read 32 bits from one source
4117  * and 16 bits from the other, relying on the MACH instruction
4118  * to generate the high bits of the result.
4119 *
4120 * On Gen8, the multiply instruction does a full 32x32-bit
4121 * multiply, but in order to do a 64-bit multiply we can simulate
4122 * the previous behavior and then use a MACH instruction.
4123 */
4124 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
4125 mul->src[1].type == BRW_REGISTER_TYPE_UD);
4126 mul->src[1].type = BRW_REGISTER_TYPE_UW;
4127 mul->src[1].stride *= 2;
4128
4129 if (mul->src[1].file == IMM) {
4130 mul->src[1] = brw_imm_uw(mul->src[1].ud);
4131 }
4132 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
4133 inst->group > 0) {
4134 /* Among other things the quarter control bits influence which
4135 * accumulator register is used by the hardware for instructions
4136 * that access the accumulator implicitly (e.g. MACH). A
4137 * second-half instruction would normally map to acc1, which
4138 * doesn't exist on Gen7 and up (the hardware does emulate it for
4139 * floating-point instructions *only* by taking advantage of the
4140 * extra precision of acc0 not normally used for floating point
4141 * arithmetic).
4142 *
4143 * HSW and up are careful enough not to try to access an
4144 * accumulator register that doesn't exist, but on earlier Gen7
4145 * hardware we need to make sure that the quarter control bits are
4146 * zero to avoid non-deterministic behaviour and emit an extra MOV
4147 * to get the result masked correctly according to the current
4148 * channel enables.
4149 */
4150 mach->group = 0;
4151 mach->force_writemask_all = true;
4152 mach->dst = ibld.vgrf(inst->dst.type);
4153 ibld.MOV(inst->dst, mach->dst);
4154 }
4155 }
4156
4157 bool
4158 fs_visitor::lower_integer_multiplication()
4159 {
4160 bool progress = false;
4161
4162 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4163 if (inst->opcode == BRW_OPCODE_MUL) {
4164 if ((inst->dst.type == BRW_REGISTER_TYPE_Q ||
4165 inst->dst.type == BRW_REGISTER_TYPE_UQ) &&
4166 (inst->src[0].type == BRW_REGISTER_TYPE_Q ||
4167 inst->src[0].type == BRW_REGISTER_TYPE_UQ) &&
4168 (inst->src[1].type == BRW_REGISTER_TYPE_Q ||
4169 inst->src[1].type == BRW_REGISTER_TYPE_UQ)) {
4170 lower_mul_qword_inst(inst, block);
4171 inst->remove(block);
4172 progress = true;
4173 } else if (!inst->dst.is_accumulator() &&
4174 (inst->dst.type == BRW_REGISTER_TYPE_D ||
4175 inst->dst.type == BRW_REGISTER_TYPE_UD) &&
4176 !devinfo->has_integer_dword_mul) {
4177 lower_mul_dword_inst(inst, block);
4178 inst->remove(block);
4179 progress = true;
4180 }
4181 } else if (inst->opcode == SHADER_OPCODE_MULH) {
4182 lower_mulh_inst(inst, block);
4183 inst->remove(block);
4184 progress = true;
4185 }
4186
4187 }
4188
4189 if (progress)
4190 invalidate_live_intervals();
4191
4192 return progress;
4193 }
4194
4195 bool
4196 fs_visitor::lower_minmax()
4197 {
4198 assert(devinfo->gen < 6);
4199
4200 bool progress = false;
4201
4202 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4203 const fs_builder ibld(this, block, inst);
4204
4205 if (inst->opcode == BRW_OPCODE_SEL &&
4206 inst->predicate == BRW_PREDICATE_NONE) {
4207 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
4208 * the original SEL.L/GE instruction.
4209 */
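/* A sketch of the rewrite (operands hypothetical):
 *
 *    sel.l(8) dst a b
 *
 * becomes
 *
 *    cmp.l.f0(8) null a b
 *    (+f0) sel(8) dst a b
 */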
4210 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
4211 inst->conditional_mod);
4212 inst->predicate = BRW_PREDICATE_NORMAL;
4213 inst->conditional_mod = BRW_CONDITIONAL_NONE;
4214
4215 progress = true;
4216 }
4217 }
4218
4219 if (progress)
4220 invalidate_live_intervals();
4221
4222 return progress;
4223 }
4224
4225 static void
4226 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
4227 fs_reg *dst, fs_reg color, unsigned components)
4228 {
4229 if (key->clamp_fragment_color) {
4230 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
4231 assert(color.type == BRW_REGISTER_TYPE_F);
4232
4233 for (unsigned i = 0; i < components; i++)
4234 set_saturate(true,
4235 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
4236
4237 color = tmp;
4238 }
4239
4240 for (unsigned i = 0; i < components; i++)
4241 dst[i] = offset(color, bld, i);
4242 }
4243
4244 uint32_t
4245 brw_fb_write_msg_control(const fs_inst *inst,
4246 const struct brw_wm_prog_data *prog_data)
4247 {
4248 uint32_t mctl;
4249
4250 if (inst->opcode == FS_OPCODE_REP_FB_WRITE) {
4251 assert(inst->group == 0 && inst->exec_size == 16);
4252 mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
4253 } else if (prog_data->dual_src_blend) {
4254 assert(inst->exec_size == 8);
4255
4256 if (inst->group % 16 == 0)
4257 mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
4258 else if (inst->group % 16 == 8)
4259 mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
4260 else
4261 unreachable("Invalid dual-source FB write instruction group");
4262 } else {
4263 assert(inst->group == 0 || (inst->group == 16 && inst->exec_size == 16));
4264
4265 if (inst->exec_size == 16)
4266 mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
4267 else if (inst->exec_size == 8)
4268 mctl = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
4269 else
4270 unreachable("Invalid FB write execution size");
4271 }
4272
4273 return mctl;
4274 }
4275
4276 static void
4277 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
4278 const struct brw_wm_prog_data *prog_data,
4279 const brw_wm_prog_key *key,
4280 const fs_visitor::thread_payload &payload)
4281 {
4282 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
4283 const gen_device_info *devinfo = bld.shader->devinfo;
4284 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
4285 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
4286 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
4287 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
4288 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
4289 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
4290 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
4291 const unsigned components =
4292 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
4293
4294 /* We can potentially have a message length of up to 15, so we have to set
4295 * base_mrf to either 0 or 1 in order to fit in m0..m15.
4296 */
4297 fs_reg sources[15];
4298 int header_size = 2, payload_header_size;
4299 unsigned length = 0;
4300
4301 if (devinfo->gen < 6) {
4302 /* TODO: Support SIMD32 on gen4-5 */
4303 assert(bld.group() < 16);
4304
4305 /* For gen4-5, we always have a header consisting of g0 and g1. We have
4306 * an implied MOV from g0,g1 to the start of the message. The MOV from
4307 * g0 is handled by the hardware and the MOV from g1 is provided by the
4308 * generator. This is required because, on gen4-5, the generator may
4309 * generate two write messages with different message lengths in order
4310 * to handle AA data properly.
4311 *
4312 * Also, since the pixel mask goes in the g0 portion of the message and
4313 * since render target writes are the last thing in the shader, we write
4314 * the pixel mask directly into g0 and it will get copied as part of the
4315 * implied write.
4316 */
4317 if (prog_data->uses_kill) {
4318 bld.exec_all().group(1, 0)
4319 .MOV(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW),
4320 brw_flag_reg(0, 1));
4321 }
4322
4323 assert(length == 0);
4324 length = 2;
4325 } else if ((devinfo->gen <= 7 && !devinfo->is_haswell &&
4326 prog_data->uses_kill) ||
4327 (devinfo->gen < 11 &&
4328 (color1.file != BAD_FILE || key->nr_color_regions > 1))) {
4329 /* From the Sandy Bridge PRM, volume 4, page 198:
4330 *
4331 * "Dispatched Pixel Enables. One bit per pixel indicating
4332 * which pixels were originally enabled when the thread was
4333 * dispatched. This field is only required for the end-of-
4334 * thread message and on all dual-source messages."
4335 */
4336 const fs_builder ubld = bld.exec_all().group(8, 0);
4337
4338 fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
4339 if (bld.group() < 16) {
4340 /* The header starts off as g0 and g1 for the first half */
4341 ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
4342 BRW_REGISTER_TYPE_UD));
4343 } else {
4344 /* The header starts off as g0 and g2 for the second half */
4345 assert(bld.group() < 32);
4346 const fs_reg header_sources[2] = {
4347 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
4348 retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD),
4349 };
4350 ubld.LOAD_PAYLOAD(header, header_sources, 2, 0);
4351 }
4352
4353 uint32_t g00_bits = 0;
4354
4355 /* Set "Source0 Alpha Present to RenderTarget" bit in message
4356 * header.
4357 */
4358 if (inst->target > 0 && prog_data->replicate_alpha)
4359 g00_bits |= 1 << 11;
4360
4361 /* Set computes stencil to render target */
4362 if (prog_data->computed_stencil)
4363 g00_bits |= 1 << 14;
4364
4365 if (g00_bits) {
4366 /* OR extra bits into g0.0 */
4367 ubld.group(1, 0).OR(component(header, 0),
4368 retype(brw_vec1_grf(0, 0),
4369 BRW_REGISTER_TYPE_UD),
4370 brw_imm_ud(g00_bits));
4371 }
4372
4373 /* Set the render target index for choosing BLEND_STATE. */
4374 if (inst->target > 0) {
4375 ubld.group(1, 0).MOV(component(header, 2), brw_imm_ud(inst->target));
4376 }
4377
4378 if (prog_data->uses_kill) {
4379 assert(bld.group() < 16);
4380 ubld.group(1, 0).MOV(retype(component(header, 15),
4381 BRW_REGISTER_TYPE_UW),
4382 brw_flag_reg(0, 1));
4383 }
4384
4385 assert(length == 0);
4386 sources[0] = header;
4387 sources[1] = horiz_offset(header, 8);
4388 length = 2;
4389 }
4390 assert(length == 0 || length == 2);
4391 header_size = length;
4392
4393 if (payload.aa_dest_stencil_reg[0]) {
4394 assert(inst->group < 16);
4395 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
4396 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
4397 .MOV(sources[length],
4398 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg[0], 0)));
4399 length++;
4400 }
4401
4402 bool src0_alpha_present = false;
4403
4404 if (src0_alpha.file != BAD_FILE) {
4405 for (unsigned i = 0; i < bld.dispatch_width() / 8; i++) {
4406 const fs_builder &ubld = bld.exec_all().group(8, i)
4407 .annotate("FB write src0 alpha");
4408 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_F);
4409 ubld.MOV(tmp, horiz_offset(src0_alpha, i * 8));
4410 setup_color_payload(ubld, key, &sources[length], tmp, 1);
4411 length++;
4412 }
4413 src0_alpha_present = true;
4414 } else if (prog_data->replicate_alpha && inst->target != 0) {
4415 /* Handle the case when fragment shader doesn't write to draw buffer
4416 * zero. No need to call setup_color_payload() for src0_alpha because
4417 * alpha value will be undefined.
4418 */
4419 length += bld.dispatch_width() / 8;
4420 src0_alpha_present = true;
4421 }
4422
4423 if (sample_mask.file != BAD_FILE) {
4424 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
4425 BRW_REGISTER_TYPE_UD);
4426
4427 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel
4428  * are relevant. Since the mask is unsigned single words, one VGRF is
4429  * always 16-wide, but the hardware only uses the lower or upper 8
4430  * channels for a SIMD8 write, depending on whether we selected the
4431  * subspans for the first or second half respectively.
4432  */
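/* A sketch of the reinterpretation (register hypothetical):
 *
 *    g10<8,8,1>UD  viewed as  g10.0<16,8,2>UW
 *
 * retyping to UW and doubling the stride walks only the low word of
 * each dword, which is all the hardware reads for oMask.
 */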
4433 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
4434 sample_mask.type = BRW_REGISTER_TYPE_UW;
4435 sample_mask.stride *= 2;
4436
4437 bld.exec_all().annotate("FB write oMask")
4438 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
4439 inst->group % 16),
4440 sample_mask);
4441 length++;
4442 }
4443
4444 payload_header_size = length;
4445
4446 setup_color_payload(bld, key, &sources[length], color0, components);
4447 length += 4;
4448
4449 if (color1.file != BAD_FILE) {
4450 setup_color_payload(bld, key, &sources[length], color1, components);
4451 length += 4;
4452 }
4453
4454 if (src_depth.file != BAD_FILE) {
4455 sources[length] = src_depth;
4456 length++;
4457 }
4458
4459 if (dst_depth.file != BAD_FILE) {
4460 sources[length] = dst_depth;
4461 length++;
4462 }
4463
4464 if (src_stencil.file != BAD_FILE) {
4465 assert(devinfo->gen >= 9);
4466 assert(bld.dispatch_width() == 8);
4467
4468 /* XXX: src_stencil is only available on gen9+. dst_depth is never
4469 * available on gen9+. As such it's impossible to have both enabled at the
4470 * same time and therefore length cannot overrun the array.
4471 */
4472 assert(length < 15);
4473
4474 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
4475 bld.exec_all().annotate("FB write OS")
4476 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
4477 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
4478 length++;
4479 }
4480
4481 fs_inst *load;
4482 if (devinfo->gen >= 7) {
4483 /* Send from the GRF */
4484 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
4485 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
4486 payload.nr = bld.shader->alloc.allocate(regs_written(load));
4487 load->dst = payload;
4488
4489 uint32_t msg_ctl = brw_fb_write_msg_control(inst, prog_data);
4490 uint32_t ex_desc = 0;
4491
4492 inst->desc =
4493 (inst->group / 16) << 11 | /* rt slot group */
4494 brw_dp_write_desc(devinfo, inst->target, msg_ctl,
4495 GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE,
4496 inst->last_rt, false);
4497
4498 if (devinfo->gen >= 11) {
4499 /* Set the "Render Target Index" and "Src0 Alpha Present" fields
4500 * in the extended message descriptor, in lieu of using a header.
4501 */
4502 ex_desc = inst->target << 12 | src0_alpha_present << 15;
4503
4504 if (key->nr_color_regions == 0)
4505 ex_desc |= 1 << 20; /* Null Render Target */
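/* E.g. (a sketch) target == 2 with src0 alpha present and at least
 * one color region: ex_desc == (2 << 12) | (1 << 15) == 0xa000.
 */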
4506 }
4507
4508 inst->opcode = SHADER_OPCODE_SEND;
4509 inst->resize_sources(3);
4510 inst->sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
4511 inst->src[0] = brw_imm_ud(inst->desc);
4512 inst->src[1] = brw_imm_ud(ex_desc);
4513 inst->src[2] = payload;
4514 inst->mlen = regs_written(load);
4515 inst->ex_mlen = 0;
4516 inst->header_size = header_size;
4517 inst->check_tdr = true;
4518 inst->send_has_side_effects = true;
4519 } else {
4520 /* Send from the MRF */
4521 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
4522 sources, length, payload_header_size);
4523
4524 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
4525 * will do this for us if we just give it a COMPR4 destination.
4526 */
4527 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
4528 load->dst.nr |= BRW_MRF_COMPR4;
4529
4530 if (devinfo->gen < 6) {
4531 /* Set up src[0] for the implied MOV from grf0-1 */
4532 inst->resize_sources(1);
4533 inst->src[0] = brw_vec8_grf(0, 0);
4534 } else {
4535 inst->resize_sources(0);
4536 }
4537 inst->base_mrf = 1;
4538 inst->opcode = FS_OPCODE_FB_WRITE;
4539 inst->mlen = regs_written(load);
4540 inst->header_size = header_size;
4541 }
4542 }
4543
4544 static void
4545 lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
4546 {
4547 const fs_builder &ubld = bld.exec_all().group(8, 0);
4548 const unsigned length = 2;
4549 const fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, length);
4550
4551 if (bld.group() < 16) {
4552 ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
4553 BRW_REGISTER_TYPE_UD));
4554 } else {
4555 assert(bld.group() < 32);
4556 const fs_reg header_sources[] = {
4557 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
4558 retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD)
4559 };
4560 ubld.LOAD_PAYLOAD(header, header_sources, ARRAY_SIZE(header_sources), 0);
4561 }
4562
4563 inst->resize_sources(1);
4564 inst->src[0] = header;
4565 inst->opcode = FS_OPCODE_FB_READ;
4566 inst->mlen = length;
4567 inst->header_size = length;
4568 }
4569
4570 static void
4571 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
4572 const fs_reg &coordinate,
4573 const fs_reg &shadow_c,
4574 const fs_reg &lod, const fs_reg &lod2,
4575 const fs_reg &surface,
4576 const fs_reg &sampler,
4577 unsigned coord_components,
4578 unsigned grad_components)
4579 {
4580 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
4581 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
4582 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
4583 fs_reg msg_end = msg_begin;
4584
4585 /* g0 header. */
4586 msg_end = offset(msg_end, bld.group(8, 0), 1);
4587
4588 for (unsigned i = 0; i < coord_components; i++)
4589 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
4590 offset(coordinate, bld, i));
4591
4592 msg_end = offset(msg_end, bld, coord_components);
4593
4594 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
4595 * require all three components to be present and zero if they are unused.
4596 */
4597 if (coord_components > 0 &&
4598 (has_lod || shadow_c.file != BAD_FILE ||
4599 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
4600 for (unsigned i = coord_components; i < 3; i++)
4601 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
4602
4603 msg_end = offset(msg_end, bld, 3 - coord_components);
4604 }
4605
4606 if (op == SHADER_OPCODE_TXD) {
4607 /* TXD unsupported in SIMD16 mode. */
4608 assert(bld.dispatch_width() == 8);
4609
4610 /* the slots for u and v are always present, but r is optional */
4611 if (coord_components < 2)
4612 msg_end = offset(msg_end, bld, 2 - coord_components);
4613
4614 /* P = u, v, r
4615 * dPdx = dudx, dvdx, drdx
4616 * dPdy = dudy, dvdy, drdy
4617 *
4618 * 1-arg: Does not exist.
4619 *
4620  * 2-arg: dudx   dvdx   dudy   dvdy
4621  *        dPdx.x dPdx.y dPdy.x dPdy.y
4622  *        m4     m5     m6     m7
4623  *
4624  * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
4625  *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
4626  *        m5     m6     m7     m8     m9     m10
4627 */
4628 for (unsigned i = 0; i < grad_components; i++)
4629 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
4630
4631 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
4632
4633 for (unsigned i = 0; i < grad_components; i++)
4634 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
4635
4636 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
4637 }
4638
4639 if (has_lod) {
4640 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *Without*
4641 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
4642 */
4643 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
4644 bld.dispatch_width() == 16);
4645
4646 const brw_reg_type type =
4647 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
4648 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
4649 bld.MOV(retype(msg_end, type), lod);
4650 msg_end = offset(msg_end, bld, 1);
4651 }
4652
4653 if (shadow_c.file != BAD_FILE) {
4654 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
4655 /* There's no plain shadow compare message, so we use shadow
4656 * compare with a bias of 0.0.
4657 */
4658 bld.MOV(msg_end, brw_imm_f(0.0f));
4659 msg_end = offset(msg_end, bld, 1);
4660 }
4661
4662 bld.MOV(msg_end, shadow_c);
4663 msg_end = offset(msg_end, bld, 1);
4664 }
4665
4666 inst->opcode = op;
4667 inst->src[0] = reg_undef;
4668 inst->src[1] = surface;
4669 inst->src[2] = sampler;
4670 inst->resize_sources(3);
4671 inst->base_mrf = msg_begin.nr;
4672 inst->mlen = msg_end.nr - msg_begin.nr;
4673 inst->header_size = 1;
4674 }
4675
4676 static void
4677 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
4678 const fs_reg &coordinate,
4679 const fs_reg &shadow_c,
4680 const fs_reg &lod, const fs_reg &lod2,
4681 const fs_reg &sample_index,
4682 const fs_reg &surface,
4683 const fs_reg &sampler,
4684 unsigned coord_components,
4685 unsigned grad_components)
4686 {
4687 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
4688 fs_reg msg_coords = message;
4689 unsigned header_size = 0;
4690
4691 if (inst->offset != 0) {
4692 /* The offsets set up by the visitor are in the m1 header, so we can't
4693 * go headerless.
4694 */
4695 header_size = 1;
4696 message.nr--;
4697 }
4698
4699 for (unsigned i = 0; i < coord_components; i++)
4700 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
4701 offset(coordinate, bld, i));
4702
4703 fs_reg msg_end = offset(msg_coords, bld, coord_components);
4704 fs_reg msg_lod = offset(msg_coords, bld, 4);
4705
4706 if (shadow_c.file != BAD_FILE) {
4707 fs_reg msg_shadow = msg_lod;
4708 bld.MOV(msg_shadow, shadow_c);
4709 msg_lod = offset(msg_shadow, bld, 1);
4710 msg_end = msg_lod;
4711 }
4712
4713 switch (op) {
4714 case SHADER_OPCODE_TXL:
4715 case FS_OPCODE_TXB:
4716 bld.MOV(msg_lod, lod);
4717 msg_end = offset(msg_lod, bld, 1);
4718 break;
4719 case SHADER_OPCODE_TXD:
4720 /**
4721 * P = u, v, r
4722 * dPdx = dudx, dvdx, drdx
4723 * dPdy = dudy, dvdy, drdy
4724 *
4725 * Load up these values:
4726 * - dudx dudy dvdx dvdy drdx drdy
4727 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
4728 */
4729 msg_end = msg_lod;
4730 for (unsigned i = 0; i < grad_components; i++) {
4731 bld.MOV(msg_end, offset(lod, bld, i));
4732 msg_end = offset(msg_end, bld, 1);
4733
4734 bld.MOV(msg_end, offset(lod2, bld, i));
4735 msg_end = offset(msg_end, bld, 1);
4736 }
4737 break;
4738 case SHADER_OPCODE_TXS:
4739 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
4740 bld.MOV(msg_lod, lod);
4741 msg_end = offset(msg_lod, bld, 1);
4742 break;
4743 case SHADER_OPCODE_TXF:
4744 msg_lod = offset(msg_coords, bld, 3);
4745 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
4746 msg_end = offset(msg_lod, bld, 1);
4747 break;
4748 case SHADER_OPCODE_TXF_CMS:
4749 msg_lod = offset(msg_coords, bld, 3);
4750 /* lod */
4751 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
4752 /* sample index */
4753 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
4754 msg_end = offset(msg_lod, bld, 2);
4755 break;
4756 default:
4757 break;
4758 }
4759
4760 inst->opcode = op;
4761 inst->src[0] = reg_undef;
4762 inst->src[1] = surface;
4763 inst->src[2] = sampler;
4764 inst->resize_sources(3);
4765 inst->base_mrf = message.nr;
4766 inst->mlen = msg_end.nr - message.nr;
4767 inst->header_size = header_size;
4768
4769 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4770 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4771 }
4772
4773 static bool
4774 is_high_sampler(const struct gen_device_info *devinfo, const fs_reg &sampler)
4775 {
4776 if (devinfo->gen < 8 && !devinfo->is_haswell)
4777 return false;
4778
4779 return sampler.file != IMM || sampler.ud >= 16;
4780 }
4781
4782 static unsigned
4783 sampler_msg_type(const gen_device_info *devinfo,
4784 opcode opcode, bool shadow_compare)
4785 {
4786 assert(devinfo->gen >= 5);
4787 switch (opcode) {
4788 case SHADER_OPCODE_TEX:
4789 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE :
4790 GEN5_SAMPLER_MESSAGE_SAMPLE;
4791 case FS_OPCODE_TXB:
4792 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE :
4793 GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
4794 case SHADER_OPCODE_TXL:
4795 return shadow_compare ? GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE :
4796 GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
4797 case SHADER_OPCODE_TXL_LZ:
4798 return shadow_compare ? GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ :
4799 GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
4800 case SHADER_OPCODE_TXS:
4801 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
4802 return GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
4803 case SHADER_OPCODE_TXD:
4804 assert(!shadow_compare || devinfo->gen >= 8 || devinfo->is_haswell);
4805 return shadow_compare ? HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE :
4806 GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
4807 case SHADER_OPCODE_TXF:
4808 return GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
4809 case SHADER_OPCODE_TXF_LZ:
4810 assert(devinfo->gen >= 9);
4811 return GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
4812 case SHADER_OPCODE_TXF_CMS_W:
4813 assert(devinfo->gen >= 9);
4814 return GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
4815 case SHADER_OPCODE_TXF_CMS:
4816 return devinfo->gen >= 7 ? GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS :
4817 GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
4818 case SHADER_OPCODE_TXF_UMS:
4819 assert(devinfo->gen >= 7);
4820 return GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
4821 case SHADER_OPCODE_TXF_MCS:
4822 assert(devinfo->gen >= 7);
4823 return GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
4824 case SHADER_OPCODE_LOD:
4825 return GEN5_SAMPLER_MESSAGE_LOD;
4826 case SHADER_OPCODE_TG4:
4827 assert(devinfo->gen >= 7);
4828 return shadow_compare ? GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C :
4829 GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
4831 case SHADER_OPCODE_TG4_OFFSET:
4832 assert(devinfo->gen >= 7);
4833 return shadow_compare ? GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C :
4834 GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
4835 case SHADER_OPCODE_SAMPLEINFO:
4836 return GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
4837 default:
4838 unreachable("not reached");
4839 }
4840 }
4841
4842 static void
4843 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
4844 const fs_reg &coordinate,
4845 const fs_reg &shadow_c,
4846 fs_reg lod, const fs_reg &lod2,
4847 const fs_reg &min_lod,
4848 const fs_reg &sample_index,
4849 const fs_reg &mcs,
4850 const fs_reg &surface,
4851 const fs_reg &sampler,
4852 const fs_reg &surface_handle,
4853 const fs_reg &sampler_handle,
4854 const fs_reg &tg4_offset,
4855 unsigned coord_components,
4856 unsigned grad_components)
4857 {
4858 const gen_device_info *devinfo = bld.shader->devinfo;
4859 const brw_stage_prog_data *prog_data = bld.shader->stage_prog_data;
4860 unsigned reg_width = bld.dispatch_width() / 8;
4861 unsigned header_size = 0, length = 0;
4862 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
4863 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
4864 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
4865
4866 /* We must have exactly one of surface/sampler and surface/sampler_handle */
4867 assert((surface.file == BAD_FILE) != (surface_handle.file == BAD_FILE));
4868 assert((sampler.file == BAD_FILE) != (sampler_handle.file == BAD_FILE));
4869
4870 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
4871 inst->offset != 0 || inst->eot ||
4872 op == SHADER_OPCODE_SAMPLEINFO ||
4873 sampler_handle.file != BAD_FILE ||
4874 is_high_sampler(devinfo, sampler)) {
4875 /* For general texture offsets (no txf workaround), we need a header to
4876 * put them in.
4877 *
4878 * TG4 needs to place its channel select in the header, for interaction
4879 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
4880 * larger sampler numbers we need to offset the Sampler State Pointer in
4881 * the header.
4882 */
4883 fs_reg header = retype(sources[0], BRW_REGISTER_TYPE_UD);
4884 header_size = 1;
4885 length++;
4886
4887 /* If we're requesting fewer than four channels worth of response,
4888 * and we have an explicit header, we need to set up the sampler
4889 * writemask. It's reversed from normal: 1 means "don't write".
4890 */
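/* A worked example (sketch): if the shader consumes only two
 * response channels, regs_written(inst) / reg_width == 2, so
 * mask == ~0b0011 & 0xf == 0b1100 and the sampler skips B and A.
 */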
4891 if (!inst->eot && regs_written(inst) != 4 * reg_width) {
4892 assert(regs_written(inst) % reg_width == 0);
4893 unsigned mask = ~((1 << (regs_written(inst) / reg_width)) - 1) & 0xf;
4894 inst->offset |= mask << 12;
4895 }
4896
4897 /* Build the actual header */
4898 const fs_builder ubld = bld.exec_all().group(8, 0);
4899 const fs_builder ubld1 = ubld.group(1, 0);
4900 ubld.MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
4901 if (inst->offset) {
4902 ubld1.MOV(component(header, 2), brw_imm_ud(inst->offset));
4903 } else if (bld.shader->stage != MESA_SHADER_VERTEX &&
4904 bld.shader->stage != MESA_SHADER_FRAGMENT) {
4905 /* The vertex and fragment stages have g0.2 set to 0, so
4906 * header0.2 is 0 when g0 is copied. Other stages may not, so we
4907 * must set it to 0 to avoid setting undesirable bits in the
4908 * message.
4909 */
4910 ubld1.MOV(component(header, 2), brw_imm_ud(0));
4911 }
4912
4913 if (sampler_handle.file != BAD_FILE) {
4914 /* Bindless sampler handles aren't relative to the sampler state
4915 * pointer passed into the shader through SAMPLER_STATE_POINTERS_*.
4916 * Instead, it's an absolute pointer relative to dynamic state base
4917 * address.
4918 *
4919 * Sampler states are 16 bytes each and the pointer we give here has
4920 * to be 32-byte aligned. In order to avoid more indirect messages
4921 * than required, we assume that all bindless sampler states are
4922 * 32-byte aligned. This sacrifices a bit of general state base
4923 * address space but means we can do something more efficient in the
4924 * shader.
4925 */
4926 ubld1.MOV(component(header, 3), sampler_handle);
4927 } else if (is_high_sampler(devinfo, sampler)) {
4928 if (sampler.file == BRW_IMMEDIATE_VALUE) {
4929 assert(sampler.ud >= 16);
4930 const int sampler_state_size = 16; /* 16 bytes */
4931
4932 ubld1.ADD(component(header, 3),
4933 retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
4934 brw_imm_ud(16 * (sampler.ud / 16) * sampler_state_size));
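/* E.g. (a sketch) sampler.ud == 20: the Sampler State Pointer
 * advances by 16 * (20 / 16) * 16 == 256 bytes, one group of
 * 16 sampler states, and the descriptor then selects 20 % 16 == 4.
 */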
4935 } else {
4936 fs_reg tmp = ubld1.vgrf(BRW_REGISTER_TYPE_UD);
4937 ubld1.AND(tmp, sampler, brw_imm_ud(0x0f0));
4938 ubld1.SHL(tmp, tmp, brw_imm_ud(4));
4939 ubld1.ADD(component(header, 3),
4940 retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
4941 tmp);
4942 }
4943 }
4944 }
4945
4946 if (shadow_c.file != BAD_FILE) {
4947 bld.MOV(sources[length], shadow_c);
4948 length++;
4949 }
4950
4951 bool coordinate_done = false;
4952
4953 /* Set up the LOD info */
4954 switch (op) {
4955 case FS_OPCODE_TXB:
4956 case SHADER_OPCODE_TXL:
4957 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4958 op = SHADER_OPCODE_TXL_LZ;
4959 break;
4960 }
4961 bld.MOV(sources[length], lod);
4962 length++;
4963 break;
4964 case SHADER_OPCODE_TXD:
4965 /* TXD should have been lowered in SIMD16 mode. */
4966 assert(bld.dispatch_width() == 8);
4967
4968 /* Load dPdx and the coordinate together:
4969 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4970 */
4971 for (unsigned i = 0; i < coord_components; i++) {
4972 bld.MOV(sources[length++], offset(coordinate, bld, i));
4973
4974 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4975 * only derivatives for (u, v, r).
4976 */
4977 if (i < grad_components) {
4978 bld.MOV(sources[length++], offset(lod, bld, i));
4979 bld.MOV(sources[length++], offset(lod2, bld, i));
4980 }
4981 }
4982
4983 coordinate_done = true;
4984 break;
4985 case SHADER_OPCODE_TXS:
4986 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4987 length++;
4988 break;
4989 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
4990 /* We need an LOD; just use 0 */
4991 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
4992 length++;
4993 break;
4994 case SHADER_OPCODE_TXF:
4995 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4996 * On Gen9 they are u, v, lod, r.
4997 */
4998 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4999
5000 if (devinfo->gen >= 9) {
5001 if (coord_components >= 2) {
5002 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
5003 offset(coordinate, bld, 1));
5004 } else {
5005 sources[length] = brw_imm_d(0);
5006 }
5007 length++;
5008 }
5009
5010 if (devinfo->gen >= 9 && lod.is_zero()) {
5011 op = SHADER_OPCODE_TXF_LZ;
5012 } else {
5013 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
5014 length++;
5015 }
5016
5017 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
5018 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
5019 offset(coordinate, bld, i));
5020
5021 coordinate_done = true;
5022 break;
5023
5024 case SHADER_OPCODE_TXF_CMS:
5025 case SHADER_OPCODE_TXF_CMS_W:
5026 case SHADER_OPCODE_TXF_UMS:
5027 case SHADER_OPCODE_TXF_MCS:
5028 if (op == SHADER_OPCODE_TXF_UMS ||
5029 op == SHADER_OPCODE_TXF_CMS ||
5030 op == SHADER_OPCODE_TXF_CMS_W) {
5031 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
5032 length++;
5033 }
5034
5035 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
5036 /* Data from the multisample control surface. */
5037 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
5038 length++;
5039
5040 /* On Gen9+ we'll use ld2dms_w instead which has two registers for
5041 * the MCS data.
5042 */
5043 if (op == SHADER_OPCODE_TXF_CMS_W) {
5044 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
5045 mcs.file == IMM ?
5046 mcs :
5047 offset(mcs, bld, 1));
5048 length++;
5049 }
5050 }
5051
5052 /* There is no offsetting for this message; just copy in the integer
5053 * texture coordinates.
5054 */
5055 for (unsigned i = 0; i < coord_components; i++)
5056 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
5057 offset(coordinate, bld, i));
5058
5059 coordinate_done = true;
5060 break;
5061 case SHADER_OPCODE_TG4_OFFSET:
5062 /* More crazy intermixing */
5063 for (unsigned i = 0; i < 2; i++) /* u, v */
5064 bld.MOV(sources[length++], offset(coordinate, bld, i));
5065
5066 for (unsigned i = 0; i < 2; i++) /* offu, offv */
5067 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
5068 offset(tg4_offset, bld, i));
5069
5070 if (coord_components == 3) /* r if present */
5071 bld.MOV(sources[length++], offset(coordinate, bld, 2));
5072
5073 coordinate_done = true;
5074 break;
5075 default:
5076 break;
5077 }
5078
5079 /* Set up the coordinate (except for cases where it was done above) */
5080 if (!coordinate_done) {
5081 for (unsigned i = 0; i < coord_components; i++)
5082 bld.MOV(sources[length++], offset(coordinate, bld, i));
5083 }
5084
5085 if (min_lod.file != BAD_FILE) {
5086 /* Account for all of the missing coordinate sources */
5087 length += 4 - coord_components;
5088 if (op == SHADER_OPCODE_TXD)
5089 length += (3 - grad_components) * 2;
5090
5091 bld.MOV(sources[length++], min_lod);
5092 }
5093
5094 unsigned mlen;
5095 if (reg_width == 2)
5096 mlen = length * reg_width - header_size;
5097 else
5098 mlen = length * reg_width;
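/* E.g. (a sketch) SIMD16 (reg_width == 2) with a header and three
 * parameters: length == 4, so mlen == 4 * 2 - 1 == 7 GRFs, because the
 * header occupies one GRF rather than a full SIMD16 pair.
 */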
5099
5100 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
5101 BRW_REGISTER_TYPE_F);
5102 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
5103
5104 /* Generate the SEND. */
5105 inst->opcode = SHADER_OPCODE_SEND;
5106 inst->mlen = mlen;
5107 inst->header_size = header_size;
5108
5109 const unsigned msg_type =
5110 sampler_msg_type(devinfo, op, inst->shadow_compare);
5111 const unsigned simd_mode =
5112 inst->exec_size <= 8 ? BRW_SAMPLER_SIMD_MODE_SIMD8 :
5113 BRW_SAMPLER_SIMD_MODE_SIMD16;
5114
5115 uint32_t base_binding_table_index;
5116 switch (op) {
5117 case SHADER_OPCODE_TG4:
5118 case SHADER_OPCODE_TG4_OFFSET:
5119 base_binding_table_index = prog_data->binding_table.gather_texture_start;
5120 break;
5121 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
5122 base_binding_table_index = prog_data->binding_table.image_start;
5123 break;
5124 default:
5125 base_binding_table_index = prog_data->binding_table.texture_start;
5126 break;
5127 }
5128
5129 inst->sfid = BRW_SFID_SAMPLER;
5130 if (surface.file == IMM &&
5131 (sampler.file == IMM || sampler_handle.file != BAD_FILE)) {
5132 inst->desc = brw_sampler_desc(devinfo,
5133 surface.ud + base_binding_table_index,
5134 sampler.file == IMM ? sampler.ud % 16 : 0,
5135 msg_type,
5136 simd_mode,
5137 0 /* return_format unused on gen7+ */);
5138 inst->src[0] = brw_imm_ud(0);
5139 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5140 } else if (surface_handle.file != BAD_FILE) {
5141 /* Bindless surface */
5142 assert(devinfo->gen >= 9);
5143 inst->desc = brw_sampler_desc(devinfo,
5144 GEN9_BTI_BINDLESS,
5145 sampler.file == IMM ? sampler.ud % 16 : 0,
5146 msg_type,
5147 simd_mode,
5148 0 /* return_format unused on gen7+ */);
5149
5150 /* For bindless samplers, the entire address is included in the message
5151 * header, so we can leave that portion of the message descriptor 0.
5152 */
5153 if (sampler_handle.file != BAD_FILE || sampler.file == IMM) {
5154 inst->src[0] = brw_imm_ud(0);
5155 } else {
5156 const fs_builder ubld = bld.group(1, 0).exec_all();
5157 fs_reg desc = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5158 ubld.SHL(desc, sampler, brw_imm_ud(8));
5159 inst->src[0] = desc;
5160 }
5161
5162 /* We assume that the driver provided the handle in the top 20 bits so
5163 * we can use the surface handle directly as the extended descriptor.
5164 */
5165 inst->src[1] = retype(surface_handle, BRW_REGISTER_TYPE_UD);
5166 } else {
5167 /* Immediate portion of the descriptor */
5168 inst->desc = brw_sampler_desc(devinfo,
5169 0, /* surface */
5170 0, /* sampler */
5171 msg_type,
5172 simd_mode,
5173 0 /* return_format unused on gen7+ */);
5174 const fs_builder ubld = bld.group(1, 0).exec_all();
5175 fs_reg desc = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5176 if (surface.equals(sampler)) {
5177 /* This case is common in GL */
5178 ubld.MUL(desc, surface, brw_imm_ud(0x101));
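/* A sketch of the trick: for any index s < 256,
 * s * 0x101 == (s << 8) | s, so one MUL fills both the sampler
 * and surface fields of the descriptor at once.
 */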
5179 } else {
5180 if (sampler_handle.file != BAD_FILE) {
5181 ubld.MOV(desc, surface);
5182 } else if (sampler.file == IMM) {
5183 ubld.OR(desc, surface, brw_imm_ud(sampler.ud << 8));
5184 } else {
5185 ubld.SHL(desc, sampler, brw_imm_ud(8));
5186 ubld.OR(desc, desc, surface);
5187 }
5188 }
5189 if (base_binding_table_index)
5190 ubld.ADD(desc, desc, brw_imm_ud(base_binding_table_index));
5191 ubld.AND(desc, desc, brw_imm_ud(0xfff));
5192
5193 inst->src[0] = component(desc, 0);
5194 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5195 }
5196
5197 inst->src[2] = src_payload;
5198 inst->resize_sources(3);
5199
5200 if (inst->eot) {
5201       /* EOT sampler messages don't make sense to split because doing so
5202        * would involve ending half of the thread early.
5203 */
5204 assert(inst->group == 0);
5205 /* We need to use SENDC for EOT sampler messages */
5206 inst->check_tdr = true;
5207 inst->send_has_side_effects = true;
5208 }
5209
5210 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
5211 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
5212 }
5213
5214 static void
5215 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
5216 {
5217 const gen_device_info *devinfo = bld.shader->devinfo;
5218 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
5219 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
5220 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
5221 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
5222 const fs_reg &min_lod = inst->src[TEX_LOGICAL_SRC_MIN_LOD];
5223 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
5224 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
5225 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
5226 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
5227 const fs_reg &surface_handle = inst->src[TEX_LOGICAL_SRC_SURFACE_HANDLE];
5228 const fs_reg &sampler_handle = inst->src[TEX_LOGICAL_SRC_SAMPLER_HANDLE];
5229 const fs_reg &tg4_offset = inst->src[TEX_LOGICAL_SRC_TG4_OFFSET];
5230 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
5231 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
5232 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
5233 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
5234
5235 if (devinfo->gen >= 7) {
5236 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
5237 shadow_c, lod, lod2, min_lod,
5238 sample_index,
5239 mcs, surface, sampler,
5240 surface_handle, sampler_handle,
5241 tg4_offset,
5242 coord_components, grad_components);
5243 } else if (devinfo->gen >= 5) {
5244 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
5245 shadow_c, lod, lod2, sample_index,
5246 surface, sampler,
5247 coord_components, grad_components);
5248 } else {
5249 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
5250 shadow_c, lod, lod2,
5251 surface, sampler,
5252 coord_components, grad_components);
5253 }
5254 }
5255
5256 static void
5257 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
5258 {
5259 const gen_device_info *devinfo = bld.shader->devinfo;
5260
5261 /* Get the logical send arguments. */
5262 const fs_reg &addr = inst->src[SURFACE_LOGICAL_SRC_ADDRESS];
5263 const fs_reg &src = inst->src[SURFACE_LOGICAL_SRC_DATA];
5264 const fs_reg &surface = inst->src[SURFACE_LOGICAL_SRC_SURFACE];
5265 const fs_reg &surface_handle = inst->src[SURFACE_LOGICAL_SRC_SURFACE_HANDLE];
5266 const UNUSED fs_reg &dims = inst->src[SURFACE_LOGICAL_SRC_IMM_DIMS];
5267 const fs_reg &arg = inst->src[SURFACE_LOGICAL_SRC_IMM_ARG];
5268 assert(arg.file == IMM);
5269
5270 /* We must have exactly one of surface and surface_handle */
5271 assert((surface.file == BAD_FILE) != (surface_handle.file == BAD_FILE));
5272
5273 /* Calculate the total number of components of the payload. */
5274 const unsigned addr_sz = inst->components_read(SURFACE_LOGICAL_SRC_ADDRESS);
5275 const unsigned src_sz = inst->components_read(SURFACE_LOGICAL_SRC_DATA);
5276
5277 const bool is_typed_access =
5278 inst->opcode == SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL ||
5279 inst->opcode == SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL ||
5280 inst->opcode == SHADER_OPCODE_TYPED_ATOMIC_LOGICAL;
5281
5282 const bool is_surface_access = is_typed_access ||
5283 inst->opcode == SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL ||
5284 inst->opcode == SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL ||
5285 inst->opcode == SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL;
5286
5287 const bool is_stateless =
5288 surface.file == IMM && (surface.ud == BRW_BTI_STATELESS ||
5289 surface.ud == GEN8_BTI_STATELESS_NON_COHERENT);
5290
5291 const bool has_side_effects = inst->has_side_effects();
5292 fs_reg sample_mask = has_side_effects ? bld.sample_mask_reg() :
5293 fs_reg(brw_imm_d(0xffff));
5294
5295 /* From the BDW PRM Volume 7, page 147:
5296 *
5297 * "For the Data Cache Data Port*, the header must be present for the
5298 * following message types: [...] Typed read/write/atomics"
5299 *
5300 * Earlier generations have a similar wording. Because of this restriction
5301 * we don't attempt to implement sample masks via predication for such
5302 * messages prior to Gen9, since we have to provide a header anyway. On
5303 * Gen11+ the header has been removed so we can only use predication.
5304 *
5305     * For all stateless A32 messages, we also need a header.
5306 */
5307 fs_reg header;
5308 if ((devinfo->gen < 9 && is_typed_access) || is_stateless) {
5309 fs_builder ubld = bld.exec_all().group(8, 0);
5310 header = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5311 ubld.MOV(header, brw_imm_d(0));
5312 if (is_stateless) {
5313 /* Both the typed and scattered byte/dword A32 messages take a buffer
5314 * base address in R0.5:[31:0] (See MH1_A32_PSM for typed messages or
5315 * MH_A32_GO for byte/dword scattered messages in the SKL PRM Vol. 2d
5316 * for more details.) This is conveniently where the HW places the
5317 * scratch surface base address.
5318 *
5319 * From the SKL PRM Vol. 7 "Per-Thread Scratch Space":
5320 *
5321 * "When a thread becomes 'active' it is allocated a portion of
5322 * scratch space, sized according to PerThreadScratchSpace. The
5323 * starting location of each thread’s scratch space allocation,
5324 * ScratchSpaceOffset, is passed in the thread payload in
5325 * R0.5[31:10] and is specified as a 1KB-granular offset from the
5326 * GeneralStateBaseAddress. The computation of ScratchSpaceOffset
5327 * includes the starting address of the stage’s scratch space
5328 * allocation, as programmed by ScratchSpaceBasePointer."
5329 *
5330 * The base address is passed in bits R0.5[31:10] and the bottom 10
5331 * bits of R0.5 are used for other things. Therefore, we have to
5332 * mask off the bottom 10 bits so that we don't get a garbage base
5333 * address.
5334 */
5335 ubld.group(1, 0).AND(component(header, 5),
5336 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
5337 brw_imm_ud(0xfffffc00));
5338 }
5339 if (is_surface_access)
5340 ubld.group(1, 0).MOV(component(header, 7), sample_mask);
5341 }
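   /* To summarize the header built above (when present): a single zeroed
    * GRF in which DWord 5 holds the 1KB-aligned scratch base copied from
    * r0.5 for stateless messages and DWord 7 holds the sample mask for
    * surface accesses.
    */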
5342 const unsigned header_sz = header.file != BAD_FILE ? 1 : 0;
5343
5344 fs_reg payload, payload2;
5345 unsigned mlen, ex_mlen = 0;
5346 if (devinfo->gen >= 9 &&
5347 (src.file == BAD_FILE || header.file == BAD_FILE)) {
5348 /* We have split sends on gen9 and above */
5349 if (header.file == BAD_FILE) {
5350 payload = bld.move_to_vgrf(addr, addr_sz);
5351 payload2 = bld.move_to_vgrf(src, src_sz);
5352 mlen = addr_sz * (inst->exec_size / 8);
5353 ex_mlen = src_sz * (inst->exec_size / 8);
5354 } else {
5355 assert(src.file == BAD_FILE);
5356 payload = header;
5357 payload2 = bld.move_to_vgrf(addr, addr_sz);
5358 mlen = header_sz;
5359 ex_mlen = addr_sz * (inst->exec_size / 8);
5360 }
5361 } else {
5362 /* Allocate space for the payload. */
5363 const unsigned sz = header_sz + addr_sz + src_sz;
5364 payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
5365 fs_reg *const components = new fs_reg[sz];
5366 unsigned n = 0;
5367
5368 /* Construct the payload. */
5369 if (header.file != BAD_FILE)
5370 components[n++] = header;
5371
5372 for (unsigned i = 0; i < addr_sz; i++)
5373 components[n++] = offset(addr, bld, i);
5374
5375 for (unsigned i = 0; i < src_sz; i++)
5376 components[n++] = offset(src, bld, i);
5377
5378 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
5379 mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
5380
5381 delete[] components;
5382 }
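   /* Illustrative arithmetic for the sizes above: at SIMD16 with
    * addr_sz = 2, src_sz = 4 and no header, the single-payload case needs
    * mlen = (2 + 4) * 16 / 8 = 12 GRFs, while the gen9+ split-send case
    * needs mlen = 2 * 2 = 4 and ex_mlen = 4 * 2 = 8.
    */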
5383
5384 /* Predicate the instruction on the sample mask if no header is
5385 * provided.
5386 */
5387 if ((header.file == BAD_FILE || !is_surface_access) &&
5388 sample_mask.file != BAD_FILE && sample_mask.file != IMM) {
5389 const fs_builder ubld = bld.group(1, 0).exec_all();
5390 if (inst->predicate) {
5391 assert(inst->predicate == BRW_PREDICATE_NORMAL);
5392 assert(!inst->predicate_inverse);
5393 assert(inst->flag_subreg < 2);
5394 /* Combine the sample mask with the existing predicate by using a
5395 * vertical predication mode.
5396 */
5397 inst->predicate = BRW_PREDICATE_ALIGN1_ALLV;
5398 ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg + 2),
5399 sample_mask.type),
5400 sample_mask);
5401 } else {
5402 inst->flag_subreg = 2;
5403 inst->predicate = BRW_PREDICATE_NORMAL;
5404 inst->predicate_inverse = false;
5405 ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg), sample_mask.type),
5406 sample_mask);
5407 }
5408 }
5409
5410 uint32_t sfid;
5411 switch (inst->opcode) {
5412 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5413 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5414 /* Byte scattered opcodes go through the normal data cache */
5415 sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
5416 break;
5417
5418 case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
5419 case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
5420 sfid = devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
5421 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
5422 BRW_DATAPORT_READ_TARGET_RENDER_CACHE;
5423 break;
5424
5425 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5426 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5427 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5428 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5429 /* Untyped Surface messages go through the data cache but the SFID value
5430 * changed on Haswell.
5431 */
5432 sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
5433 HSW_SFID_DATAPORT_DATA_CACHE_1 :
5434 GEN7_SFID_DATAPORT_DATA_CACHE);
5435 break;
5436
5437 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5438 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5439 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5440 /* Typed surface messages go through the render cache on IVB and the
5441 * data cache on HSW+.
5442 */
5443 sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
5444 HSW_SFID_DATAPORT_DATA_CACHE_1 :
5445 GEN6_SFID_DATAPORT_RENDER_CACHE);
5446 break;
5447
5448 default:
5449 unreachable("Unsupported surface opcode");
5450 }
5451
5452 uint32_t desc;
5453 switch (inst->opcode) {
5454 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5455 desc = brw_dp_untyped_surface_rw_desc(devinfo, inst->exec_size,
5456 arg.ud, /* num_channels */
5457 false /* write */);
5458 break;
5459
5460 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5461 desc = brw_dp_untyped_surface_rw_desc(devinfo, inst->exec_size,
5462 arg.ud, /* num_channels */
5463 true /* write */);
5464 break;
5465
5466 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5467 desc = brw_dp_byte_scattered_rw_desc(devinfo, inst->exec_size,
5468 arg.ud, /* bit_size */
5469 false /* write */);
5470 break;
5471
5472 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5473 desc = brw_dp_byte_scattered_rw_desc(devinfo, inst->exec_size,
5474 arg.ud, /* bit_size */
5475 true /* write */);
5476 break;
5477
5478 case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
5479 assert(arg.ud == 32); /* bit_size */
5480 desc = brw_dp_dword_scattered_rw_desc(devinfo, inst->exec_size,
5481 false /* write */);
5482 break;
5483
5484 case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
5485 assert(arg.ud == 32); /* bit_size */
5486 desc = brw_dp_dword_scattered_rw_desc(devinfo, inst->exec_size,
5487 true /* write */);
5488 break;
5489
5490 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5491 desc = brw_dp_untyped_atomic_desc(devinfo, inst->exec_size,
5492 arg.ud, /* atomic_op */
5493 !inst->dst.is_null());
5494 break;
5495
5496 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5497 desc = brw_dp_untyped_atomic_float_desc(devinfo, inst->exec_size,
5498 arg.ud, /* atomic_op */
5499 !inst->dst.is_null());
5500 break;
5501
5502 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5503 desc = brw_dp_typed_surface_rw_desc(devinfo, inst->exec_size, inst->group,
5504 arg.ud, /* num_channels */
5505 false /* write */);
5506 break;
5507
5508 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5509 desc = brw_dp_typed_surface_rw_desc(devinfo, inst->exec_size, inst->group,
5510 arg.ud, /* num_channels */
5511 true /* write */);
5512 break;
5513
5514 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5515 desc = brw_dp_typed_atomic_desc(devinfo, inst->exec_size, inst->group,
5516 arg.ud, /* atomic_op */
5517 !inst->dst.is_null());
5518 break;
5519
5520 default:
5521 unreachable("Unknown surface logical instruction");
5522 }
5523
5524 /* Update the original instruction. */
5525 inst->opcode = SHADER_OPCODE_SEND;
5526 inst->mlen = mlen;
5527 inst->ex_mlen = ex_mlen;
5528 inst->header_size = header_sz;
5529 inst->send_has_side_effects = has_side_effects;
5530 inst->send_is_volatile = !has_side_effects;
5531
5532 /* Set up SFID and descriptors */
5533 inst->sfid = sfid;
5534 inst->desc = desc;
5535 if (surface.file == IMM) {
5536 inst->desc |= surface.ud & 0xff;
5537 inst->src[0] = brw_imm_ud(0);
5538 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5539 } else if (surface_handle.file != BAD_FILE) {
5540 /* Bindless surface */
5541 assert(devinfo->gen >= 9);
5542 inst->desc |= GEN9_BTI_BINDLESS;
5543 inst->src[0] = brw_imm_ud(0);
5544
5545 /* We assume that the driver provided the handle in the top 20 bits so
5546 * we can use the surface handle directly as the extended descriptor.
5547 */
5548 inst->src[1] = retype(surface_handle, BRW_REGISTER_TYPE_UD);
5549 } else {
5550 const fs_builder ubld = bld.exec_all().group(1, 0);
5551 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5552 ubld.AND(tmp, surface, brw_imm_ud(0xff));
5553 inst->src[0] = component(tmp, 0);
5554 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5555 }
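   /* In all three cases the binding table index (or GEN9_BTI_BINDLESS) ends
    * up in bits 7:0 of the descriptor; the non-immediate path simply
    * computes those bits at runtime with a scalar AND and routes them
    * through src[0] instead of the immediate portion.
    */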
5556
5557 /* Finally, the payload */
5558 inst->src[2] = payload;
5559 inst->src[3] = payload2;
5560
5561 inst->resize_sources(4);
5562 }
5563
5564 static void
5565 lower_a64_logical_send(const fs_builder &bld, fs_inst *inst)
5566 {
5567 const gen_device_info *devinfo = bld.shader->devinfo;
5568
5569 const fs_reg &addr = inst->src[0];
5570 const fs_reg &src = inst->src[1];
5571 const unsigned src_comps = inst->components_read(1);
5572 assert(inst->src[2].file == IMM);
5573 const unsigned arg = inst->src[2].ud;
5574 const bool has_side_effects = inst->has_side_effects();
5575
5576 /* If the surface message has side effects and we're a fragment shader, we
5577 * have to predicate with the sample mask to avoid helper invocations.
5578 */
5579 if (has_side_effects && bld.shader->stage == MESA_SHADER_FRAGMENT) {
5580 inst->flag_subreg = 2;
5581 inst->predicate = BRW_PREDICATE_NORMAL;
5582 inst->predicate_inverse = false;
5583
5584 fs_reg sample_mask = bld.sample_mask_reg();
5585 const fs_builder ubld = bld.group(1, 0).exec_all();
5586 ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg), sample_mask.type),
5587 sample_mask);
5588 }
5589
5590 fs_reg payload, payload2;
5591 unsigned mlen, ex_mlen = 0;
5592 if (devinfo->gen >= 9) {
5593 /* On Skylake and above, we have SENDS */
5594 mlen = 2 * (inst->exec_size / 8);
5595 ex_mlen = src_comps * type_sz(src.type) * inst->exec_size / REG_SIZE;
5596 payload = retype(bld.move_to_vgrf(addr, 1), BRW_REGISTER_TYPE_UD);
5597 payload2 = retype(bld.move_to_vgrf(src, src_comps),
5598 BRW_REGISTER_TYPE_UD);
5599 } else {
5600 /* Add two because the address is 64-bit */
5601 const unsigned dwords = 2 + src_comps;
5602 mlen = dwords * (inst->exec_size / 8);
5603
5604 fs_reg sources[5];
5605
5606 sources[0] = addr;
5607
5608 for (unsigned i = 0; i < src_comps; i++)
5609 sources[1 + i] = offset(src, bld, i);
5610
5611 payload = bld.vgrf(BRW_REGISTER_TYPE_UD, dwords);
5612 bld.LOAD_PAYLOAD(payload, sources, 1 + src_comps, 0);
5613 }
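   /* Illustrative numbers: a SIMD8 A64 write of one dword component uses a
    * 64-bit (two-dword) address, so the gen9+ path needs mlen = 2 * 1 = 2
    * GRFs of address plus ex_mlen = 1 * 4 * 8 / 32 = 1 GRF of data, while
    * the combined pre-gen9 payload needs (2 + 1) * 1 = 3 GRFs.
    */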
5614
5615 uint32_t desc;
5616 switch (inst->opcode) {
5617 case SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL:
5618 desc = brw_dp_a64_untyped_surface_rw_desc(devinfo, inst->exec_size,
5619 arg, /* num_channels */
5620 false /* write */);
5621 break;
5622
5623 case SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL:
5624 desc = brw_dp_a64_untyped_surface_rw_desc(devinfo, inst->exec_size,
5625 arg, /* num_channels */
5626 true /* write */);
5627 break;
5628
5629 case SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL:
5630 desc = brw_dp_a64_byte_scattered_rw_desc(devinfo, inst->exec_size,
5631 arg, /* bit_size */
5632 false /* write */);
5633 break;
5634
5635 case SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL:
5636 desc = brw_dp_a64_byte_scattered_rw_desc(devinfo, inst->exec_size,
5637 arg, /* bit_size */
5638 true /* write */);
5639 break;
5640
5641 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
5642 desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 32,
5643 arg, /* atomic_op */
5644 !inst->dst.is_null());
5645 break;
5646
5647 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
5648 desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 64,
5649 arg, /* atomic_op */
5650 !inst->dst.is_null());
5651 break;
5652
5653
5654 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5655 desc = brw_dp_a64_untyped_atomic_float_desc(devinfo, inst->exec_size,
5656 arg, /* atomic_op */
5657 !inst->dst.is_null());
5658 break;
5659
5660 default:
5661 unreachable("Unknown A64 logical instruction");
5662 }
5663
5664 /* Update the original instruction. */
5665 inst->opcode = SHADER_OPCODE_SEND;
5666 inst->mlen = mlen;
5667 inst->ex_mlen = ex_mlen;
5668 inst->header_size = 0;
5669 inst->send_has_side_effects = has_side_effects;
5670 inst->send_is_volatile = !has_side_effects;
5671
5672 /* Set up SFID and descriptors */
5673 inst->sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
5674 inst->desc = desc;
5675 inst->resize_sources(4);
5676 inst->src[0] = brw_imm_ud(0); /* desc */
5677 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5678 inst->src[2] = payload;
5679 inst->src[3] = payload2;
5680 }
5681
5682 static void
5683 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
5684 {
5685 const gen_device_info *devinfo = bld.shader->devinfo;
5686
5687 if (devinfo->gen >= 7) {
5688 fs_reg index = inst->src[0];
5689 /* We are switching the instruction from an ALU-like instruction to a
5690 * send-from-grf instruction. Since sends can't handle strides or
5691 * source modifiers, we have to make a copy of the offset source.
5692 */
5693 fs_reg offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
5694 bld.MOV(offset, inst->src[1]);
5695
5696 const unsigned simd_mode =
5697 inst->exec_size <= 8 ? BRW_SAMPLER_SIMD_MODE_SIMD8 :
5698 BRW_SAMPLER_SIMD_MODE_SIMD16;
5699
5700 inst->opcode = SHADER_OPCODE_SEND;
5701 inst->mlen = inst->exec_size / 8;
5702 inst->resize_sources(3);
5703
5704 inst->sfid = BRW_SFID_SAMPLER;
5705 inst->desc = brw_sampler_desc(devinfo, 0, 0,
5706 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
5707 simd_mode, 0);
5708 if (index.file == IMM) {
5709 inst->desc |= index.ud & 0xff;
5710 inst->src[0] = brw_imm_ud(0);
5711 } else {
5712 const fs_builder ubld = bld.exec_all().group(1, 0);
5713 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD);
5714 ubld.AND(tmp, index, brw_imm_ud(0xff));
5715 inst->src[0] = component(tmp, 0);
5716 }
5717 inst->src[1] = brw_imm_ud(0); /* ex_desc */
5718 inst->src[2] = offset; /* payload */
5719 } else {
5720 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
5721 BRW_REGISTER_TYPE_UD);
5722
5723 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
5724
5725 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
5726 inst->resize_sources(1);
5727 inst->base_mrf = payload.nr;
5728 inst->header_size = 1;
5729 inst->mlen = 1 + inst->exec_size / 8;
5730 }
5731 }
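/* On the gen7+ path above the pull load is in effect a texel fetch: the
 * copied offset becomes the sole payload of a SIMD8/16 LD sampler message,
 * and the constant buffer's binding table index is folded into the
 * descriptor exactly like a texture surface would be.
 */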
5732
5733 static void
5734 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
5735 {
5736 assert(bld.shader->devinfo->gen < 6);
5737
5738 inst->base_mrf = 2;
5739 inst->mlen = inst->sources * inst->exec_size / 8;
5740
5741 if (inst->sources > 1) {
5742 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
5743 * "Message Payload":
5744 *
5745 * "Operand0[7]. For the INT DIV functions, this operand is the
5746 * denominator."
5747 * ...
5748 * "Operand1[7]. For the INT DIV functions, this operand is the
5749 * numerator."
5750 */
5751 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
5752 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
5753 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
5754
5755 inst->resize_sources(1);
5756 inst->src[0] = src0;
5757
5758 assert(inst->exec_size == 8);
5759 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
5760 }
5761 }
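/* Sketch of the resulting gen4-5 MRF layout for a two-source SIMD8 math
 * call: m2 holds src[0] (delivered by the SEND's implied move of its
 * source) and m3 holds src[1]. Because the hardware expects the
 * denominator in Operand0, INT_QUOTIENT(a, b) and INT_REMAINDER(a, b) are
 * emitted with b in m2 and a in m3 by the swap above.
 */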
5762
5763 bool
5764 fs_visitor::lower_logical_sends()
5765 {
5766 bool progress = false;
5767
5768 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5769 const fs_builder ibld(this, block, inst);
5770
5771 switch (inst->opcode) {
5772 case FS_OPCODE_FB_WRITE_LOGICAL:
5773 assert(stage == MESA_SHADER_FRAGMENT);
5774 lower_fb_write_logical_send(ibld, inst,
5775 brw_wm_prog_data(prog_data),
5776 (const brw_wm_prog_key *)key,
5777 payload);
5778 break;
5779
5780 case FS_OPCODE_FB_READ_LOGICAL:
5781 lower_fb_read_logical_send(ibld, inst);
5782 break;
5783
5784 case SHADER_OPCODE_TEX_LOGICAL:
5785 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
5786 break;
5787
5788 case SHADER_OPCODE_TXD_LOGICAL:
5789 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
5790 break;
5791
5792 case SHADER_OPCODE_TXF_LOGICAL:
5793 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
5794 break;
5795
5796 case SHADER_OPCODE_TXL_LOGICAL:
5797 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
5798 break;
5799
5800 case SHADER_OPCODE_TXS_LOGICAL:
5801 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
5802 break;
5803
5804 case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
5805 lower_sampler_logical_send(ibld, inst,
5806 SHADER_OPCODE_IMAGE_SIZE_LOGICAL);
5807 break;
5808
5809 case FS_OPCODE_TXB_LOGICAL:
5810 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
5811 break;
5812
5813 case SHADER_OPCODE_TXF_CMS_LOGICAL:
5814 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
5815 break;
5816
5817 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
5818 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
5819 break;
5820
5821 case SHADER_OPCODE_TXF_UMS_LOGICAL:
5822 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
5823 break;
5824
5825 case SHADER_OPCODE_TXF_MCS_LOGICAL:
5826 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
5827 break;
5828
5829 case SHADER_OPCODE_LOD_LOGICAL:
5830 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
5831 break;
5832
5833 case SHADER_OPCODE_TG4_LOGICAL:
5834 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
5835 break;
5836
5837 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
5838 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
5839 break;
5840
5841 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
5842 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
5843 break;
5844
5845 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
5846 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
5847 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
5848 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
5849 case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
5850 case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
5851 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
5852 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5853 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
5854 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
5855 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
5856 lower_surface_logical_send(ibld, inst);
5857 break;
5858
5859 case SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL:
5860 case SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL:
5861 case SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL:
5862 case SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL:
5863 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
5864 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
5865 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
5866 lower_a64_logical_send(ibld, inst);
5867 break;
5868
5869 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
5870 lower_varying_pull_constant_logical_send(ibld, inst);
5871 break;
5872
5873 case SHADER_OPCODE_RCP:
5874 case SHADER_OPCODE_RSQ:
5875 case SHADER_OPCODE_SQRT:
5876 case SHADER_OPCODE_EXP2:
5877 case SHADER_OPCODE_LOG2:
5878 case SHADER_OPCODE_SIN:
5879 case SHADER_OPCODE_COS:
5880 case SHADER_OPCODE_POW:
5881 case SHADER_OPCODE_INT_QUOTIENT:
5882 case SHADER_OPCODE_INT_REMAINDER:
5883 /* The math opcodes are overloaded for the send-like and
5884 * expression-like instructions which seems kind of icky. Gen6+ has
5885 * a native (but rather quirky) MATH instruction so we don't need to
5886 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
5887 * logical instructions (which we can easily recognize because they
5888 * have mlen = 0) into send-like virtual instructions.
5889 */
5890 if (devinfo->gen < 6 && inst->mlen == 0) {
5891 lower_math_logical_send(ibld, inst);
5892 break;
5893
5894 } else {
5895 continue;
5896 }
5897
5898 default:
5899 continue;
5900 }
5901
5902 progress = true;
5903 }
5904
5905 if (progress)
5906 invalidate_live_intervals();
5907
5908 return progress;
5909 }
5910
5911 static bool
5912 is_mixed_float_with_fp32_dst(const fs_inst *inst)
5913 {
5914 /* This opcode sometimes uses :W type on the source even if the operand is
5915 * a :HF, because in gen7 there is no support for :HF, and thus it uses :W.
5916 */
5917 if (inst->opcode == BRW_OPCODE_F16TO32)
5918 return true;
5919
5920 if (inst->dst.type != BRW_REGISTER_TYPE_F)
5921 return false;
5922
5923 for (int i = 0; i < inst->sources; i++) {
5924 if (inst->src[i].type == BRW_REGISTER_TYPE_HF)
5925 return true;
5926 }
5927
5928 return false;
5929 }
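/* For example, an ADD with one :HF source and an :F destination satisfies
 * the check above; get_fpu_lowered_simd_width() uses it to cap such
 * instructions at SIMD8 per the SKL mixed-mode restrictions quoted there.
 */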
5930
5931 static bool
5932 is_mixed_float_with_packed_fp16_dst(const fs_inst *inst)
5933 {
5934 /* This opcode sometimes uses :W type on the destination even if the
5935 * destination is a :HF, because in gen7 there is no support for :HF, and
5936 * thus it uses :W.
5937 */
5938 if (inst->opcode == BRW_OPCODE_F32TO16 &&
5939 inst->dst.stride == 1)
5940 return true;
5941
5942 if (inst->dst.type != BRW_REGISTER_TYPE_HF ||
5943 inst->dst.stride != 1)
5944 return false;
5945
5946 for (int i = 0; i < inst->sources; i++) {
5947 if (inst->src[i].type == BRW_REGISTER_TYPE_F)
5948 return true;
5949 }
5950
5951 return false;
5952 }
5953
5954 /**
5955 * Get the closest allowed SIMD width for instruction \p inst accounting for
5956 * some common regioning and execution control restrictions that apply to FPU
5957 * instructions. These restrictions don't necessarily have any relevance to
5958 * instructions not executed by the FPU pipeline like extended math, control
5959 * flow or send message instructions.
5960 *
5961  * For virtual opcodes it's really up to the instruction -- in some cases
5962 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
5963 * instructions) it may simplify virtual instruction lowering if we can
5964 * enforce FPU-like regioning restrictions already on the virtual instruction,
5965 * in other cases (e.g. virtual send-like instructions) this may be
5966 * excessively restrictive.
5967 */
5968 static unsigned
5969 get_fpu_lowered_simd_width(const struct gen_device_info *devinfo,
5970 const fs_inst *inst)
5971 {
5972 /* Maximum execution size representable in the instruction controls. */
5973 unsigned max_width = MIN2(32, inst->exec_size);
5974
5975 /* According to the PRMs:
5976 * "A. In Direct Addressing mode, a source cannot span more than 2
5977 * adjacent GRF registers.
5978 * B. A destination cannot span more than 2 adjacent GRF registers."
5979 *
5980 * Look for the source or destination with the largest register region
5981 * which is the one that is going to limit the overall execution size of
5982 * the instruction due to this rule.
5983 */
5984 unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
5985
5986 for (unsigned i = 0; i < inst->sources; i++)
5987 reg_count = MAX2(reg_count, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));
5988
5989 /* Calculate the maximum execution size of the instruction based on the
5990 * factor by which it goes over the hardware limit of 2 GRFs.
5991 */
5992 if (reg_count > 2)
5993 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
5994
5995 /* According to the IVB PRMs:
5996 * "When destination spans two registers, the source MUST span two
5997 * registers. The exception to the above rule:
5998 *
5999 * - When source is scalar, the source registers are not incremented.
6000 * - When source is packed integer Word and destination is packed
6001 * integer DWord, the source register is not incremented but the
6002 * source sub register is incremented."
6003 *
6004 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
6005 * restrictions. The code below intentionally doesn't check whether the
6006 * destination type is integer because empirically the hardware doesn't
6007 * seem to care what the actual type is as long as it's dword-aligned.
6008 */
6009 if (devinfo->gen < 8) {
6010 for (unsigned i = 0; i < inst->sources; i++) {
6011 /* IVB implements DF scalars as <0;2,1> regions. */
6012 const bool is_scalar_exception = is_uniform(inst->src[i]) &&
6013 (devinfo->is_haswell || type_sz(inst->src[i].type) != 8);
6014 const bool is_packed_word_exception =
6015 type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
6016 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1;
6017
6018 /* We check size_read(i) against size_written instead of REG_SIZE
6019 * because we want to properly handle SIMD32. In SIMD32, you can end
6020 * up with writes to 4 registers and a source that reads 2 registers
6021 * and we may still need to lower all the way to SIMD8 in that case.
6022 */
6023 if (inst->size_written > REG_SIZE &&
6024 inst->size_read(i) != 0 &&
6025 inst->size_read(i) < inst->size_written &&
6026 !is_scalar_exception && !is_packed_word_exception) {
6027 const unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
6028 max_width = MIN2(max_width, inst->exec_size / reg_count);
6029 }
6030 }
6031 }
6032
6033 if (devinfo->gen < 6) {
6034 /* From the G45 PRM, Volume 4 Page 361:
6035 *
6036 * "Operand Alignment Rule: With the exceptions listed below, a
6037 * source/destination operand in general should be aligned to even
6038 * 256-bit physical register with a region size equal to two 256-bit
6039 * physical registers."
6040 *
6041 * Normally we enforce this by allocating virtual registers to the
6042 * even-aligned class. But we need to handle payload registers.
6043 */
6044 for (unsigned i = 0; i < inst->sources; i++) {
6045 if (inst->src[i].file == FIXED_GRF && (inst->src[i].nr & 1) &&
6046 inst->size_read(i) > REG_SIZE) {
6047 max_width = MIN2(max_width, 8);
6048 }
6049 }
6050 }
6051
6052 /* From the IVB PRMs:
6053 * "When an instruction is SIMD32, the low 16 bits of the execution mask
6054 * are applied for both halves of the SIMD32 instruction. If different
6055 * execution mask channels are required, split the instruction into two
6056 * SIMD16 instructions."
6057 *
6058 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
6059 * 32-wide control flow support in hardware and will behave similarly.
6060 */
6061 if (devinfo->gen < 8 && !inst->force_writemask_all)
6062 max_width = MIN2(max_width, 16);
6063
6064 /* From the IVB PRMs (applies to HSW too):
6065 * "Instructions with condition modifiers must not use SIMD32."
6066 *
6067 * From the BDW PRMs (applies to later hardware too):
6068 * "Ternary instruction with condition modifiers must not use SIMD32."
6069 */
6070 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
6071 max_width = MIN2(max_width, 16);
6072
6073 /* From the IVB PRMs (applies to other devices that don't have the
6074 * gen_device_info::supports_simd16_3src flag set):
6075 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
6076 * SIMD8 is not allowed for DF operations."
6077 */
6078 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
6079 max_width = MIN2(max_width, inst->exec_size / reg_count);
6080
6081 /* Pre-Gen8 EUs are hardwired to use the QtrCtrl+1 (where QtrCtrl is
6082 * the 8-bit quarter of the execution mask signals specified in the
6083 * instruction control fields) for the second compressed half of any
6084 * single-precision instruction (for double-precision instructions
6085 * it's hardwired to use NibCtrl+1, at least on HSW), which means that
6086 * the EU will apply the wrong execution controls for the second
6087 * sequential GRF write if the number of channels per GRF is not exactly
6088 * eight in single-precision mode (or four in double-float mode).
6089 *
6090 * In this situation we calculate the maximum size of the split
6091 * instructions so they only ever write to a single register.
6092 */
6093 if (devinfo->gen < 8 && inst->size_written > REG_SIZE &&
6094 !inst->force_writemask_all) {
6095 const unsigned channels_per_grf = inst->exec_size /
6096 DIV_ROUND_UP(inst->size_written, REG_SIZE);
6097 const unsigned exec_type_size = get_exec_type_size(inst);
6098 assert(exec_type_size);
6099
6100 /* The hardware shifts exactly 8 channels per compressed half of the
6101 * instruction in single-precision mode and exactly 4 in double-precision.
6102 */
6103 if (channels_per_grf != (exec_type_size == 8 ? 4 : 8))
6104 max_width = MIN2(max_width, channels_per_grf);
6105
6106 /* Lower all non-force_writemask_all DF instructions to SIMD4 on IVB/BYT
6107 * because HW applies the same channel enable signals to both halves of
6108 * the compressed instruction which will be just wrong under
6109 * non-uniform control flow.
6110 */
6111 if (devinfo->gen == 7 && !devinfo->is_haswell &&
6112 (exec_type_size == 8 || type_sz(inst->dst.type) == 8))
6113 max_width = MIN2(max_width, 4);
6114 }
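   /* Concrete case (pre-gen8): a SIMD8 instruction writing dwords with a
    * destination stride of 2 covers two GRFs with only four channels per
    * GRF, so it is split here into SIMD4 chunks that each write a single
    * GRF with the correct execution controls.
    */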
6115
6116 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
6117 * Float Operations:
6118 *
6119 * "No SIMD16 in mixed mode when destination is f32. Instruction
6120 * execution size must be no more than 8."
6121 *
6122     * FIXME: the simulator doesn't seem to complain if we don't do this, and
6123     * empirical testing with existing CTS tests shows that they pass just fine
6124     * without implementing this. However, since our interpretation of the PRM
6125     * is that conversion MOVs between HF and F are still mixed-float
6126     * instructions (and therefore subject to this restriction), we decided to
6127     * split them to be safe. It might be worth additional investigation to
6128     * lift the restriction if we can ensure that it is safe, since these
6129     * conversions are common when half-float types are involved: many
6130     * instructions do not support HF types, so conversions from/to F are
6131     * required.
6132 */
6133 if (is_mixed_float_with_fp32_dst(inst))
6134 max_width = MIN2(max_width, 8);
6135
6136 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
6137 * Float Operations:
6138 *
6139 * "No SIMD16 in mixed mode when destination is packed f16 for both
6140 * Align1 and Align16."
6141 */
6142 if (is_mixed_float_with_packed_fp16_dst(inst))
6143 max_width = MIN2(max_width, 8);
6144
6145 /* Only power-of-two execution sizes are representable in the instruction
6146 * control fields.
6147 */
6148 return 1 << _mesa_logbase2(max_width);
6149 }
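/* As a worked example of the rules above: a SIMD16 instruction with a
 * 64-bit destination spans four GRFs, so the 2-GRF limit alone yields
 * max_width = 16 / DIV_ROUND_UP(4, 2) = 8, which is already a power of
 * two; the generation-specific rules can then only lower it further.
 */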
6150
6151 /**
6152 * Get the maximum allowed SIMD width for instruction \p inst accounting for
6153 * various payload size restrictions that apply to sampler message
6154 * instructions.
6155 *
6156 * This is only intended to provide a maximum theoretical bound for the
6157 * execution size of the message based on the number of argument components
6158 * alone, which in most cases will determine whether the SIMD8 or SIMD16
6159 * variant of the message can be used, though some messages may have
6160 * additional restrictions not accounted for here (e.g. pre-ILK hardware uses
6161 * the message length to determine the exact SIMD width and argument count,
6162 * which makes a number of sampler message combinations impossible to
6163 * represent).
6164 */
6165 static unsigned
6166 get_sampler_lowered_simd_width(const struct gen_device_info *devinfo,
6167 const fs_inst *inst)
6168 {
6169 /* If we have a min_lod parameter on anything other than a simple sample
6170 * message, it will push it over 5 arguments and we have to fall back to
6171 * SIMD8.
6172 */
6173 if (inst->opcode != SHADER_OPCODE_TEX &&
6174 inst->components_read(TEX_LOGICAL_SRC_MIN_LOD))
6175 return 8;
6176
6177 /* Calculate the number of coordinate components that have to be present
6178 * assuming that additional arguments follow the texel coordinates in the
6179 * message payload. On IVB+ there is no need for padding, on ILK-SNB we
6180 * need to pad to four or three components depending on the message,
6181 * pre-ILK we need to pad to at most three components.
6182 */
6183 const unsigned req_coord_components =
6184 (devinfo->gen >= 7 ||
6185 !inst->components_read(TEX_LOGICAL_SRC_COORDINATE)) ? 0 :
6186 (devinfo->gen >= 5 && inst->opcode != SHADER_OPCODE_TXF_LOGICAL &&
6187 inst->opcode != SHADER_OPCODE_TXF_CMS_LOGICAL) ? 4 :
6188 3;
6189
6190 /* On Gen9+ the LOD argument is for free if we're able to use the LZ
6191 * variant of the TXL or TXF message.
6192 */
6193 const bool implicit_lod = devinfo->gen >= 9 &&
6194 (inst->opcode == SHADER_OPCODE_TXL ||
6195 inst->opcode == SHADER_OPCODE_TXF) &&
6196 inst->src[TEX_LOGICAL_SRC_LOD].is_zero();
6197
6198 /* Calculate the total number of argument components that need to be passed
6199 * to the sampler unit.
6200 */
6201 const unsigned num_payload_components =
6202 MAX2(inst->components_read(TEX_LOGICAL_SRC_COORDINATE),
6203 req_coord_components) +
6204 inst->components_read(TEX_LOGICAL_SRC_SHADOW_C) +
6205 (implicit_lod ? 0 : inst->components_read(TEX_LOGICAL_SRC_LOD)) +
6206 inst->components_read(TEX_LOGICAL_SRC_LOD2) +
6207 inst->components_read(TEX_LOGICAL_SRC_SAMPLE_INDEX) +
6208 (inst->opcode == SHADER_OPCODE_TG4_OFFSET_LOGICAL ?
6209 inst->components_read(TEX_LOGICAL_SRC_TG4_OFFSET) : 0) +
6210 inst->components_read(TEX_LOGICAL_SRC_MCS);
6211
6212 /* SIMD16 messages with more than five arguments exceed the maximum message
6213 * size supported by the sampler, regardless of whether a header is
6214 * provided or not.
6215 */
6216 return MIN2(inst->exec_size,
6217 num_payload_components > MAX_SAMPLER_MESSAGE_SIZE / 2 ? 8 : 16);
6218 }
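/* For instance (assuming MAX_SAMPLER_MESSAGE_SIZE is 11), a gen7+
 * shadow-compare TXL on a cube array needs 4 + 1 + 1 = 6 payload
 * components, which exceeds 11 / 2 and limits the message to SIMD8, while
 * a plain 2D TXL with 2 + 1 components can go SIMD16.
 */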
6219
6220 /**
6221 * Get the closest native SIMD width supported by the hardware for instruction
6222 * \p inst. The instruction will be left untouched by
6223 * fs_visitor::lower_simd_width() if the returned value is equal to the
6224 * original execution size.
6225 */
6226 static unsigned
6227 get_lowered_simd_width(const struct gen_device_info *devinfo,
6228 const fs_inst *inst)
6229 {
6230 switch (inst->opcode) {
6231 case BRW_OPCODE_MOV:
6232 case BRW_OPCODE_SEL:
6233 case BRW_OPCODE_NOT:
6234 case BRW_OPCODE_AND:
6235 case BRW_OPCODE_OR:
6236 case BRW_OPCODE_XOR:
6237 case BRW_OPCODE_SHR:
6238 case BRW_OPCODE_SHL:
6239 case BRW_OPCODE_ASR:
6240 case BRW_OPCODE_ROR:
6241 case BRW_OPCODE_ROL:
6242 case BRW_OPCODE_CMPN:
6243 case BRW_OPCODE_CSEL:
6244 case BRW_OPCODE_F32TO16:
6245 case BRW_OPCODE_F16TO32:
6246 case BRW_OPCODE_BFREV:
6247 case BRW_OPCODE_BFE:
6248 case BRW_OPCODE_ADD:
6249 case BRW_OPCODE_MUL:
6250 case BRW_OPCODE_AVG:
6251 case BRW_OPCODE_FRC:
6252 case BRW_OPCODE_RNDU:
6253 case BRW_OPCODE_RNDD:
6254 case BRW_OPCODE_RNDE:
6255 case BRW_OPCODE_RNDZ:
6256 case BRW_OPCODE_LZD:
6257 case BRW_OPCODE_FBH:
6258 case BRW_OPCODE_FBL:
6259 case BRW_OPCODE_CBIT:
6260 case BRW_OPCODE_SAD2:
6261 case BRW_OPCODE_MAD:
6262 case BRW_OPCODE_LRP:
6263 case FS_OPCODE_PACK:
6264 case SHADER_OPCODE_SEL_EXEC:
6265 case SHADER_OPCODE_CLUSTER_BROADCAST:
6266 return get_fpu_lowered_simd_width(devinfo, inst);
6267
6268 case BRW_OPCODE_CMP: {
6269 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
6270 * when the destination is a GRF the dependency-clear bit on the flag
6271 * register is cleared early.
6272 *
6273 * Suggested workarounds are to disable coissuing CMP instructions
6274 * or to split CMP(16) instructions into two CMP(8) instructions.
6275 *
6276 * We choose to split into CMP(8) instructions since disabling
6277 * coissuing would affect CMP instructions not otherwise affected by
6278 * the errata.
6279 */
6280 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
6281 !inst->dst.is_null() ? 8 : ~0);
6282 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
6283 }
6284 case BRW_OPCODE_BFI1:
6285 case BRW_OPCODE_BFI2:
6286 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
6287 * should
6288 * "Force BFI instructions to be executed always in SIMD8."
6289 */
6290 return MIN2(devinfo->is_haswell ? 8 : ~0u,
6291 get_fpu_lowered_simd_width(devinfo, inst));
6292
6293 case BRW_OPCODE_IF:
6294 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
6295 return inst->exec_size;
6296
6297 case SHADER_OPCODE_RCP:
6298 case SHADER_OPCODE_RSQ:
6299 case SHADER_OPCODE_SQRT:
6300 case SHADER_OPCODE_EXP2:
6301 case SHADER_OPCODE_LOG2:
6302 case SHADER_OPCODE_SIN:
6303 case SHADER_OPCODE_COS: {
6304 /* Unary extended math instructions are limited to SIMD8 on Gen4 and
6305 * Gen6. Extended Math Function is limited to SIMD8 with half-float.
6306 */
6307 if (devinfo->gen == 6 || (devinfo->gen == 4 && !devinfo->is_g4x))
6308 return MIN2(8, inst->exec_size);
6309 if (inst->dst.type == BRW_REGISTER_TYPE_HF)
6310 return MIN2(8, inst->exec_size);
6311 return MIN2(16, inst->exec_size);
6312 }
6313
6314 case SHADER_OPCODE_POW: {
6315 /* SIMD16 is only allowed on Gen7+. Extended Math Function is limited
6316 * to SIMD8 with half-float
6317 */
6318 if (devinfo->gen < 7)
6319 return MIN2(8, inst->exec_size);
6320 if (inst->dst.type == BRW_REGISTER_TYPE_HF)
6321 return MIN2(8, inst->exec_size);
6322 return MIN2(16, inst->exec_size);
6323 }
6324
6325 case SHADER_OPCODE_INT_QUOTIENT:
6326 case SHADER_OPCODE_INT_REMAINDER:
6327 /* Integer division is limited to SIMD8 on all generations. */
6328 return MIN2(8, inst->exec_size);
6329
6330 case FS_OPCODE_LINTERP:
6331 case SHADER_OPCODE_GET_BUFFER_SIZE:
6332 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
6333 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
6334 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
6335 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
6336 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
6337 return MIN2(16, inst->exec_size);
6338
6339 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
6340 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
6341 * message used to implement varying pull constant loads, so expand it
6342 * to SIMD16. An alternative with longer message payload length but
6343 * shorter return payload would be to use the SIMD8 sampler message that
6344 * takes (header, u, v, r) as parameters instead of (header, u).
6345 */
6346 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
6347
6348 case FS_OPCODE_DDX_COARSE:
6349 case FS_OPCODE_DDX_FINE:
6350 case FS_OPCODE_DDY_COARSE:
6351 case FS_OPCODE_DDY_FINE:
6352 /* The implementation of this virtual opcode may require emitting
6353 * compressed Align16 instructions, which are severely limited on some
6354 * generations.
6355 *
6356 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
6357 * Region Restrictions):
6358 *
6359 * "In Align16 access mode, SIMD16 is not allowed for DW operations
6360 * and SIMD8 is not allowed for DF operations."
6361 *
6362 * In this context, "DW operations" means "operations acting on 32-bit
6363 * values", so it includes operations on floats.
6364 *
6365 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
6366 * (Instruction Compression -> Rules and Restrictions):
6367 *
6368 * "A compressed instruction must be in Align1 access mode. Align16
6369 * mode instructions cannot be compressed."
6370 *
6371 * Similar text exists in the g45 PRM.
6372 *
6373 * Empirically, compressed align16 instructions using odd register
6374 * numbers don't appear to work on Sandybridge either.
6375 */
6376 return (devinfo->gen == 4 || devinfo->gen == 6 ||
6377 (devinfo->gen == 7 && !devinfo->is_haswell) ?
6378 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
6379
6380 case SHADER_OPCODE_MULH:
6381 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
6382 * is 8-wide on Gen7+.
6383 */
6384 return (devinfo->gen >= 7 ? 8 :
6385 get_fpu_lowered_simd_width(devinfo, inst));
6386
6387 case FS_OPCODE_FB_WRITE_LOGICAL:
6388 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
6389 * here.
6390 */
6391 assert(devinfo->gen != 6 ||
6392 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
6393 inst->exec_size == 8);
6394 /* Dual-source FB writes are unsupported in SIMD16 mode. */
6395 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
6396 8 : MIN2(16, inst->exec_size));
6397
6398 case FS_OPCODE_FB_READ_LOGICAL:
6399 return MIN2(16, inst->exec_size);
6400
6401 case SHADER_OPCODE_TEX_LOGICAL:
6402 case SHADER_OPCODE_TXF_CMS_LOGICAL:
6403 case SHADER_OPCODE_TXF_UMS_LOGICAL:
6404 case SHADER_OPCODE_TXF_MCS_LOGICAL:
6405 case SHADER_OPCODE_LOD_LOGICAL:
6406 case SHADER_OPCODE_TG4_LOGICAL:
6407 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
6408 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
6409 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
6410 return get_sampler_lowered_simd_width(devinfo, inst);
6411
6412 case SHADER_OPCODE_TXD_LOGICAL:
6413 /* TXD is unsupported in SIMD16 mode. */
6414 return 8;
6415
6416 case SHADER_OPCODE_TXL_LOGICAL:
6417 case FS_OPCODE_TXB_LOGICAL:
6418 /* Only one execution size is representable pre-ILK depending on whether
6419 * the shadow reference argument is present.
6420 */
6421 if (devinfo->gen == 4)
6422 return inst->src[TEX_LOGICAL_SRC_SHADOW_C].file == BAD_FILE ? 16 : 8;
6423 else
6424 return get_sampler_lowered_simd_width(devinfo, inst);
6425
6426 case SHADER_OPCODE_TXF_LOGICAL:
6427 case SHADER_OPCODE_TXS_LOGICAL:
6428 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
6429 * messages. Use SIMD16 instead.
6430 */
6431 if (devinfo->gen == 4)
6432 return 16;
6433 else
6434 return get_sampler_lowered_simd_width(devinfo, inst);
6435
6436 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
6437 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
6438 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
6439 return 8;
6440
6441 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
6442 case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
6443 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
6444 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
6445 case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
6446 case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
6447 case SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL:
6448 case SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL:
6449 return MIN2(16, inst->exec_size);
6450
6451 case SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL:
6452 case SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL:
6453 case SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL:
6454 case SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL:
6455 return devinfo->gen <= 8 ? 8 : MIN2(16, inst->exec_size);
6456
6457 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
6458 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
6459 case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT_LOGICAL:
6460 return 8;
6461
6462 case SHADER_OPCODE_URB_READ_SIMD8:
6463 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
6464 case SHADER_OPCODE_URB_WRITE_SIMD8:
6465 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
6466 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
6467 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
6468 return MIN2(8, inst->exec_size);
6469
6470 case SHADER_OPCODE_QUAD_SWIZZLE: {
6471 const unsigned swiz = inst->src[1].ud;
6472 return (is_uniform(inst->src[0]) ?
6473 get_fpu_lowered_simd_width(devinfo, inst) :
6474 devinfo->gen < 11 && type_sz(inst->src[0].type) == 4 ? 8 :
6475 swiz == BRW_SWIZZLE_XYXY || swiz == BRW_SWIZZLE_ZWZW ? 4 :
6476 get_fpu_lowered_simd_width(devinfo, inst));
6477 }
6478 case SHADER_OPCODE_MOV_INDIRECT: {
6479 /* From IVB and HSW PRMs:
6480 *
6481        * "2.When the destination requires two registers and the sources are
6482        *    indirect, the sources must use 1x1 regioning mode."
6483 *
6484 * In case of DF instructions in HSW/IVB, the exec_size is limited by
6485 * the EU decompression logic not handling VxH indirect addressing
6486 * correctly.
6487 */
6488 const unsigned max_size = (devinfo->gen >= 8 ? 2 : 1) * REG_SIZE;
6489 /* Prior to Broadwell, we only have 8 address subregisters. */
6490 return MIN3(devinfo->gen >= 8 ? 16 : 8,
6491 max_size / (inst->dst.stride * type_sz(inst->dst.type)),
6492 inst->exec_size);
6493 }
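      /* e.g. an indirect MOV of a packed dword destination: on gen8+
       * max_size / (1 * 4) = 16, so the MIN3 above allows SIMD16, while on
       * gen7 both the 8-subregister limit and max_size / 4 = 8 cap it at
       * SIMD8.
       */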
6494
6495 case SHADER_OPCODE_LOAD_PAYLOAD: {
6496 const unsigned reg_count =
6497 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
6498
6499 if (reg_count > 2) {
6500 /* Only LOAD_PAYLOAD instructions with per-channel destination region
6501 * can be easily lowered (which excludes headers and heterogeneous
6502 * types).
6503 */
6504 assert(!inst->header_size);
6505 for (unsigned i = 0; i < inst->sources; i++)
6506 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
6507 inst->src[i].file == BAD_FILE);
6508
6509 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
6510 } else {
6511 return inst->exec_size;
6512 }
6513 }
6514 default:
6515 return inst->exec_size;
6516 }
6517 }
6518
6519 /**
6520 * Return true if splitting out the group of channels of instruction \p inst
6521 * given by lbld.group() requires allocating a temporary for the i-th source
6522 * of the lowered instruction.
6523 */
6524 static inline bool
6525 needs_src_copy(const fs_builder &lbld, const fs_inst *inst, unsigned i)
6526 {
6527 return !(is_periodic(inst->src[i], lbld.dispatch_width()) ||
6528 (inst->components_read(i) == 1 &&
6529 lbld.dispatch_width() <= inst->exec_size)) ||
6530 (inst->flags_written() &
6531 flag_mask(inst->src[i], type_sz(inst->src[i].type)));
6532 }
6533
6534 /**
6535 * Extract the data that would be consumed by the channel group given by
6536 * lbld.group() from the i-th source region of instruction \p inst and return
6537 * it as result in packed form.
6538 */
6539 static fs_reg
6540 emit_unzip(const fs_builder &lbld, fs_inst *inst, unsigned i)
6541 {
6542 assert(lbld.group() >= inst->group);
6543
6544 /* Specified channel group from the source region. */
6545 const fs_reg src = horiz_offset(inst->src[i], lbld.group() - inst->group);
6546
6547 if (needs_src_copy(lbld, inst, i)) {
6548       /* Builder of the right width to perform the copy, avoiding uninitialized
6549 * data if the lowered execution size is greater than the original
6550 * execution size of the instruction.
6551 */
6552 const fs_builder cbld = lbld.group(MIN2(lbld.dispatch_width(),
6553 inst->exec_size), 0);
6554 const fs_reg tmp = lbld.vgrf(inst->src[i].type, inst->components_read(i));
6555
6556 for (unsigned k = 0; k < inst->components_read(i); ++k)
6557 cbld.MOV(offset(tmp, lbld, k), offset(src, inst->exec_size, k));
6558
6559 return tmp;
6560
6561 } else if (is_periodic(inst->src[i], lbld.dispatch_width())) {
6562 /* The source is invariant for all dispatch_width-wide groups of the
6563 * original region.
6564 */
6565 return inst->src[i];
6566
6567 } else {
6568 /* We can just point the lowered instruction at the right channel group
6569 * from the original region.
6570 */
6571 return src;
6572 }
6573 }
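/* As an example of the middle case above, a uniform (stride 0) source
 * looks the same to every channel, so it is periodic in any dispatch width
 * and both halves of a split SIMD32 instruction can read it directly
 * without a copy.
 */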
6574
6575 /**
6576 * Return true if splitting out the group of channels of instruction \p inst
6577 * given by lbld.group() requires allocating a temporary for the destination
6578 * of the lowered instruction and copying the data back to the original
6579 * destination region.
6580 */
6581 static inline bool
6582 needs_dst_copy(const fs_builder &lbld, const fs_inst *inst)
6583 {
6584 /* If the instruction writes more than one component we'll have to shuffle
6585 * the results of multiple lowered instructions in order to make sure that
6586 * they end up arranged correctly in the original destination region.
6587 */
6588 if (inst->size_written > inst->dst.component_size(inst->exec_size))
6589 return true;
6590
6591 /* If the lowered execution size is larger than the original the result of
6592 * the instruction won't fit in the original destination, so we'll have to
6593 * allocate a temporary in any case.
6594 */
6595 if (lbld.dispatch_width() > inst->exec_size)
6596 return true;
6597
6598 for (unsigned i = 0; i < inst->sources; i++) {
6599 /* If we already made a copy of the source for other reasons there won't
6600 * be any overlap with the destination.
6601 */
6602 if (needs_src_copy(lbld, inst, i))
6603 continue;
6604
6605 /* In order to keep the logic simple we emit a copy whenever the
6606        * destination region doesn't exactly match an overlapping source. Such a
6607        * mismatch may mean that the source and destination are not aligned group
6608        * by group, which could cause one of the lowered instructions to overwrite
6609        * the data read from the same source by other lowered instructions.
6610 */
6611 if (regions_overlap(inst->dst, inst->size_written,
6612 inst->src[i], inst->size_read(i)) &&
6613 !inst->dst.equals(inst->src[i]))
6614 return true;
6615 }
6616
6617 return false;
6618 }
6619
6620 /**
6621 * Insert data from a packed temporary into the channel group given by
6622 * lbld.group() of the destination region of instruction \p inst and return
6623 * the temporary as result. Any copy instructions that are required for
6624 * unzipping the previous value (in the case of partial writes) will be
6625 * inserted using \p lbld_before and any copy instructions required for
6626 * zipping up the destination of \p inst will be inserted using \p lbld_after.
6627 */
6628 static fs_reg
6629 emit_zip(const fs_builder &lbld_before, const fs_builder &lbld_after,
6630 fs_inst *inst)
6631 {
6632 assert(lbld_before.dispatch_width() == lbld_after.dispatch_width());
6633 assert(lbld_before.group() == lbld_after.group());
6634 assert(lbld_after.group() >= inst->group);
6635
6636 /* Specified channel group from the destination region. */
6637 const fs_reg dst = horiz_offset(inst->dst, lbld_after.group() - inst->group);
6638 const unsigned dst_size = inst->size_written /
6639 inst->dst.component_size(inst->exec_size);
6640
6641 if (needs_dst_copy(lbld_after, inst)) {
6642 const fs_reg tmp = lbld_after.vgrf(inst->dst.type, dst_size);
6643
6644 if (inst->predicate) {
6645 /* Handle predication by copying the original contents of
6646 * the destination into the temporary before emitting the
6647 * lowered instruction.
6648 */
6649 const fs_builder gbld_before =
6650 lbld_before.group(MIN2(lbld_before.dispatch_width(),
6651 inst->exec_size), 0);
6652 for (unsigned k = 0; k < dst_size; ++k) {
6653 gbld_before.MOV(offset(tmp, lbld_before, k),
6654 offset(dst, inst->exec_size, k));
6655 }
6656 }
6657
6658 const fs_builder gbld_after =
6659 lbld_after.group(MIN2(lbld_after.dispatch_width(),
6660 inst->exec_size), 0);
6661 for (unsigned k = 0; k < dst_size; ++k) {
6662 /* Use a builder of the right width to perform the copy avoiding
6663 * uninitialized data if the lowered execution size is greater than
6664 * the original execution size of the instruction.
6665 */
6666 gbld_after.MOV(offset(dst, inst->exec_size, k),
6667 offset(tmp, lbld_after, k));
6668 }
6669
6670 return tmp;
6671
6672 } else {
6673 /* No need to allocate a temporary for the lowered instruction, just
6674 * take the right group of channels from the original region.
6675 */
6676 return dst;
6677 }
6678 }
6679
6680 bool
6681 fs_visitor::lower_simd_width()
6682 {
6683 bool progress = false;
6684
6685 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
6686 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
6687
6688 if (lower_width != inst->exec_size) {
6689 /* Builder matching the original instruction. We may also need to
6690           * emit an instruction of width larger than the original, so set the
6691           * execution size of the builder to the larger of the two for now to
6692           * be sure that both cases can be handled.
6693 */
6694 const unsigned max_width = MAX2(inst->exec_size, lower_width);
6695 const fs_builder ibld = bld.at(block, inst)
6696 .exec_all(inst->force_writemask_all)
6697 .group(max_width, inst->group / max_width);
6698
6699 /* Split the copies in chunks of the execution width of either the
6700 * original or the lowered instruction, whichever is lower.
6701 */
6702 const unsigned n = DIV_ROUND_UP(inst->exec_size, lower_width);
6703 const unsigned dst_size = inst->size_written /
6704 inst->dst.component_size(inst->exec_size);
6705
6706 assert(!inst->writes_accumulator && !inst->mlen);
6707
6708 /* Inserting the zip, unzip, and duplicated instructions in all of
6709 * the right spots is somewhat tricky. All of the unzip and any
6710 * instructions from the zip which unzip the destination prior to
6711 * writing need to happen before all of the per-group instructions
6712 * and the zip instructions need to happen after. In order to sort
6713 * this all out, we insert the unzip instructions before \p inst,
6714 * insert the per-group instructions after \p inst (i.e. before
6715 * inst->next), and insert the zip instructions before the
6716 * instruction after \p inst. Since we are inserting instructions
6717 * after \p inst, inst->next is a moving target and we need to save
6718 * it off here so that we insert the zip instructions in the right
6719 * place.
6720 *
6721 * Since we're inserting the split instructions right after \p inst, the
6722 * instructions will end up in the reverse order that we insert them.
6723 * However, certain render target writes require that the low group
6724 * instructions come before the high group. From the Ivy Bridge PRM
6725 * Vol. 4, Pt. 1, Section 3.9.11:
6726 *
6727 * "If multiple SIMD8 Dual Source messages are delivered by the
6728 * pixel shader thread, each SIMD8_DUALSRC_LO message must be
6729 * issued before the SIMD8_DUALSRC_HI message with the same Slot
6730 * Group Select setting."
6731 *
6732 * And, from Section 3.9.11.1 of the same PRM:
6733 *
6734 * "When SIMD32 or SIMD16 PS threads send render target writes
6735 * with multiple SIMD8 and SIMD16 messages, the following must
6736 * hold:
6737 *
6738 * All the slots (as described above) must have a corresponding
6739 * render target write irrespective of the slot's validity. A slot
6740 * is considered valid when at least one sample is enabled. For
6741 * example, a SIMD16 PS thread must send two SIMD8 render target
6742 * writes to cover all the slots.
6743 *
6744 * PS thread must send SIMD render target write messages with
6745 * increasing slot numbers. For example, SIMD16 thread has
6746 * Slot[15:0] and if two SIMD8 render target writes are used, the
6747 * first SIMD8 render target write must send Slot[7:0] and the
6748 * next one must send Slot[15:8]."
6749 *
6750 * In order to make low group instructions come before high group
6751 * instructions (this is required for some render target writes), we
6752 * split from the highest group to lowest.
6753 */
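/* Concrete illustration (assuming a SIMD16 instruction lowered to SIMD8,
 * i.e. n == 2): the loop below emits the group-1 copy first and the
 * group-0 copy second, but because each copy lands immediately after
 * \p inst, the final program order is group 0 followed by group 1, which
 * satisfies the slot-ordering rules quoted above.
 */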
6754 exec_node *const after_inst = inst->next;
6755 for (int i = n - 1; i >= 0; i--) {
6756 /* Emit a copy of the original instruction with the lowered width.
6757 * If the EOT flag was set, throw it away except for the last
6758 * instruction to avoid killing the thread prematurely.
6759 */
6760 fs_inst split_inst = *inst;
6761 split_inst.exec_size = lower_width;
6762 split_inst.eot = inst->eot && i == int(n - 1);
6763
6764 /* Select the correct channel enables for the i-th group, then
6765 * transform the sources and destination and emit the lowered
6766 * instruction.
6767 */
6768 const fs_builder lbld = ibld.group(lower_width, i);
6769
6770 for (unsigned j = 0; j < inst->sources; j++)
6771 split_inst.src[j] = emit_unzip(lbld.at(block, inst), inst, j);
6772
6773 split_inst.dst = emit_zip(lbld.at(block, inst),
6774 lbld.at(block, after_inst), inst);
6775 split_inst.size_written =
6776 split_inst.dst.component_size(lower_width) * dst_size;
6777
6778 lbld.at(block, inst->next).emit(split_inst);
6779 }
6780
6781 inst->remove(block);
6782 progress = true;
6783 }
6784 }
6785
6786 if (progress)
6787 invalidate_live_intervals();
6788
6789 return progress;
6790 }
6791
6792 void
6793 fs_visitor::dump_instructions()
6794 {
6795 dump_instructions(NULL);
6796 }
6797
6798 void
6799 fs_visitor::dump_instructions(const char *name)
6800 {
6801 FILE *file = stderr;
6802 if (name && geteuid() != 0) {
6803 file = fopen(name, "w");
6804 if (!file)
6805 file = stderr;
6806 }
6807
6808 if (cfg) {
6809 calculate_register_pressure();
6810 int ip = 0, max_pressure = 0;
6811 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
6812 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
6813 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
6814 dump_instruction(inst, file);
6815 ip++;
6816 }
6817 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
6818 } else {
6819 int ip = 0;
6820 foreach_in_list(backend_instruction, inst, &instructions) {
6821 fprintf(file, "%4d: ", ip++);
6822 dump_instruction(inst, file);
6823 }
6824 }
6825
6826 if (file != stderr) {
6827 fclose(file);
6828 }
6829 }
6830
6831 void
6832 fs_visitor::dump_instruction(backend_instruction *be_inst)
6833 {
6834 dump_instruction(be_inst, stderr);
6835 }
6836
6837 void
6838 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
6839 {
6840 fs_inst *inst = (fs_inst *)be_inst;
6841
6842 if (inst->predicate) {
6843 fprintf(file, "(%cf%d.%d) ",
6844 inst->predicate_inverse ? '-' : '+',
6845 inst->flag_subreg / 2,
6846 inst->flag_subreg % 2);
6847 }
6848
6849 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
6850 if (inst->saturate)
6851 fprintf(file, ".sat");
6852 if (inst->conditional_mod) {
6853 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
6854 if (!inst->predicate &&
6855 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
6856 inst->opcode != BRW_OPCODE_CSEL &&
6857 inst->opcode != BRW_OPCODE_IF &&
6858 inst->opcode != BRW_OPCODE_WHILE))) {
6859 fprintf(file, ".f%d.%d", inst->flag_subreg / 2,
6860 inst->flag_subreg % 2);
6861 }
6862 }
6863 fprintf(file, "(%d) ", inst->exec_size);
6864
6865 if (inst->mlen) {
6866 fprintf(file, "(mlen: %d) ", inst->mlen);
6867 }
6868
6869 if (inst->ex_mlen) {
6870 fprintf(file, "(ex_mlen: %d) ", inst->ex_mlen);
6871 }
6872
6873 if (inst->eot) {
6874 fprintf(file, "(EOT) ");
6875 }
6876
6877 switch (inst->dst.file) {
6878 case VGRF:
6879 fprintf(file, "vgrf%d", inst->dst.nr);
6880 break;
6881 case FIXED_GRF:
6882 fprintf(file, "g%d", inst->dst.nr);
6883 break;
6884 case MRF:
6885 fprintf(file, "m%d", inst->dst.nr);
6886 break;
6887 case BAD_FILE:
6888 fprintf(file, "(null)");
6889 break;
6890 case UNIFORM:
6891 fprintf(file, "***u%d***", inst->dst.nr);
6892 break;
6893 case ATTR:
6894 fprintf(file, "***attr%d***", inst->dst.nr);
6895 break;
6896 case ARF:
6897 switch (inst->dst.nr) {
6898 case BRW_ARF_NULL:
6899 fprintf(file, "null");
6900 break;
6901 case BRW_ARF_ADDRESS:
6902 fprintf(file, "a0.%d", inst->dst.subnr);
6903 break;
6904 case BRW_ARF_ACCUMULATOR:
6905 fprintf(file, "acc%d", inst->dst.subnr);
6906 break;
6907 case BRW_ARF_FLAG:
6908 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
6909 break;
6910 default:
6911 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
6912 break;
6913 }
6914 break;
6915 case IMM:
6916 unreachable("not reached");
6917 }
6918
6919 if (inst->dst.offset ||
6920 (inst->dst.file == VGRF &&
6921 alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
6922 const unsigned reg_size = (inst->dst.file == UNIFORM ? 4 : REG_SIZE);
6923 fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
6924 inst->dst.offset % reg_size);
6925 }
6926
6927 if (inst->dst.stride != 1)
6928 fprintf(file, "<%u>", inst->dst.stride);
6929 fprintf(file, ":%s, ", brw_reg_type_to_letters(inst->dst.type));
6930
6931 for (int i = 0; i < inst->sources; i++) {
6932 if (inst->src[i].negate)
6933 fprintf(file, "-");
6934 if (inst->src[i].abs)
6935 fprintf(file, "|");
6936 switch (inst->src[i].file) {
6937 case VGRF:
6938 fprintf(file, "vgrf%d", inst->src[i].nr);
6939 break;
6940 case FIXED_GRF:
6941 fprintf(file, "g%d", inst->src[i].nr);
6942 break;
6943 case MRF:
6944 fprintf(file, "***m%d***", inst->src[i].nr);
6945 break;
6946 case ATTR:
6947 fprintf(file, "attr%d", inst->src[i].nr);
6948 break;
6949 case UNIFORM:
6950 fprintf(file, "u%d", inst->src[i].nr);
6951 break;
6952 case BAD_FILE:
6953 fprintf(file, "(null)");
6954 break;
6955 case IMM:
6956 switch (inst->src[i].type) {
6957 case BRW_REGISTER_TYPE_F:
6958 fprintf(file, "%-gf", inst->src[i].f);
6959 break;
6960 case BRW_REGISTER_TYPE_DF:
6961 fprintf(file, "%fdf", inst->src[i].df);
6962 break;
6963 case BRW_REGISTER_TYPE_W:
6964 case BRW_REGISTER_TYPE_D:
6965 fprintf(file, "%dd", inst->src[i].d);
6966 break;
6967 case BRW_REGISTER_TYPE_UW:
6968 case BRW_REGISTER_TYPE_UD:
6969 fprintf(file, "%uu", inst->src[i].ud);
6970 break;
6971 case BRW_REGISTER_TYPE_Q:
6972 fprintf(file, "%" PRId64 "q", inst->src[i].d64);
6973 break;
6974 case BRW_REGISTER_TYPE_UQ:
6975 fprintf(file, "%" PRIu64 "uq", inst->src[i].u64);
6976 break;
6977 case BRW_REGISTER_TYPE_VF:
6978 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
6979 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
6980 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
6981 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
6982 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
6983 break;
6984 case BRW_REGISTER_TYPE_V:
6985 case BRW_REGISTER_TYPE_UV:
6986 fprintf(file, "%08x%s", inst->src[i].ud,
6987 inst->src[i].type == BRW_REGISTER_TYPE_V ? "V" : "UV");
6988 break;
6989 default:
6990 fprintf(file, "???");
6991 break;
6992 }
6993 break;
6994 case ARF:
6995 switch (inst->src[i].nr) {
6996 case BRW_ARF_NULL:
6997 fprintf(file, "null");
6998 break;
6999 case BRW_ARF_ADDRESS:
7000 fprintf(file, "a0.%d", inst->src[i].subnr);
7001 break;
7002 case BRW_ARF_ACCUMULATOR:
7003 fprintf(file, "acc%d", inst->src[i].subnr);
7004 break;
7005 case BRW_ARF_FLAG:
7006 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
7007 break;
7008 default:
7009 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
7010 break;
7011 }
7012 break;
7013 }
7014
7015 if (inst->src[i].offset ||
7016 (inst->src[i].file == VGRF &&
7017 alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
7018 const unsigned reg_size = (inst->src[i].file == UNIFORM ? 4 : REG_SIZE);
7019 fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
7020 inst->src[i].offset % reg_size);
7021 }
7022
7023 if (inst->src[i].abs)
7024 fprintf(file, "|");
7025
7026 if (inst->src[i].file != IMM) {
7027 unsigned stride;
7028 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
7029 unsigned hstride = inst->src[i].hstride;
7030 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
7031 } else {
7032 stride = inst->src[i].stride;
7033 }
7034 if (stride != 1)
7035 fprintf(file, "<%u>", stride);
7036
7037 fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
7038 }
7039
7040 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
7041 fprintf(file, ", ");
7042 }
7043
7044 fprintf(file, " ");
7045
7046 if (inst->force_writemask_all)
7047 fprintf(file, "NoMask ");
7048
7049 if (inst->exec_size != dispatch_width)
7050 fprintf(file, "group%d ", inst->group);
7051
7052 fprintf(file, "\n");
7053 }
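
/* For reference, lines printed by the dumper above look roughly like this
 * (illustrative output; registers and types will vary):
 *
 *    (+f0.0) add.sat(16) vgrf5:F, vgrf3:F, vgrf4:F
 *    mov(8) m4<2>:UD, vgrf7:UD NoMask group8
 */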
7054
7055 void
7056 fs_visitor::setup_fs_payload_gen6()
7057 {
7058 assert(stage == MESA_SHADER_FRAGMENT);
7059 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
7060 const unsigned payload_width = MIN2(16, dispatch_width);
7061 assert(dispatch_width % payload_width == 0);
7062 assert(devinfo->gen >= 6);
7063
7064 prog_data->uses_src_depth = prog_data->uses_src_w =
7065 (nir->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD)) != 0;
7066
7067 prog_data->uses_sample_mask =
7068 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
7069
7070 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
7071 *
7072 * "MSDISPMODE_PERSAMPLE is required in order to select
7073 * POSOFFSET_SAMPLE"
7074 *
7075 * So we can only really get sample positions if we are doing real
7076 * per-sample dispatch. If we need gl_SamplePosition and we don't have
7077 * persample dispatch, we hard-code it to 0.5.
7078 */
7079 prog_data->uses_pos_offset = prog_data->persample_dispatch &&
7080 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
7081
7082 /* R0: PS thread payload header. */
7083 payload.num_regs++;
7084
7085 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
7086 /* R1: masks, pixel X/Y coordinates. */
7087 payload.subspan_coord_reg[j] = payload.num_regs++;
7088 }
7089
7090 for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
7091 /* R3-26: barycentric interpolation coordinates. These appear in the
7092 * same order that they appear in the brw_barycentric_mode enum. Each
7093 * set of coordinates occupies 2 registers if dispatch width == 8 and 4
7094 * registers if dispatch width == 16. Coordinates only appear if they
7095 * were enabled using the "Barycentric Interpolation Mode" bits in
7096 * WM_STATE.
7097 */
7098 for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
7099 if (prog_data->barycentric_interp_modes & (1 << i)) {
7100 payload.barycentric_coord_reg[i][j] = payload.num_regs;
7101 payload.num_regs += payload_width / 4;
7102 }
7103 }
7104
7105 /* R27-28: interpolated depth if uses source depth */
7106 if (prog_data->uses_src_depth) {
7107 payload.source_depth_reg[j] = payload.num_regs;
7108 payload.num_regs += payload_width / 8;
7109 }
7110
7111 /* R29-30: interpolated W set if GEN6_WM_USES_SOURCE_W. */
7112 if (prog_data->uses_src_w) {
7113 payload.source_w_reg[j] = payload.num_regs;
7114 payload.num_regs += payload_width / 8;
7115 }
7116
7117 /* R31: MSAA position offsets. */
7118 if (prog_data->uses_pos_offset) {
7119 payload.sample_pos_reg[j] = payload.num_regs;
7120 payload.num_regs++;
7121 }
7122
7123 /* R32-33: MSAA input coverage mask */
7124 if (prog_data->uses_sample_mask) {
7125 assert(devinfo->gen >= 7);
7126 payload.sample_mask_in_reg[j] = payload.num_regs;
7127 payload.num_regs += payload_width / 8;
7128 }
7129 }
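
/* Worked example (illustrative): a SIMD16 shader (payload_width == 16,
 * a single j iteration) using one barycentric mode plus source depth ends
 * up with num_regs == 1 (header) + 1 (masks/pixel X/Y) + 4 (barycentrics,
 * payload_width / 4) + 2 (source depth, payload_width / 8) == 8 GRFs.
 */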
7130
7131 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
7132 source_depth_to_render_target = true;
7133 }
7134 }
7135
7136 void
7137 fs_visitor::setup_vs_payload()
7138 {
7139 /* R0: thread header, R1: urb handles */
7140 payload.num_regs = 2;
7141 }
7142
7143 void
7144 fs_visitor::setup_gs_payload()
7145 {
7146 assert(stage == MESA_SHADER_GEOMETRY);
7147
7148 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
7149 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
7150
7151 /* R0: thread header, R1: output URB handles */
7152 payload.num_regs = 2;
7153
7154 if (gs_prog_data->include_primitive_id) {
7155 /* R2: Primitive ID 0..7 */
7156 payload.num_regs++;
7157 }
7158
7159 /* Always enable VUE handles so we can safely use pull model if needed.
7160 *
7161 * The push model for a GS uses a ton of register space even for trivial
7162 * scenarios with just a few inputs, so just make things easier and a bit
7163 * safer by always having pull model available.
7164 */
7165 gs_prog_data->base.include_vue_handles = true;
7166
7167 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
7168 payload.num_regs += nir->info.gs.vertices_in;
7169
7170 /* Use a maximum of 24 registers for push-model inputs. */
7171 const unsigned max_push_components = 24;
7172
7173 /* If pushing our inputs would take too many registers, reduce the URB read
7174 * length (which is in HWords, or 8 registers), and resort to pulling.
7175 *
7176 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
7177 * have to multiply by VerticesIn to obtain the total storage requirement.
7178 */
7179 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
7180 max_push_components) {
7181 vue_prog_data->urb_read_length =
7182 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
7183 }
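/* For example (illustrative numbers): with vertices_in == 3 and an
 * initial urb_read_length of 2, 8 * 2 * 3 == 48 exceeds 24, so the clamp
 * above yields ROUND_DOWN_TO(24 / 3, 8) / 8 == 1 HWord per vertex.
 */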
7184 }
7185
7186 void
7187 fs_visitor::setup_cs_payload()
7188 {
7189 assert(devinfo->gen >= 7);
7190 payload.num_regs = 1;
7191 }
7192
7193 void
7194 fs_visitor::calculate_register_pressure()
7195 {
7196 invalidate_live_intervals();
7197 calculate_live_intervals();
7198
7199 unsigned num_instructions = 0;
7200 foreach_block(block, cfg)
7201 num_instructions += block->instructions.length();
7202
7203 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
7204
7205 for (unsigned reg = 0; reg < alloc.count; reg++) {
7206 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
7207 regs_live_at_ip[ip] += alloc.sizes[reg];
7208 }
7209 }
7210
7211 void
7212 fs_visitor::optimize()
7213 {
7214 /* Start by validating the shader we currently have. */
7215 validate();
7216
7217 /* bld is the common builder object pointing at the end of the program we
7218 * used to translate it into i965 IR. For the optimization and lowering
7219 * passes coming next, any code added after the end of the program without
7220 * having explicitly called fs_builder::at() clearly points at a mistake.
7221 * Ideally optimization passes wouldn't be part of the visitor so they
7222 * wouldn't have access to bld at all, but they do, so just in case some
7223 * pass forgets to ask for a location explicitly set it to NULL here to
7224 * make it trip. The dispatch width is initialized to a bogus value to
7225 * make sure that optimizations set the execution controls explicitly to
7226 * match the code they are manipulating instead of relying on the defaults.
7227 */
7228 bld = fs_builder(this, 64);
7229
7230 assign_constant_locations();
7231 lower_constant_loads();
7232
7233 validate();
7234
7235 split_virtual_grfs();
7236 validate();
7237
7238 #define OPT(pass, args...) ({ \
7239 pass_num++; \
7240 bool this_progress = pass(args); \
7241 \
7242 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
7243 char filename[64]; \
7244 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
7245 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
7246 \
7247 backend_shader::dump_instructions(filename); \
7248 } \
7249 \
7250 validate(); \
7251 \
7252 progress = progress || this_progress; \
7253 this_progress; \
7254 })
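
/* OPT() is a statement expression, so its boolean result can drive
 * follow-up passes directly. Illustrative usage (mirroring the calls
 * further below):
 *
 *    if (OPT(lower_pack)) {
 *       OPT(register_coalesce);
 *       OPT(dead_code_eliminate);
 *    }
 */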
7255
7256 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
7257 char filename[64];
7258 snprintf(filename, 64, "%s%d-%s-00-00-start",
7259 stage_abbrev, dispatch_width, nir->info.name);
7260
7261 backend_shader::dump_instructions(filename);
7262 }
7263
7264 bool progress = false;
7265 int iteration = 0;
7266 int pass_num = 0;
7267
7268 /* Before anything else, eliminate dead code. The results of some NIR
7269 * instructions may effectively be calculated twice. Once when the
7270 * instruction is encountered, and again when the user of that result is
7271 * encountered. Wipe those away before algebraic optimizations and
7272 * especially copy propagation can mix things up.
7273 */
7274 OPT(dead_code_eliminate);
7275
7276 OPT(remove_extra_rounding_modes);
7277
7278 do {
7279 progress = false;
7280 pass_num = 0;
7281 iteration++;
7282
7283 OPT(remove_duplicate_mrf_writes);
7284
7285 OPT(opt_algebraic);
7286 OPT(opt_cse);
7287 OPT(opt_copy_propagation);
7288 OPT(opt_predicated_break, this);
7289 OPT(opt_cmod_propagation);
7290 OPT(dead_code_eliminate);
7291 OPT(opt_peephole_sel);
7292 OPT(dead_control_flow_eliminate, this);
7293 OPT(opt_register_renaming);
7294 OPT(opt_saturate_propagation);
7295 OPT(register_coalesce);
7296 OPT(compute_to_mrf);
7297 OPT(eliminate_find_live_channel);
7298
7299 OPT(compact_virtual_grfs);
7300 } while (progress);
7301
7302 progress = false;
7303 pass_num = 0;
7304
7305 if (OPT(lower_pack)) {
7306 OPT(register_coalesce);
7307 OPT(dead_code_eliminate);
7308 }
7309
7310 OPT(lower_simd_width);
7311
7312 /* After SIMD lowering just in case we had to unroll the EOT send. */
7313 OPT(opt_sampler_eot);
7314
7315 OPT(lower_logical_sends);
7316
7317 if (progress) {
7318 OPT(opt_copy_propagation);
7319 /* Only run after logical send lowering because it's easier to implement
7320 * in terms of physical sends.
7321 */
7322 if (OPT(opt_zero_samples))
7323 OPT(opt_copy_propagation);
7324 /* Run after logical send lowering to give it a chance to CSE the
7325 * LOAD_PAYLOAD instructions created to construct the payloads of
7326 * e.g. texturing messages in cases where it wasn't possible to CSE the
7327 * whole logical instruction.
7328 */
7329 OPT(opt_cse);
7330 OPT(register_coalesce);
7331 OPT(compute_to_mrf);
7332 OPT(dead_code_eliminate);
7333 OPT(remove_duplicate_mrf_writes);
7334 OPT(opt_peephole_sel);
7335 }
7336
7337 OPT(opt_redundant_discard_jumps);
7338
7339 if (OPT(lower_load_payload)) {
7340 split_virtual_grfs();
7341
7342 /* Lower 64 bit MOVs generated by payload lowering. */
7343 if (!devinfo->has_64bit_types)
7344 OPT(opt_algebraic);
7345
7346 OPT(register_coalesce);
7347 OPT(lower_simd_width);
7348 OPT(compute_to_mrf);
7349 OPT(dead_code_eliminate);
7350 }
7351
7352 OPT(opt_combine_constants);
7353 OPT(lower_integer_multiplication);
7354
7355 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
7356 OPT(opt_cmod_propagation);
7357 OPT(opt_cse);
7358 OPT(opt_copy_propagation);
7359 OPT(dead_code_eliminate);
7360 }
7361
7362 if (OPT(lower_regioning)) {
7363 OPT(opt_copy_propagation);
7364 OPT(dead_code_eliminate);
7365 OPT(lower_simd_width);
7366 }
7367
7368 OPT(fixup_sends_duplicate_payload);
7369
7370 lower_uniform_pull_constant_loads();
7371
7372 validate();
7373 }
7374
7375 /**
7376 * From the Skylake PRM Vol. 2a docs for sends:
7377 *
7378 * "It is required that the second block of GRFs does not overlap with the
7379 * first block."
7380 *
7381 * There are plenty of cases where we may accidentally violate this due to
7382 * having, for instance, both sources be the constant 0. This little pass
7383 * just adds a new vgrf for the second payload and copies it over.
7384 */
7385 bool
7386 fs_visitor::fixup_sends_duplicate_payload()
7387 {
7388 bool progress = false;
7389
7390 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
7391 if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
7392 regions_overlap(inst->src[2], inst->mlen * REG_SIZE,
7393 inst->src[3], inst->ex_mlen * REG_SIZE)) {
7394 fs_reg tmp = fs_reg(VGRF, alloc.allocate(inst->ex_mlen),
7395 BRW_REGISTER_TYPE_UD);
7396 /* Sadly, we've lost all notion of channels and bit sizes at this
7397 * point. Just WE_all it.
7398 */
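/* Illustrative walk-through: for ex_mlen == 3 the loop below emits one
 * SIMD16 MOV covering the first two registers (i == 0) and one SIMD8 MOV
 * for the remaining register (i == 2), since only one register is left.
 */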
7399 const fs_builder ibld = bld.at(block, inst).exec_all().group(16, 0);
7400 fs_reg copy_src = retype(inst->src[3], BRW_REGISTER_TYPE_UD);
7401 fs_reg copy_dst = tmp;
7402 for (unsigned i = 0; i < inst->ex_mlen; i += 2) {
7403 if (inst->ex_mlen == i + 1) {
7404 /* Only one register left; do SIMD8 */
7405 ibld.group(8, 0).MOV(copy_dst, copy_src);
7406 } else {
7407 ibld.MOV(copy_dst, copy_src);
7408 }
7409 copy_src = offset(copy_src, ibld, 1);
7410 copy_dst = offset(copy_dst, ibld, 1);
7411 }
7412 inst->src[3] = tmp;
7413 progress = true;
7414 }
7415 }
7416
7417 if (progress)
7418 invalidate_live_intervals();
7419
7420 return progress;
7421 }
7422
7423 /**
7424 * Three source instruction must have a GRF/MRF destination register.
7425 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
7426 */
7427 void
7428 fs_visitor::fixup_3src_null_dest()
7429 {
7430 bool progress = false;
7431
7432 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
7433 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
7434 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
7435 inst->dst.type);
7436 progress = true;
7437 }
7438 }
7439
7440 if (progress)
7441 invalidate_live_intervals();
7442 }
7443
7444 void
7445 fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
7446 {
7447 bool allocated;
7448
7449 static const enum instruction_scheduler_mode pre_modes[] = {
7450 SCHEDULE_PRE,
7451 SCHEDULE_PRE_NON_LIFO,
7452 SCHEDULE_PRE_LIFO,
7453 };
7454
7455 static const char *scheduler_mode_name[] = {
7456 "top-down",
7457 "non-lifo",
7458 "lifo"
7459 };
7460
7461 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
7462
7463 /* Try each scheduling heuristic to see if it can successfully register
7464 * allocate without spilling. They should be ordered by decreasing
7465 * performance but increasing likelihood of allocating.
7466 */
7467 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
7468 schedule_instructions(pre_modes[i]);
7469 this->shader_stats.scheduler_mode = scheduler_mode_name[i];
7470
7471 if (0) {
7472 assign_regs_trivial();
7473 allocated = true;
7474 break;
7475 }
7476
7477 /* We only allow spilling for the last schedule mode and only if the
7478 * allow_spilling parameter and dispatch width work out ok.
7479 */
7480 bool can_spill = allow_spilling &&
7481 (i == ARRAY_SIZE(pre_modes) - 1) &&
7482 dispatch_width == min_dispatch_width;
7483
7484 /* We should only spill registers on the last scheduling. */
7485 assert(!spilled_any_registers);
7486
7487 allocated = assign_regs(can_spill, spill_all);
7488 if (allocated)
7489 break;
7490 }
7491
7492 if (!allocated) {
7493 if (!allow_spilling)
7494 fail("Failure to register allocate and spilling is not allowed.");
7495
7496 /* We assume that any spilling is worse than just dropping back to
7497 * SIMD8. There's probably actually some intermediate point where
7498 * SIMD16 with a couple of spills is still better.
7499 */
7500 if (dispatch_width > min_dispatch_width) {
7501 fail("Failure to register allocate. Reduce number of "
7502 "live scalar values to avoid this.");
7503 }
7504
7505 /* If we failed to allocate, we must have a reason */
7506 assert(failed);
7507 } else if (spilled_any_registers) {
7508 compiler->shader_perf_log(log_data,
7509 "%s shader triggered register spilling. "
7510 "Try reducing the number of live scalar "
7511 "values to improve performance.\n",
7512 stage_name);
7513 }
7514
7515 /* This must come after all optimization and register allocation, since
7516 * it inserts dead code that happens to have side effects, and it does
7517 * so based on the actual physical registers in use.
7518 */
7519 insert_gen4_send_dependency_workarounds();
7520
7521 if (failed)
7522 return;
7523
7524 opt_bank_conflicts();
7525
7526 schedule_instructions(SCHEDULE_POST);
7527
7528 if (last_scratch > 0) {
7529 ASSERTED unsigned max_scratch_size = 2 * 1024 * 1024;
7530
7531 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
7532
7533 if (stage == MESA_SHADER_COMPUTE) {
7534 if (devinfo->is_haswell) {
7535 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
7536 * field documentation, Haswell supports a minimum of 2kB of
7537 * scratch space for compute shaders, unlike every other stage
7538 * and platform.
7539 */
7540 prog_data->total_scratch = MAX2(prog_data->total_scratch, 2048);
7541 } else if (devinfo->gen <= 7) {
7542 /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
7543 * field documentation, platforms prior to Haswell measure scratch
7544 * size linearly with a range of [1kB, 12kB] and 1kB granularity.
7545 */
7546 prog_data->total_scratch = ALIGN(last_scratch, 1024);
7547 max_scratch_size = 12 * 1024;
7548 }
7549 }
7550
7551 /* We currently only support up to 2MB of scratch space. If we
7552 * need to support more eventually, the documentation suggests
7553 * that we could allocate a larger buffer, and partition it out
7554 * ourselves. We'd just have to undo the hardware's address
7555 * calculation by subtracting (FFTID * Per Thread Scratch Space)
7556 * and then add FFTID * (Larger Per Thread Scratch Space).
7557 *
7558 * See 3D-Media-GPGPU Engine > Media GPGPU Pipeline >
7559 * Thread Group Tracking > Local Memory/Scratch Space.
7560 */
7561 assert(prog_data->total_scratch < max_scratch_size);
7562 }
7563
7564 lower_scoreboard();
7565 }
7566
7567 bool
7568 fs_visitor::run_vs()
7569 {
7570 assert(stage == MESA_SHADER_VERTEX);
7571
7572 setup_vs_payload();
7573
7574 if (shader_time_index >= 0)
7575 emit_shader_time_begin();
7576
7577 emit_nir_code();
7578
7579 if (failed)
7580 return false;
7581
7582 emit_urb_writes();
7583
7584 if (shader_time_index >= 0)
7585 emit_shader_time_end();
7586
7587 calculate_cfg();
7588
7589 optimize();
7590
7591 assign_curb_setup();
7592 assign_vs_urb_setup();
7593
7594 fixup_3src_null_dest();
7595 allocate_registers(8, true);
7596
7597 return !failed;
7598 }
7599
7600 void
7601 fs_visitor::set_tcs_invocation_id()
7602 {
7603 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
7604 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
7605
7606 const unsigned instance_id_mask =
7607 devinfo->gen >= 11 ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
7608 const unsigned instance_id_shift =
7609 devinfo->gen >= 11 ? 16 : 17;
7610
7611 /* Get instance number from g0.2 bits 22:16 or 23:17 */
7612 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
7613 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
7614 brw_imm_ud(instance_id_mask));
7615
7616 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
7617
7618 if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH) {
7619 /* gl_InvocationID is just the thread number */
7620 bld.SHR(invocation_id, t, brw_imm_ud(instance_id_shift));
7621 return;
7622 }
7623
7624 assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH);
7625
7626 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
7627 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
7628 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
7629 bld.MOV(channels_ud, channels_uw);
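/* Illustrative note: brw_imm_uv(0x76543210) is a packed vector of the
 * nibbles 7..0, so after the two MOVs above channel i of channels_ud holds
 * the value i. In the multi-instance case below the invocation ID is then
 * instance * 8 + channel, e.g. instance 2, channel 5 -> gl_InvocationID == 21.
 */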
7630
7631 if (tcs_prog_data->instances == 1) {
7632 invocation_id = channels_ud;
7633 } else {
7634 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
7635 bld.SHR(instance_times_8, t, brw_imm_ud(instance_id_shift - 3));
7636 bld.ADD(invocation_id, instance_times_8, channels_ud);
7637 }
7638 }
7639
7640 bool
7641 fs_visitor::run_tcs()
7642 {
7643 assert(stage == MESA_SHADER_TESS_CTRL);
7644
7645 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
7646 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
7647 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
7648
7649 assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH ||
7650 vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
7651
7652 if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
7653 /* r1-r4 contain the ICP handles. */
7654 payload.num_regs = 5;
7655 } else {
7656 assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
7657 assert(tcs_key->input_vertices > 0);
7658 /* r1 contains output handles, r2 may contain primitive ID, then the
7659 * ICP handles occupy the next 1-32 registers.
7660 */
7661 payload.num_regs = 2 + tcs_prog_data->include_primitive_id +
7662 tcs_key->input_vertices;
7663 }
7664
7665 if (shader_time_index >= 0)
7666 emit_shader_time_begin();
7667
7668 /* Initialize gl_InvocationID */
7669 set_tcs_invocation_id();
7670
7671 const bool fix_dispatch_mask =
7672 vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH &&
7673 (nir->info.tess.tcs_vertices_out % 8) != 0;
7674
7675 /* Fix the dispatch mask */
7676 if (fix_dispatch_mask) {
7677 bld.CMP(bld.null_reg_ud(), invocation_id,
7678 brw_imm_ud(nir->info.tess.tcs_vertices_out), BRW_CONDITIONAL_L);
7679 bld.IF(BRW_PREDICATE_NORMAL);
7680 }
7681
7682 emit_nir_code();
7683
7684 if (fix_dispatch_mask) {
7685 bld.emit(BRW_OPCODE_ENDIF);
7686 }
7687
7688 /* Emit EOT write; set TR DS Cache bit */
7689 fs_reg srcs[3] = {
7690 fs_reg(get_tcs_output_urb_handle()),
7691 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
7692 fs_reg(brw_imm_ud(0)),
7693 };
7694 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
7695 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
7696
7697 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
7698 bld.null_reg_ud(), payload);
7699 inst->mlen = 3;
7700 inst->eot = true;
7701
7702 if (shader_time_index >= 0)
7703 emit_shader_time_end();
7704
7705 if (failed)
7706 return false;
7707
7708 calculate_cfg();
7709
7710 optimize();
7711
7712 assign_curb_setup();
7713 assign_tcs_urb_setup();
7714
7715 fixup_3src_null_dest();
7716 allocate_registers(8, true);
7717
7718 return !failed;
7719 }
7720
7721 bool
7722 fs_visitor::run_tes()
7723 {
7724 assert(stage == MESA_SHADER_TESS_EVAL);
7725
7726 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
7727 payload.num_regs = 5;
7728
7729 if (shader_time_index >= 0)
7730 emit_shader_time_begin();
7731
7732 emit_nir_code();
7733
7734 if (failed)
7735 return false;
7736
7737 emit_urb_writes();
7738
7739 if (shader_time_index >= 0)
7740 emit_shader_time_end();
7741
7742 calculate_cfg();
7743
7744 optimize();
7745
7746 assign_curb_setup();
7747 assign_tes_urb_setup();
7748
7749 fixup_3src_null_dest();
7750 allocate_registers(8, true);
7751
7752 return !failed;
7753 }
7754
7755 bool
7756 fs_visitor::run_gs()
7757 {
7758 assert(stage == MESA_SHADER_GEOMETRY);
7759
7760 setup_gs_payload();
7761
7762 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
7763
7764 if (gs_compile->control_data_header_size_bits > 0) {
7765 /* Create a VGRF to store accumulated control data bits. */
7766 this->control_data_bits = vgrf(glsl_type::uint_type);
7767
7768 /* If we're outputting more than 32 control data bits, then EmitVertex()
7769 * will set control_data_bits to 0 after emitting the first vertex.
7770 * Otherwise, we need to initialize it to 0 here.
7771 */
7772 if (gs_compile->control_data_header_size_bits <= 32) {
7773 const fs_builder abld = bld.annotate("initialize control data bits");
7774 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
7775 }
7776 }
7777
7778 if (shader_time_index >= 0)
7779 emit_shader_time_begin();
7780
7781 emit_nir_code();
7782
7783 emit_gs_thread_end();
7784
7785 if (shader_time_index >= 0)
7786 emit_shader_time_end();
7787
7788 if (failed)
7789 return false;
7790
7791 calculate_cfg();
7792
7793 optimize();
7794
7795 assign_curb_setup();
7796 assign_gs_urb_setup();
7797
7798 fixup_3src_null_dest();
7799 allocate_registers(8, true);
7800
7801 return !failed;
7802 }
7803
7804 /* From the SKL PRM, Volume 16, Workarounds:
7805 *
7806 * 0877 3D Pixel Shader Hang possible when pixel shader dispatched with
7807 * only header phases (R0-R2)
7808 *
7809 * WA: Enable a non-header phase (e.g. push constant) when dispatch would
7810 * have been header only.
7811 *
7812 * Instead of enabling push constants one can alternatively enable one of the
7813 * inputs. Here one simply chooses "layer" which shouldn't impose much
7814 * overhead.
7815 */
7816 static void
7817 gen9_ps_header_only_workaround(struct brw_wm_prog_data *wm_prog_data)
7818 {
7819 if (wm_prog_data->num_varying_inputs)
7820 return;
7821
7822 if (wm_prog_data->base.curb_read_length)
7823 return;
7824
7825 wm_prog_data->urb_setup[VARYING_SLOT_LAYER] = 0;
7826 wm_prog_data->num_varying_inputs = 1;
7827 }
7828
7829 bool
7830 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
7831 {
7832 struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
7833 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
7834
7835 assert(stage == MESA_SHADER_FRAGMENT);
7836
7837 if (devinfo->gen >= 6)
7838 setup_fs_payload_gen6();
7839 else
7840 setup_fs_payload_gen4();
7841
7842 if (0) {
7843 emit_dummy_fs();
7844 } else if (do_rep_send) {
7845 assert(dispatch_width == 16);
7846 emit_repclear_shader();
7847 } else {
7848 if (shader_time_index >= 0)
7849 emit_shader_time_begin();
7850
7851 if (nir->info.inputs_read > 0 ||
7852 (nir->info.system_values_read & (1ull << SYSTEM_VALUE_FRAG_COORD)) ||
7853 (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
7854 if (devinfo->gen < 6)
7855 emit_interpolation_setup_gen4();
7856 else
7857 emit_interpolation_setup_gen6();
7858 }
7859
7860 /* We handle discards by keeping track of the still-live pixels in f0.1.
7861 * Initialize it with the dispatched pixels.
7862 */
7863 if (wm_prog_data->uses_kill) {
7864 const fs_reg dispatch_mask =
7865 devinfo->gen >= 6 ? brw_vec1_grf(1, 7) : brw_vec1_grf(0, 0);
7866 bld.exec_all().group(1, 0)
7867 .MOV(retype(brw_flag_reg(0, 1), BRW_REGISTER_TYPE_UW),
7868 retype(dispatch_mask, BRW_REGISTER_TYPE_UW));
7869 }
7870
7871 emit_nir_code();
7872
7873 if (failed)
7874 return false;
7875
7876 if (wm_prog_data->uses_kill)
7877 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
7878
7879 if (wm_key->alpha_test_func)
7880 emit_alpha_test();
7881
7882 emit_fb_writes();
7883
7884 if (shader_time_index >= 0)
7885 emit_shader_time_end();
7886
7887 calculate_cfg();
7888
7889 optimize();
7890
7891 assign_curb_setup();
7892
7893 if (devinfo->gen >= 9)
7894 gen9_ps_header_only_workaround(wm_prog_data);
7895
7896 assign_urb_setup();
7897
7898 fixup_3src_null_dest();
7899 allocate_registers(8, allow_spilling);
7900
7901 if (failed)
7902 return false;
7903 }
7904
7905 return !failed;
7906 }
7907
7908 bool
7909 fs_visitor::run_cs(unsigned min_dispatch_width)
7910 {
7911 assert(stage == MESA_SHADER_COMPUTE);
7912 assert(dispatch_width >= min_dispatch_width);
7913
7914 setup_cs_payload();
7915
7916 if (shader_time_index >= 0)
7917 emit_shader_time_begin();
7918
7919 if (devinfo->is_haswell && prog_data->total_shared > 0) {
7920 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
7921 const fs_builder abld = bld.exec_all().group(1, 0);
7922 abld.MOV(retype(brw_sr0_reg(1), BRW_REGISTER_TYPE_UW),
7923 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
7924 }
7925
7926 emit_nir_code();
7927
7928 if (failed)
7929 return false;
7930
7931 emit_cs_terminate();
7932
7933 if (shader_time_index >= 0)
7934 emit_shader_time_end();
7935
7936 calculate_cfg();
7937
7938 optimize();
7939
7940 assign_curb_setup();
7941
7942 fixup_3src_null_dest();
7943 allocate_registers(min_dispatch_width, true);
7944
7945 if (failed)
7946 return false;
7947
7948 return !failed;
7949 }
7950
7951 static bool
7952 is_used_in_not_interp_frag_coord(nir_ssa_def *def)
7953 {
7954 nir_foreach_use(src, def) {
7955 if (src->parent_instr->type != nir_instr_type_intrinsic)
7956 return true;
7957
7958 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src->parent_instr);
7959 if (intrin->intrinsic != nir_intrinsic_load_frag_coord)
7960 return true;
7961 }
7962
7963 nir_foreach_if_use(src, def)
7964 return true;
7965
7966 return false;
7967 }
7968
7969 /**
7970 * Return a bitfield where bit n is set if barycentric interpolation mode n
7971 * (see enum brw_barycentric_mode) is needed by the fragment shader.
7972 *
7973 * We examine the load_barycentric intrinsics rather than looking at input
7974 * variables so that we catch interpolateAtCentroid() messages too, which
7975 * also need the BRW_BARYCENTRIC_[NON]PERSPECTIVE_CENTROID mode set up.
7976 */
7977 static unsigned
7978 brw_compute_barycentric_interp_modes(const struct gen_device_info *devinfo,
7979 const nir_shader *shader)
7980 {
7981 unsigned barycentric_interp_modes = 0;
7982
7983 nir_foreach_function(f, shader) {
7984 if (!f->impl)
7985 continue;
7986
7987 nir_foreach_block(block, f->impl) {
7988 nir_foreach_instr(instr, block) {
7989 if (instr->type != nir_instr_type_intrinsic)
7990 continue;
7991
7992 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
7993 switch (intrin->intrinsic) {
7994 case nir_intrinsic_load_barycentric_pixel:
7995 case nir_intrinsic_load_barycentric_centroid:
7996 case nir_intrinsic_load_barycentric_sample:
7997 break;
7998 default:
7999 continue;
8000 }
8001
8002 /* Ignore WPOS; it doesn't require interpolation. */
8003 assert(intrin->dest.is_ssa);
8004 if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
8005 continue;
8006
8007 enum glsl_interp_mode interp = (enum glsl_interp_mode)
8008 nir_intrinsic_interp_mode(intrin);
8009 nir_intrinsic_op bary_op = intrin->intrinsic;
8010 enum brw_barycentric_mode bary =
8011 brw_barycentric_mode(interp, bary_op);
8012
8013 barycentric_interp_modes |= 1 << bary;
8014
8015 if (devinfo->needs_unlit_centroid_workaround &&
8016 bary_op == nir_intrinsic_load_barycentric_centroid)
8017 barycentric_interp_modes |= 1 << centroid_to_pixel(bary);
8018 }
8019 }
8020 }
8021
8022 return barycentric_interp_modes;
8023 }
8024
8025 static void
8026 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
8027 const nir_shader *shader)
8028 {
8029 prog_data->flat_inputs = 0;
8030
8031 nir_foreach_variable(var, &shader->inputs) {
8032 unsigned slots = glsl_count_attribute_slots(var->type, false);
8033 for (unsigned s = 0; s < slots; s++) {
8034 int input_index = prog_data->urb_setup[var->data.location + s];
8035
8036 if (input_index < 0)
8037 continue;
8038
8039 /* flat shading */
8040 if (var->data.interpolation == INTERP_MODE_FLAT)
8041 prog_data->flat_inputs |= 1 << input_index;
8042 }
8043 }
8044 }
8045
8046 static uint8_t
8047 computed_depth_mode(const nir_shader *shader)
8048 {
8049 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
8050 switch (shader->info.fs.depth_layout) {
8051 case FRAG_DEPTH_LAYOUT_NONE:
8052 case FRAG_DEPTH_LAYOUT_ANY:
8053 return BRW_PSCDEPTH_ON;
8054 case FRAG_DEPTH_LAYOUT_GREATER:
8055 return BRW_PSCDEPTH_ON_GE;
8056 case FRAG_DEPTH_LAYOUT_LESS:
8057 return BRW_PSCDEPTH_ON_LE;
8058 case FRAG_DEPTH_LAYOUT_UNCHANGED:
8059 return BRW_PSCDEPTH_OFF;
8060 }
8061 }
8062 return BRW_PSCDEPTH_OFF;
8063 }
8064
8065 /**
8066 * Move load_interpolated_input with simple (payload-based) barycentric modes
8067 * to the top of the program so we don't emit multiple PLNs for the same input.
8068 *
8069 * This works around CSE not being able to handle non-dominating cases
8070 * such as:
8071 *
8072 * if (...) {
8073 * interpolate input
8074 * } else {
8075 * interpolate the same exact input
8076 * }
8077 *
8078 * This should be replaced by global value numbering someday.
8079 */
8080 static bool
8081 move_interpolation_to_top(nir_shader *nir)
8082 {
8083 bool progress = false;
8084
8085 nir_foreach_function(f, nir) {
8086 if (!f->impl)
8087 continue;
8088
8089 nir_block *top = nir_start_block(f->impl);
8090 exec_node *cursor_node = NULL;
8091
8092 nir_foreach_block(block, f->impl) {
8093 if (block == top)
8094 continue;
8095
8096 nir_foreach_instr_safe(instr, block) {
8097 if (instr->type != nir_instr_type_intrinsic)
8098 continue;
8099
8100 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
8101 if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
8102 continue;
8103 nir_intrinsic_instr *bary_intrinsic =
8104 nir_instr_as_intrinsic(intrin->src[0].ssa->parent_instr);
8105 nir_intrinsic_op op = bary_intrinsic->intrinsic;
8106
8107 /* Leave interpolateAtSample/Offset() where they are. */
8108 if (op == nir_intrinsic_load_barycentric_at_sample ||
8109 op == nir_intrinsic_load_barycentric_at_offset)
8110 continue;
8111
8112 nir_instr *move[3] = {
8113 &bary_intrinsic->instr,
8114 intrin->src[1].ssa->parent_instr,
8115 instr
8116 };
8117
8118 for (unsigned i = 0; i < ARRAY_SIZE(move); i++) {
8119 if (move[i]->block != top) {
8120 move[i]->block = top;
8121 exec_node_remove(&move[i]->node);
8122 if (cursor_node) {
8123 exec_node_insert_after(cursor_node, &move[i]->node);
8124 } else {
8125 exec_list_push_head(&top->instr_list, &move[i]->node);
8126 }
8127 cursor_node = &move[i]->node;
8128 progress = true;
8129 }
8130 }
8131 }
8132 }
8133 nir_metadata_preserve(f->impl, (nir_metadata)
8134 ((unsigned) nir_metadata_block_index |
8135 (unsigned) nir_metadata_dominance));
8136 }
8137
8138 return progress;
8139 }
8140
8141 /**
8142 * Demote per-sample barycentric intrinsics to centroid.
8143 *
8144 * Useful when rendering to a non-multisampled buffer.
8145 */
8146 static bool
8147 demote_sample_qualifiers(nir_shader *nir)
8148 {
8149 bool progress = false;
8150
8151 nir_foreach_function(f, nir) {
8152 if (!f->impl)
8153 continue;
8154
8155 nir_builder b;
8156 nir_builder_init(&b, f->impl);
8157
8158 nir_foreach_block(block, f->impl) {
8159 nir_foreach_instr_safe(instr, block) {
8160 if (instr->type != nir_instr_type_intrinsic)
8161 continue;
8162
8163 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
8164 if (intrin->intrinsic != nir_intrinsic_load_barycentric_sample &&
8165 intrin->intrinsic != nir_intrinsic_load_barycentric_at_sample)
8166 continue;
8167
8168 b.cursor = nir_before_instr(instr);
8169 nir_ssa_def *centroid =
8170 nir_load_barycentric(&b, nir_intrinsic_load_barycentric_centroid,
8171 nir_intrinsic_interp_mode(intrin));
8172 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
8173 nir_src_for_ssa(centroid));
8174 nir_instr_remove(instr);
8175 progress = true;
8176 }
8177 }
8178
8179 nir_metadata_preserve(f->impl, (nir_metadata)
8180 ((unsigned) nir_metadata_block_index |
8181 (unsigned) nir_metadata_dominance));
8182 }
8183
8184 return progress;
8185 }
8186
8187 /**
8188 * Pre-gen6, the register file of the EUs was shared between threads,
8189 * and each thread used some subset allocated on a 16-register block
8190 * granularity. The unit states wanted these block counts.
8191 */
8192 static inline int
8193 brw_register_blocks(int reg_count)
8194 {
8195 return ALIGN(reg_count, 16) / 16 - 1;
8196 }
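
/* For example (illustrative): reg_count == 48 gives
 * ALIGN(48, 16) / 16 - 1 == 2, i.e. three 16-register blocks encoded as 2.
 */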
8197
8198 const unsigned *
8199 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
8200 void *mem_ctx,
8201 const struct brw_wm_prog_key *key,
8202 struct brw_wm_prog_data *prog_data,
8203 nir_shader *shader,
8204 int shader_time_index8, int shader_time_index16,
8205 int shader_time_index32, bool allow_spilling,
8206 bool use_rep_send, struct brw_vue_map *vue_map,
8207 struct brw_compile_stats *stats,
8208 char **error_str)
8209 {
8210 const struct gen_device_info *devinfo = compiler->devinfo;
8211
8212 unsigned max_subgroup_size = unlikely(INTEL_DEBUG & DEBUG_DO32) ? 32 : 16;
8213
8214 brw_nir_apply_key(shader, compiler, &key->base, max_subgroup_size, true);
8215 brw_nir_lower_fs_inputs(shader, devinfo, key);
8216 brw_nir_lower_fs_outputs(shader);
8217
8218 if (devinfo->gen < 6)
8219 brw_setup_vue_interpolation(vue_map, shader, prog_data);
8220
8221 /* From the SKL PRM, Volume 7, "Alpha Coverage":
8222 * "If Pixel Shader outputs oMask, AlphaToCoverage is disabled in
8223 * hardware, regardless of the state setting for this feature."
8224 */
8225 if (devinfo->gen > 6 && key->alpha_to_coverage) {
8226 /* Run constant fold optimization in order to get the correct source
8227 * offset to determine render target 0 store instruction in
8228 * emit_alpha_to_coverage pass.
8229 */
8230 NIR_PASS_V(shader, nir_opt_constant_folding);
8231 NIR_PASS_V(shader, brw_nir_lower_alpha_to_coverage);
8232 }
8233
8234 if (!key->multisample_fbo)
8235 NIR_PASS_V(shader, demote_sample_qualifiers);
8236 NIR_PASS_V(shader, move_interpolation_to_top);
8237 brw_postprocess_nir(shader, compiler, true);
8238
8239 /* key->alpha_test_func means simulating alpha testing via discards,
8240 * so the shader definitely kills pixels.
8241 */
8242 prog_data->uses_kill = shader->info.fs.uses_discard ||
8243 key->alpha_test_func;
8244 prog_data->uses_omask = key->multisample_fbo &&
8245 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
8246 prog_data->computed_depth_mode = computed_depth_mode(shader);
8247 prog_data->computed_stencil =
8248 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
8249
8250 prog_data->persample_dispatch =
8251 key->multisample_fbo &&
8252 (key->persample_interp ||
8253 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
8254 SYSTEM_BIT_SAMPLE_POS)) ||
8255 shader->info.fs.uses_sample_qualifier ||
8256 shader->info.outputs_read);
8257
8258 prog_data->has_render_target_reads = shader->info.outputs_read != 0ull;
8259
8260 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
8261 prog_data->post_depth_coverage = shader->info.fs.post_depth_coverage;
8262 prog_data->inner_coverage = shader->info.fs.inner_coverage;
8263
8264 prog_data->barycentric_interp_modes =
8265 brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
8266
8267 calculate_urb_setup(devinfo, key, prog_data, shader);
8268 brw_compute_flat_inputs(prog_data, shader);
8269
8270 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
8271
8272 fs_visitor v8(compiler, log_data, mem_ctx, &key->base,
8273 &prog_data->base, shader, 8,
8274 shader_time_index8);
8275 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
8276 if (error_str)
8277 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
8278
8279 return NULL;
8280 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
8281 simd8_cfg = v8.cfg;
8282 prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
8283 prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
8284 }
8285
8286 /* Limit dispatch width to simd8 with dual source blending on gen8.
8287 * See: https://gitlab.freedesktop.org/mesa/mesa/issues/1917
8288 */
8289 if (devinfo->gen == 8 && prog_data->dual_src_blend &&
8290 !(INTEL_DEBUG & DEBUG_NO8)) {
8291 assert(!use_rep_send);
8292 v8.limit_dispatch_width(8, "gen8 workaround: "
8293 "using SIMD8 when dual src blending.\n");
8294 }
8295
8296 if (v8.max_dispatch_width >= 16 &&
8297 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
8298 /* Try a SIMD16 compile */
8299 fs_visitor v16(compiler, log_data, mem_ctx, &key->base,
8300 &prog_data->base, shader, 16,
8301 shader_time_index16);
8302 v16.import_uniforms(&v8);
8303 if (!v16.run_fs(allow_spilling, use_rep_send)) {
8304 compiler->shader_perf_log(log_data,
8305 "SIMD16 shader failed to compile: %s",
8306 v16.fail_msg);
8307 } else {
8308 simd16_cfg = v16.cfg;
8309 prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
8310 prog_data->reg_blocks_16 = brw_register_blocks(v16.grf_used);
8311 }
8312 }
8313
8314 /* Currently, the compiler only supports SIMD32 on SNB+ */
8315 if (v8.max_dispatch_width >= 32 && !use_rep_send &&
8316 compiler->devinfo->gen >= 6 &&
8317 unlikely(INTEL_DEBUG & DEBUG_DO32)) {
8318 /* Try a SIMD32 compile */
8319 fs_visitor v32(compiler, log_data, mem_ctx, &key->base,
8320 &prog_data->base, shader, 32,
8321 shader_time_index32);
8322 v32.import_uniforms(&v8);
8323 if (!v32.run_fs(allow_spilling, false)) {
8324 compiler->shader_perf_log(log_data,
8325 "SIMD32 shader failed to compile: %s",
8326 v32.fail_msg);
8327 } else {
8328 simd32_cfg = v32.cfg;
8329 prog_data->dispatch_grf_start_reg_32 = v32.payload.num_regs;
8330 prog_data->reg_blocks_32 = brw_register_blocks(v32.grf_used);
8331 }
8332 }
8333
8334 /* When the caller requests a repclear shader, they want SIMD16-only */
8335 if (use_rep_send)
8336 simd8_cfg = NULL;
8337
8338 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
8339 * at the top to select the shader. We've never implemented that.
8340 * Instead, we just give them exactly one shader and we pick the widest one
8341 * available.
8342 */
8343 if (compiler->devinfo->gen < 5) {
8344 if (simd32_cfg || simd16_cfg)
8345 simd8_cfg = NULL;
8346 if (simd32_cfg)
8347 simd16_cfg = NULL;
8348 }
8349
8350 /* If computed depth is enabled SNB only allows SIMD8. */
8351 if (compiler->devinfo->gen == 6 &&
8352 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF)
8353 assert(simd16_cfg == NULL && simd32_cfg == NULL);
8354
8355 if (compiler->devinfo->gen <= 5 && !simd8_cfg) {
8356 /* Iron lake and earlier only have one Dispatch GRF start field. Make
8357 * the data available in the base prog data struct for convenience.
8358 */
8359 if (simd16_cfg) {
8360 prog_data->base.dispatch_grf_start_reg =
8361 prog_data->dispatch_grf_start_reg_16;
8362 } else if (simd32_cfg) {
8363 prog_data->base.dispatch_grf_start_reg =
8364 prog_data->dispatch_grf_start_reg_32;
8365 }
8366 }
8367
8368 if (prog_data->persample_dispatch) {
8369 /* Starting with SandyBridge (where we first get MSAA), the different
8370 * pixel dispatch combinations are grouped into classifications A
8371 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
8372 * generations, the only configurations supporting persample dispatch
8373 * are those in which only one dispatch width is enabled.
8374 */
8375 if (simd32_cfg || simd16_cfg)
8376 simd8_cfg = NULL;
8377 if (simd32_cfg)
8378 simd16_cfg = NULL;
8379 }
8380
8381 fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
8382 v8.shader_stats, v8.runtime_check_aads_emit,
8383 MESA_SHADER_FRAGMENT);
8384
8385 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
8386 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
8387 shader->info.label ?
8388 shader->info.label : "unnamed",
8389 shader->info.name));
8390 }
8391
8392 if (simd8_cfg) {
8393 prog_data->dispatch_8 = true;
8394 g.generate_code(simd8_cfg, 8, stats);
8395 stats = stats ? stats + 1 : NULL;
8396 }
8397
8398 if (simd16_cfg) {
8399 prog_data->dispatch_16 = true;
8400 prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16, stats);
8401 stats = stats ? stats + 1 : NULL;
8402 }
8403
8404 if (simd32_cfg) {
8405 prog_data->dispatch_32 = true;
8406 prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32, stats);
8407 stats = stats ? stats + 1 : NULL;
8408 }
8409
8410 return g.get_assembly();
8411 }
8412
8413 fs_reg *
8414 fs_visitor::emit_cs_work_group_id_setup()
8415 {
8416 assert(stage == MESA_SHADER_COMPUTE);
8417
8418 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
8419
8420 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
8421 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
8422 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
8423
8424 bld.MOV(*reg, r0_1);
8425 bld.MOV(offset(*reg, bld, 1), r0_6);
8426 bld.MOV(offset(*reg, bld, 2), r0_7);
8427
8428 return reg;
8429 }
8430
8431 static void
8432 fill_push_const_block_info(struct brw_push_const_block *block, unsigned dwords)
8433 {
8434 block->dwords = dwords;
8435 block->regs = DIV_ROUND_UP(dwords, 8);
8436 block->size = block->regs * 32;
8437 }
8438
8439 static void
8440 cs_fill_push_const_info(const struct gen_device_info *devinfo,
8441 struct brw_cs_prog_data *cs_prog_data)
8442 {
8443 const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
8444 int subgroup_id_index = get_subgroup_id_param_index(prog_data);
8445 bool cross_thread_supported = devinfo->gen > 7 || devinfo->is_haswell;
8446
8447 /* The thread ID should be stored in the last param dword */
8448 assert(subgroup_id_index == -1 ||
8449 subgroup_id_index == (int)prog_data->nr_params - 1);
8450
8451 unsigned cross_thread_dwords, per_thread_dwords;
8452 if (!cross_thread_supported) {
8453 cross_thread_dwords = 0u;
8454 per_thread_dwords = prog_data->nr_params;
8455 } else if (subgroup_id_index >= 0) {
8456 /* Fill all but the last register with cross-thread payload */
8457 cross_thread_dwords = 8 * (subgroup_id_index / 8);
8458 per_thread_dwords = prog_data->nr_params - cross_thread_dwords;
8459 assert(per_thread_dwords > 0 && per_thread_dwords <= 8);
8460 } else {
8461 /* Fill all data using cross-thread payload */
8462 cross_thread_dwords = prog_data->nr_params;
8463 per_thread_dwords = 0u;
8464 }
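
/* Worked example (illustrative): with nr_params == 18 and
 * subgroup_id_index == 17, the split above is cross_thread_dwords ==
 * 8 * (17 / 8) == 16 and per_thread_dwords == 2, so the subgroup ID
 * lands in the per-thread block as required.
 */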
8465
8466 fill_push_const_block_info(&cs_prog_data->push.cross_thread, cross_thread_dwords);
8467 fill_push_const_block_info(&cs_prog_data->push.per_thread, per_thread_dwords);
8468
8469 unsigned total_dwords =
8470 (cs_prog_data->push.per_thread.size * cs_prog_data->threads +
8471 cs_prog_data->push.cross_thread.size) / 4;
8472 fill_push_const_block_info(&cs_prog_data->push.total, total_dwords);
8473
8474 assert(cs_prog_data->push.cross_thread.dwords % 8 == 0 ||
8475 cs_prog_data->push.per_thread.size == 0);
8476 assert(cs_prog_data->push.cross_thread.dwords +
8477 cs_prog_data->push.per_thread.dwords ==
8478 prog_data->nr_params);
8479 }
8480
8481 static void
8482 cs_set_simd_size(struct brw_cs_prog_data *cs_prog_data, unsigned size)
8483 {
8484 cs_prog_data->simd_size = size;
8485 unsigned group_size = cs_prog_data->local_size[0] *
8486 cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
8487 cs_prog_data->threads = (group_size + size - 1) / size;
8488 }
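
/* For example (illustrative): an 8x8x1 local group (64 invocations) at
 * SIMD16 needs (64 + 16 - 1) / 16 == 4 hardware threads.
 */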
8489
8490 static nir_shader *
8491 compile_cs_to_nir(const struct brw_compiler *compiler,
8492 void *mem_ctx,
8493 const struct brw_cs_prog_key *key,
8494 const nir_shader *src_shader,
8495 unsigned dispatch_width)
8496 {
8497 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
8498 brw_nir_apply_key(shader, compiler, &key->base, dispatch_width, true);
8499
8500 NIR_PASS_V(shader, brw_nir_lower_cs_intrinsics, dispatch_width);
8501
8502 /* Clean up after the local index and ID calculations. */
8503 NIR_PASS_V(shader, nir_opt_constant_folding);
8504 NIR_PASS_V(shader, nir_opt_dce);
8505
8506 brw_postprocess_nir(shader, compiler, true);
8507
8508 return shader;
8509 }

const unsigned *
brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_cs_prog_key *key,
               struct brw_cs_prog_data *prog_data,
               const nir_shader *src_shader,
               int shader_time_index,
               struct brw_compile_stats *stats,
               char **error_str)
{
   prog_data->base.total_shared = src_shader->info.cs.shared_size;
   prog_data->local_size[0] = src_shader->info.cs.local_size[0];
   prog_data->local_size[1] = src_shader->info.cs.local_size[1];
   prog_data->local_size[2] = src_shader->info.cs.local_size[2];
   prog_data->slm_size = src_shader->num_shared;
   unsigned local_workgroup_size =
      src_shader->info.cs.local_size[0] * src_shader->info.cs.local_size[1] *
      src_shader->info.cs.local_size[2];

   unsigned min_dispatch_width =
      DIV_ROUND_UP(local_workgroup_size, compiler->devinfo->max_cs_threads);
   min_dispatch_width = MAX2(8, min_dispatch_width);
   min_dispatch_width = util_next_power_of_two(min_dispatch_width);
   assert(min_dispatch_width <= 32);
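   /* Illustrative only (max_cs_threads varies by hardware): a
    * 1000-invocation workgroup on a device with max_cs_threads == 64 needs
    * DIV_ROUND_UP(1000, 64) == 16 invocations per thread, so
    * min_dispatch_width becomes MAX2(8, 16) == 16 (already a power of two);
    * any narrower dispatch could not fit the group in the available threads.
    */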
   unsigned max_dispatch_width = 32;

   fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
   fs_visitor *v = NULL;
   const char *fail_msg = NULL;

   if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      const unsigned required_dispatch_width =
         (unsigned)key->base.subgroup_size_type;
      assert(required_dispatch_width == 8 ||
             required_dispatch_width == 16 ||
             required_dispatch_width == 32);
      if (required_dispatch_width < min_dispatch_width ||
          required_dispatch_width > max_dispatch_width) {
         fail_msg = "Cannot satisfy explicit subgroup size";
      } else {
         min_dispatch_width = max_dispatch_width = required_dispatch_width;
      }
   }
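   /* For instance, requiring a SIMD16 subgroup (subgroup_size_type ==
    * BRW_SUBGROUP_SIZE_REQUIRE_16, numerically 16) pins both bounds to 16,
    * so only the SIMD16 attempt below can run; requesting it for a workgroup
    * too large for SIMD16 fails with the message above instead.
    */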

   /* Now the main event: Visit the shader IR and generate our CS IR for it.
    */
   if (!fail_msg && min_dispatch_width <= 8 && max_dispatch_width >= 8) {
      nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
                                           src_shader, 8);
      v8 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
                          &prog_data->base,
                          nir8, 8, shader_time_index);
      if (!v8->run_cs(min_dispatch_width)) {
         fail_msg = v8->fail_msg;
      } else {
         /* We should always be able to do SIMD32 for compute shaders */
         assert(v8->max_dispatch_width >= 32);

         v = v8;
         cs_set_simd_size(prog_data, 8);
         cs_fill_push_const_info(compiler->devinfo, prog_data);
      }
   }

   if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
       !fail_msg && min_dispatch_width <= 16 && max_dispatch_width >= 16) {
      /* Try a SIMD16 compile */
      nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
                                            src_shader, 16);
      v16 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
                           &prog_data->base,
                           nir16, 16, shader_time_index);
      if (v8)
         v16->import_uniforms(v8);

      if (!v16->run_cs(min_dispatch_width)) {
         compiler->shader_perf_log(log_data,
                                   "SIMD16 shader failed to compile: %s",
                                   v16->fail_msg);
         if (!v) {
            fail_msg =
               "Couldn't generate SIMD16 program and not "
               "enough threads for SIMD8";
         }
      } else {
         /* We should always be able to do SIMD32 for compute shaders */
         assert(v16->max_dispatch_width >= 32);

         v = v16;
         cs_set_simd_size(prog_data, 16);
         cs_fill_push_const_info(compiler->devinfo, prog_data);
      }
   }

   /* We should always be able to do SIMD32 for compute shaders */
   assert(!v16 || v16->max_dispatch_width >= 32);

   if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32)) &&
       max_dispatch_width >= 32) {
      /* Try a SIMD32 compile */
      nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
                                            src_shader, 32);
      v32 = new fs_visitor(compiler, log_data, mem_ctx, &key->base,
                           &prog_data->base,
                           nir32, 32, shader_time_index);
      if (v8)
         v32->import_uniforms(v8);
      else if (v16)
         v32->import_uniforms(v16);

      if (!v32->run_cs(min_dispatch_width)) {
         compiler->shader_perf_log(log_data,
                                   "SIMD32 shader failed to compile: %s",
                                   v32->fail_msg);
         if (!v) {
            fail_msg =
               "Couldn't generate SIMD32 program and not "
               "enough threads for SIMD16";
         }
      } else {
         v = v32;
         cs_set_simd_size(prog_data, 32);
         cs_fill_push_const_info(compiler->devinfo, prog_data);
      }
   }

   const unsigned *ret = NULL;
   if (unlikely(v == NULL)) {
      assert(fail_msg);
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, fail_msg);
   } else {
      fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
                     v->shader_stats, v->runtime_check_aads_emit,
                     MESA_SHADER_COMPUTE);
      if (INTEL_DEBUG & DEBUG_CS) {
         char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
                                      src_shader->info.label ?
                                      src_shader->info.label : "unnamed",
                                      src_shader->info.name);
         g.enable_debug(name);
      }

      g.generate_code(v->cfg, prog_data->simd_size, stats);

      ret = g.get_assembly();
   }

   delete v8;
   delete v16;
   delete v32;

   return ret;
}

/**
 * Test the dispatch mask packing assumptions of
 * brw_stage_has_packed_dispatch(). Call this from e.g. the top of
 * fs_visitor::emit_nir_code() to cause a GPU hang if any shader invocation is
 * executed with an unexpected dispatch mask.
 */
static UNUSED void
brw_fs_test_dispatch_packing(const fs_builder &bld)
{
   const gl_shader_stage stage = bld.shader->stage;

   if (brw_stage_has_packed_dispatch(bld.shader->devinfo, stage,
                                     bld.shader->stage_prog_data)) {
      const fs_builder ubld = bld.exec_all().group(1, 0);
      const fs_reg tmp = component(bld.vgrf(BRW_REGISTER_TYPE_UD), 0);
      const fs_reg mask = (stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
                           brw_dmask_reg());

      ubld.ADD(tmp, mask, brw_imm_ud(1));
      ubld.AND(tmp, mask, tmp);

      /* This will loop forever if the dispatch mask doesn't have the expected
       * form '2^n-1', in which case tmp will be non-zero.
       */
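      /* Worked example (values purely illustrative): a fully packed SIMD16
       * dispatch mask is 0xffff, and 0xffff & (0xffff + 1) == 0, so the loop
       * exits immediately.  A mask with a hole, e.g. 0xfdff, gives
       * 0xfdff & 0xfe00 == 0xfc00 != 0, and the WHILE below spins forever.
       */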
      bld.emit(BRW_OPCODE_DO);
      bld.CMP(bld.null_reg_ud(), tmp, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
      set_predicate(BRW_PREDICATE_NORMAL, bld.emit(BRW_OPCODE_WHILE));
   }
}