830c4f2bf91612a4e34676e8eda7ff0cbcdff319
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs.cpp
25 *
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
28 * from the LIR.
29 */
30
31 #include "main/macros.h"
32 #include "brw_context.h"
33 #include "brw_eu.h"
34 #include "brw_fs.h"
35 #include "brw_cs.h"
36 #include "brw_nir.h"
37 #include "brw_vec4_gs_visitor.h"
38 #include "brw_cfg.h"
39 #include "brw_program.h"
40 #include "brw_dead_control_flow.h"
41 #include "compiler/glsl_types.h"
42 #include "program/prog_parameter.h"
43
44 using namespace brw;
45
46 void
47 fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
48 const fs_reg *src, unsigned sources)
49 {
50 memset(this, 0, sizeof(*this));
51
52 this->src = new fs_reg[MAX2(sources, 3)];
53 for (unsigned i = 0; i < sources; i++)
54 this->src[i] = src[i];
55
56 this->opcode = opcode;
57 this->dst = dst;
58 this->sources = sources;
59 this->exec_size = exec_size;
60
61 assert(dst.file != IMM && dst.file != UNIFORM);
62
63 assert(this->exec_size != 0);
64
65 this->conditional_mod = BRW_CONDITIONAL_NONE;
66
67 /* This will be the case for almost all instructions. */
68 switch (dst.file) {
69 case VGRF:
70 case ARF:
71 case FIXED_GRF:
72 case MRF:
73 case ATTR:
74 this->regs_written = DIV_ROUND_UP(dst.component_size(exec_size),
75 REG_SIZE);
76 break;
77 case BAD_FILE:
78 this->regs_written = 0;
79 break;
80 case IMM:
81 case UNIFORM:
82 unreachable("Invalid destination register file");
83 }
84
85 this->writes_accumulator = false;
86 }
87
88 fs_inst::fs_inst()
89 {
90 init(BRW_OPCODE_NOP, 8, dst, NULL, 0);
91 }
92
93 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
94 {
95 init(opcode, exec_size, reg_undef, NULL, 0);
96 }
97
98 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
99 {
100 init(opcode, exec_size, dst, NULL, 0);
101 }
102
103 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
104 const fs_reg &src0)
105 {
106 const fs_reg src[1] = { src0 };
107 init(opcode, exec_size, dst, src, 1);
108 }
109
110 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
111 const fs_reg &src0, const fs_reg &src1)
112 {
113 const fs_reg src[2] = { src0, src1 };
114 init(opcode, exec_size, dst, src, 2);
115 }
116
117 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
118 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
119 {
120 const fs_reg src[3] = { src0, src1, src2 };
121 init(opcode, exec_size, dst, src, 3);
122 }
123
124 fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
125 const fs_reg src[], unsigned sources)
126 {
127 init(opcode, exec_width, dst, src, sources);
128 }
129
130 fs_inst::fs_inst(const fs_inst &that)
131 {
132 memcpy(this, &that, sizeof(that));
133
134 this->src = new fs_reg[MAX2(that.sources, 3)];
135
136 for (unsigned i = 0; i < that.sources; i++)
137 this->src[i] = that.src[i];
138 }
139
140 fs_inst::~fs_inst()
141 {
142 delete[] this->src;
143 }
144
145 void
146 fs_inst::resize_sources(uint8_t num_sources)
147 {
148 if (this->sources != num_sources) {
149 fs_reg *src = new fs_reg[MAX2(num_sources, 3)];
150
151 for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
152 src[i] = this->src[i];
153
154 delete[] this->src;
155 this->src = src;
156 this->sources = num_sources;
157 }
158 }
159
160 void
161 fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
162 const fs_reg &dst,
163 const fs_reg &surf_index,
164 const fs_reg &varying_offset,
165 uint32_t const_offset)
166 {
167 /* We have our constant surface use a pitch of 4 bytes, so our index can
168 * be any component of a vector, and then we load 4 contiguous
169 * components starting from that.
170 *
171 * We break down the const_offset to a portion added to the variable
172 * offset and a portion done using reg_offset, which means that if you
173 * have GLSL using something like "uniform vec4 a[20]; gl_FragColor =
174 * a[i]", we'll temporarily generate 4 vec4 loads from offset i * 4, and
175 * CSE can later notice that those loads are all the same and eliminate
176 * the redundant ones.
177 */
178 fs_reg vec4_offset = vgrf(glsl_type::uint_type);
179 bld.ADD(vec4_offset, varying_offset, brw_imm_ud(const_offset & ~0xf));
180
181 /* The pull load message will load a vec4 (16 bytes). If we are loading
182 * a double this means we are only loading 2 elements worth of data.
183 * We also want to use a 32-bit data type for the dst of the load operation
184 * so other parts of the driver don't get confused about the size of the
185 * result.
186 */
187 fs_reg vec4_result = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
188 fs_inst *inst = bld.emit(FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
189 vec4_result, surf_index, vec4_offset);
190 inst->regs_written = 4 * bld.dispatch_width() / 8;
191
192 if (type_sz(dst.type) == 8) {
193 shuffle_32bit_load_result_to_64bit_data(
194 bld, retype(vec4_result, dst.type), vec4_result, 2);
195 }
196
197 vec4_result.type = dst.type;
198 bld.MOV(dst, offset(vec4_result, bld,
199 (const_offset & 0xf) / type_sz(vec4_result.type)));
200 }
201
202 /**
203 * A helper for MOV generation for fixing up broken hardware SEND dependency
204 * handling.
205 */
206 void
207 fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
208 {
209 /* The caller always wants uncompressed to emit the minimal extra
210 * dependencies, and to avoid having to deal with aligning its regs to 2.
211 */
212 const fs_builder ubld = bld.annotate("send dependency resolve")
213 .half(0);
214
215 ubld.MOV(ubld.null_reg_f(), fs_reg(VGRF, grf, BRW_REGISTER_TYPE_F));
216 }
217
218 bool
219 fs_inst::equals(fs_inst *inst) const
220 {
221 return (opcode == inst->opcode &&
222 dst.equals(inst->dst) &&
223 src[0].equals(inst->src[0]) &&
224 src[1].equals(inst->src[1]) &&
225 src[2].equals(inst->src[2]) &&
226 saturate == inst->saturate &&
227 predicate == inst->predicate &&
228 conditional_mod == inst->conditional_mod &&
229 mlen == inst->mlen &&
230 base_mrf == inst->base_mrf &&
231 target == inst->target &&
232 eot == inst->eot &&
233 header_size == inst->header_size &&
234 shadow_compare == inst->shadow_compare &&
235 exec_size == inst->exec_size &&
236 offset == inst->offset);
237 }
238
239 bool
240 fs_inst::overwrites_reg(const fs_reg &reg) const
241 {
242 return reg.in_range(dst, regs_written);
243 }
244
245 bool
246 fs_inst::is_send_from_grf() const
247 {
248 switch (opcode) {
249 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
250 case SHADER_OPCODE_SHADER_TIME_ADD:
251 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
252 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
253 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
254 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
255 case SHADER_OPCODE_UNTYPED_ATOMIC:
256 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
257 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
258 case SHADER_OPCODE_TYPED_ATOMIC:
259 case SHADER_OPCODE_TYPED_SURFACE_READ:
260 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
261 case SHADER_OPCODE_URB_WRITE_SIMD8:
262 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
263 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
264 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
265 case SHADER_OPCODE_URB_READ_SIMD8:
266 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
267 return true;
268 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
269 return src[1].file == VGRF;
270 case FS_OPCODE_FB_WRITE:
271 return src[0].file == VGRF;
272 default:
273 if (is_tex())
274 return src[0].file == VGRF;
275
276 return false;
277 }
278 }
279
280 /**
281 * Returns true if this instruction's sources and destinations cannot
282 * safely be the same register.
283 *
284 * In most cases, a register can be written over safely by the same
285 * instruction that is its last use. For a single instruction, the
286 * sources are dereferenced before writing of the destination starts
287 * (naturally).
288 *
289 * However, there are a few cases where this can be problematic:
290 *
291 * - Virtual opcodes that translate to multiple instructions in the
292 * code generator: if src == dst and one instruction writes the
293 * destination before a later instruction reads the source, then
294 * src will have been clobbered.
295 *
296 * - SIMD16 compressed instructions with certain regioning (see below).
297 *
298 * The register allocator uses this information to set up conflicts between
299 * GRF sources and the destination.
300 */
301 bool
302 fs_inst::has_source_and_destination_hazard() const
303 {
304 switch (opcode) {
305 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
306 /* Multiple partial writes to the destination */
307 return true;
308 default:
309 /* The SIMD16 compressed instruction
310 *
311 * add(16) g4<1>F g4<8,8,1>F g6<8,8,1>F
312 *
313 * is actually decoded in hardware as:
314 *
315 * add(8) g4<1>F g4<8,8,1>F g6<8,8,1>F
316 * add(8) g5<1>F g5<8,8,1>F g7<8,8,1>F
317 *
318 * Which is safe. However, if we have uniform accesses
319 * happening, we get into trouble:
320 *
321 * add(8) g4<1>F g4<0,1,0>F g6<8,8,1>F
322 * add(8) g5<1>F g4<0,1,0>F g7<8,8,1>F
323 *
324 * Now our destination for the first instruction overwrote the
325 * second instruction's src0, and we get garbage for those 8
326 * pixels. There's a similar issue for the pre-gen6
327 * pixel_x/pixel_y, which are registers of 16-bit values and thus
328 * would get stomped by the first decode as well.
329 */
330 if (exec_size == 16) {
331 for (int i = 0; i < sources; i++) {
332 if (src[i].file == VGRF && (src[i].stride == 0 ||
333 src[i].type == BRW_REGISTER_TYPE_UW ||
334 src[i].type == BRW_REGISTER_TYPE_W ||
335 src[i].type == BRW_REGISTER_TYPE_UB ||
336 src[i].type == BRW_REGISTER_TYPE_B)) {
337 return true;
338 }
339 }
340 }
341 return false;
342 }
343 }
344
345 bool
346 fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
347 {
348 if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
349 return false;
350
351 fs_reg reg = this->src[0];
352 if (reg.file != VGRF || reg.reg_offset != 0 || reg.stride == 0)
353 return false;
354
355 if (grf_alloc.sizes[reg.nr] != this->regs_written)
356 return false;
357
358 for (int i = 0; i < this->sources; i++) {
359 reg.type = this->src[i].type;
360 if (!this->src[i].equals(reg))
361 return false;
362
363 if (i < this->header_size) {
364 reg.reg_offset += 1;
365 } else {
366 reg = horiz_offset(reg, this->exec_size);
367 }
368 }
369
370 return true;
371 }
372
373 bool
374 fs_inst::can_do_source_mods(const struct brw_device_info *devinfo)
375 {
376 if (devinfo->gen == 6 && is_math())
377 return false;
378
379 if (is_send_from_grf())
380 return false;
381
382 if (!backend_instruction::can_do_source_mods())
383 return false;
384
385 return true;
386 }
387
388 bool
389 fs_inst::can_change_types() const
390 {
391 return dst.type == src[0].type &&
392 !src[0].abs && !src[0].negate && !saturate &&
393 (opcode == BRW_OPCODE_MOV ||
394 (opcode == BRW_OPCODE_SEL &&
395 dst.type == src[1].type &&
396 predicate != BRW_PREDICATE_NONE &&
397 !src[1].abs && !src[1].negate));
398 }
399
400 bool
401 fs_inst::has_side_effects() const
402 {
403 return this->eot || backend_instruction::has_side_effects();
404 }
405
406 void
407 fs_reg::init()
408 {
409 memset(this, 0, sizeof(*this));
410 stride = 1;
411 }
412
413 /** Generic unset register constructor. */
414 fs_reg::fs_reg()
415 {
416 init();
417 this->file = BAD_FILE;
418 }
419
420 fs_reg::fs_reg(struct ::brw_reg reg) :
421 backend_reg(reg)
422 {
423 this->reg_offset = 0;
424 this->subreg_offset = 0;
425 this->stride = 1;
426 if (this->file == IMM &&
427 (this->type != BRW_REGISTER_TYPE_V &&
428 this->type != BRW_REGISTER_TYPE_UV &&
429 this->type != BRW_REGISTER_TYPE_VF)) {
430 this->stride = 0;
431 }
432 }
433
434 bool
435 fs_reg::equals(const fs_reg &r) const
436 {
437 return (this->backend_reg::equals(r) &&
438 subreg_offset == r.subreg_offset &&
439 stride == r.stride);
440 }
441
442 fs_reg &
443 fs_reg::set_smear(unsigned subreg)
444 {
445 assert(file != ARF && file != FIXED_GRF && file != IMM);
446 subreg_offset = subreg * type_sz(type);
447 stride = 0;
448 return *this;
449 }
450
451 bool
452 fs_reg::is_contiguous() const
453 {
454 return stride == 1;
455 }
456
457 unsigned
458 fs_reg::component_size(unsigned width) const
459 {
460 const unsigned stride = ((file != ARF && file != FIXED_GRF) ? this->stride :
461 hstride == 0 ? 0 :
462 1 << (hstride - 1));
463 return MAX2(width * stride, 1) * type_sz(type);
464 }
465
466 extern "C" int
467 type_size_scalar(const struct glsl_type *type)
468 {
469 unsigned int size, i;
470
471 switch (type->base_type) {
472 case GLSL_TYPE_UINT:
473 case GLSL_TYPE_INT:
474 case GLSL_TYPE_FLOAT:
475 case GLSL_TYPE_BOOL:
476 return type->components();
477 case GLSL_TYPE_DOUBLE:
478 return type->components() * 2;
479 case GLSL_TYPE_ARRAY:
480 return type_size_scalar(type->fields.array) * type->length;
481 case GLSL_TYPE_STRUCT:
482 size = 0;
483 for (i = 0; i < type->length; i++) {
484 size += type_size_scalar(type->fields.structure[i].type);
485 }
486 return size;
487 case GLSL_TYPE_SAMPLER:
488 /* Samplers take up no register space, since they're baked in at
489 * link time.
490 */
491 return 0;
492 case GLSL_TYPE_ATOMIC_UINT:
493 return 0;
494 case GLSL_TYPE_SUBROUTINE:
495 return 1;
496 case GLSL_TYPE_IMAGE:
497 return BRW_IMAGE_PARAM_SIZE;
498 case GLSL_TYPE_VOID:
499 case GLSL_TYPE_ERROR:
500 case GLSL_TYPE_INTERFACE:
501 case GLSL_TYPE_FUNCTION:
502 unreachable("not reached");
503 }
504
505 return 0;
506 }
507
508 /**
509 * Returns the number of scalar components needed to store type, assuming
510 * that vectors are padded out to vec4.
511 *
512 * This has the packing rules of type_size_vec4(), but counts components
513 * similar to type_size_scalar().
514 */
515 extern "C" int
516 type_size_vec4_times_4(const struct glsl_type *type)
517 {
518 return 4 * type_size_vec4(type);
519 }
520
521 /* Attribute arrays are loaded as one vec4 per element (or matrix column),
522 * except for double-precision types, which are loaded as one dvec4.
523 */
524 extern "C" int
525 type_size_vs_input(const struct glsl_type *type)
526 {
527 if (type->is_double()) {
528 return type_size_dvec4(type);
529 } else {
530 return type_size_vec4(type);
531 }
532 }
533
534 /**
535 * Create a MOV to read the timestamp register.
536 *
537 * The caller is responsible for emitting the MOV. The return value is
538 * the destination of the MOV, with extra parameters set.
539 */
540 fs_reg
541 fs_visitor::get_timestamp(const fs_builder &bld)
542 {
543 assert(devinfo->gen >= 7);
544
545 fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
546 BRW_ARF_TIMESTAMP,
547 0),
548 BRW_REGISTER_TYPE_UD));
549
550 fs_reg dst = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
551
552 /* We want to read the 3 fields we care about even if it's not enabled in
553 * the dispatch.
554 */
555 bld.group(4, 0).exec_all().MOV(dst, ts);
556
557 return dst;
558 }
559
560 void
561 fs_visitor::emit_shader_time_begin()
562 {
563 shader_start_time = get_timestamp(bld.annotate("shader time start"));
564
565 /* We want only the low 32 bits of the timestamp. Since it's running
566 * at the GPU clock rate of ~1.2ghz, it will roll over every ~3 seconds,
567 * which is plenty of time for our purposes. It is identical across the
568 * EUs, but since it's tracking GPU core speed it will increment at a
569 * varying rate as render P-states change.
570 */
571 shader_start_time.set_smear(0);
572 }
573
574 void
575 fs_visitor::emit_shader_time_end()
576 {
577 /* Insert our code just before the final SEND with EOT. */
578 exec_node *end = this->instructions.get_tail();
579 assert(end && ((fs_inst *) end)->eot);
580 const fs_builder ibld = bld.annotate("shader time end")
581 .exec_all().at(NULL, end);
582
583 fs_reg shader_end_time = get_timestamp(ibld);
584
585 /* We only use the low 32 bits of the timestamp - see
586 * emit_shader_time_begin()).
587 *
588 * We could also check if render P-states have changed (or anything
589 * else that might disrupt timing) by setting smear to 2 and checking if
590 * that field is != 0.
591 */
592 shader_end_time.set_smear(0);
593
594 /* Check that there weren't any timestamp reset events (assuming these
595 * were the only two timestamp reads that happened).
596 */
597 fs_reg reset = shader_end_time;
598 reset.set_smear(2);
599 set_condmod(BRW_CONDITIONAL_Z,
600 ibld.AND(ibld.null_reg_ud(), reset, brw_imm_ud(1u)));
601 ibld.IF(BRW_PREDICATE_NORMAL);
602
603 fs_reg start = shader_start_time;
604 start.negate = true;
605 fs_reg diff = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
606 diff.set_smear(0);
607
608 const fs_builder cbld = ibld.group(1, 0);
609 cbld.group(1, 0).ADD(diff, start, shader_end_time);
610
611 /* If there were no instructions between the two timestamp gets, the diff
612 * is 2 cycles. Remove that overhead, so I can forget about that when
613 * trying to determine the time taken for single instructions.
614 */
615 cbld.ADD(diff, diff, brw_imm_ud(-2u));
616 SHADER_TIME_ADD(cbld, 0, diff);
617 SHADER_TIME_ADD(cbld, 1, brw_imm_ud(1u));
618 ibld.emit(BRW_OPCODE_ELSE);
619 SHADER_TIME_ADD(cbld, 2, brw_imm_ud(1u));
620 ibld.emit(BRW_OPCODE_ENDIF);
621 }
622
623 void
624 fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
625 int shader_time_subindex,
626 fs_reg value)
627 {
628 int index = shader_time_index * 3 + shader_time_subindex;
629 struct brw_reg offset = brw_imm_d(index * SHADER_TIME_STRIDE);
630
631 fs_reg payload;
632 if (dispatch_width == 8)
633 payload = vgrf(glsl_type::uvec2_type);
634 else
635 payload = vgrf(glsl_type::uint_type);
636
637 bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
638 }
639
640 void
641 fs_visitor::vfail(const char *format, va_list va)
642 {
643 char *msg;
644
645 if (failed)
646 return;
647
648 failed = true;
649
650 msg = ralloc_vasprintf(mem_ctx, format, va);
651 msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);
652
653 this->fail_msg = msg;
654
655 if (debug_enabled) {
656 fprintf(stderr, "%s", msg);
657 }
658 }
659
660 void
661 fs_visitor::fail(const char *format, ...)
662 {
663 va_list va;
664
665 va_start(va, format);
666 vfail(format, va);
667 va_end(va);
668 }
669
670 /**
671 * Mark this program as impossible to compile with dispatch width greater
672 * than n.
673 *
674 * During the SIMD8 compile (which happens first), we can detect and flag
675 * things that are unsupported in SIMD16+ mode, so the compiler can skip the
676 * SIMD16+ compile altogether.
677 *
678 * During a compile of dispatch width greater than n (if one happens anyway),
679 * this just calls fail().
680 */
681 void
682 fs_visitor::limit_dispatch_width(unsigned n, const char *msg)
683 {
684 if (dispatch_width > n) {
685 fail("%s", msg);
686 } else {
687 max_dispatch_width = n;
688 compiler->shader_perf_log(log_data,
689 "Shader dispatch width limited to SIMD%d: %s",
690 n, msg);
691 }
692 }
693
694 /**
695 * Returns true if the instruction has a flag that means it won't
696 * update an entire destination register.
697 *
698 * For example, dead code elimination and live variable analysis want to know
699 * when a write to a variable screens off any preceding values that were in
700 * it.
701 */
702 bool
703 fs_inst::is_partial_write() const
704 {
705 return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
706 (this->exec_size * type_sz(this->dst.type)) < 32 ||
707 !this->dst.is_contiguous() ||
708 this->dst.subreg_offset > 0);
709 }
710
711 unsigned
712 fs_inst::components_read(unsigned i) const
713 {
714 switch (opcode) {
715 case FS_OPCODE_LINTERP:
716 if (i == 0)
717 return 2;
718 else
719 return 1;
720
721 case FS_OPCODE_PIXEL_X:
722 case FS_OPCODE_PIXEL_Y:
723 assert(i == 0);
724 return 2;
725
726 case FS_OPCODE_FB_WRITE_LOGICAL:
727 assert(src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
728 /* First/second FB write color. */
729 if (i < 2)
730 return src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
731 else
732 return 1;
733
734 case SHADER_OPCODE_TEX_LOGICAL:
735 case SHADER_OPCODE_TXD_LOGICAL:
736 case SHADER_OPCODE_TXF_LOGICAL:
737 case SHADER_OPCODE_TXL_LOGICAL:
738 case SHADER_OPCODE_TXS_LOGICAL:
739 case FS_OPCODE_TXB_LOGICAL:
740 case SHADER_OPCODE_TXF_CMS_LOGICAL:
741 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
742 case SHADER_OPCODE_TXF_UMS_LOGICAL:
743 case SHADER_OPCODE_TXF_MCS_LOGICAL:
744 case SHADER_OPCODE_LOD_LOGICAL:
745 case SHADER_OPCODE_TG4_LOGICAL:
746 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
747 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
748 assert(src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM &&
749 src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
750 /* Texture coordinates. */
751 if (i == TEX_LOGICAL_SRC_COORDINATE)
752 return src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
753 /* Texture derivatives. */
754 else if ((i == TEX_LOGICAL_SRC_LOD || i == TEX_LOGICAL_SRC_LOD2) &&
755 opcode == SHADER_OPCODE_TXD_LOGICAL)
756 return src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
757 /* Texture offset. */
758 else if (i == TEX_LOGICAL_SRC_OFFSET_VALUE)
759 return 2;
760 /* MCS */
761 else if (i == TEX_LOGICAL_SRC_MCS && opcode == SHADER_OPCODE_TXF_CMS_W_LOGICAL)
762 return 2;
763 else
764 return 1;
765
766 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
767 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
768 assert(src[3].file == IMM);
769 /* Surface coordinates. */
770 if (i == 0)
771 return src[3].ud;
772 /* Surface operation source (ignored for reads). */
773 else if (i == 1)
774 return 0;
775 else
776 return 1;
777
778 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
779 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
780 assert(src[3].file == IMM &&
781 src[4].file == IMM);
782 /* Surface coordinates. */
783 if (i == 0)
784 return src[3].ud;
785 /* Surface operation source. */
786 else if (i == 1)
787 return src[4].ud;
788 else
789 return 1;
790
791 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
792 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
793 assert(src[3].file == IMM &&
794 src[4].file == IMM);
795 const unsigned op = src[4].ud;
796 /* Surface coordinates. */
797 if (i == 0)
798 return src[3].ud;
799 /* Surface operation source. */
800 else if (i == 1 && op == BRW_AOP_CMPWR)
801 return 2;
802 else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
803 op == BRW_AOP_PREDEC))
804 return 0;
805 else
806 return 1;
807 }
808
809 default:
810 return 1;
811 }
812 }
813
814 int
815 fs_inst::regs_read(int arg) const
816 {
817 switch (opcode) {
818 case FS_OPCODE_FB_WRITE:
819 case SHADER_OPCODE_URB_WRITE_SIMD8:
820 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
821 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
822 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
823 case SHADER_OPCODE_URB_READ_SIMD8:
824 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
825 case SHADER_OPCODE_UNTYPED_ATOMIC:
826 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
827 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
828 case SHADER_OPCODE_TYPED_ATOMIC:
829 case SHADER_OPCODE_TYPED_SURFACE_READ:
830 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
831 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
832 if (arg == 0)
833 return mlen;
834 break;
835
836 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
837 /* The payload is actually stored in src1 */
838 if (arg == 1)
839 return mlen;
840 break;
841
842 case FS_OPCODE_LINTERP:
843 if (arg == 1)
844 return 1;
845 break;
846
847 case SHADER_OPCODE_LOAD_PAYLOAD:
848 if (arg < this->header_size)
849 return 1;
850 break;
851
852 case CS_OPCODE_CS_TERMINATE:
853 case SHADER_OPCODE_BARRIER:
854 return 1;
855
856 case SHADER_OPCODE_MOV_INDIRECT:
857 if (arg == 0) {
858 assert(src[2].file == IMM);
859 unsigned region_length = src[2].ud;
860
861 if (src[0].file == UNIFORM) {
862 assert(region_length % 4 == 0);
863 return region_length / 4;
864 } else if (src[0].file == FIXED_GRF) {
865 /* If the start of the region is not register aligned, then
866 * there's some portion of the register that's technically
867 * unread at the beginning.
868 *
869 * However, the register allocator works in terms of whole
870 * registers, and does not use subnr. It assumes that the
871 * read starts at the beginning of the register, and extends
872 * regs_read() whole registers beyond that.
873 *
874 * To compensate, we extend the region length to include this
875 * unread portion at the beginning.
876 */
877 if (src[0].subnr)
878 region_length += src[0].subnr;
879
880 return DIV_ROUND_UP(region_length, REG_SIZE);
881 } else {
882 assert(!"Invalid register file");
883 }
884 }
885 break;
886
887 default:
888 if (is_tex() && arg == 0 && src[0].file == VGRF)
889 return mlen;
890 break;
891 }
892
893 switch (src[arg].file) {
894 case BAD_FILE:
895 return 0;
896 case UNIFORM:
897 case IMM:
898 return 1;
899 case ARF:
900 case FIXED_GRF:
901 case VGRF:
902 case ATTR:
903 return DIV_ROUND_UP(components_read(arg) *
904 src[arg].component_size(exec_size),
905 REG_SIZE);
906 case MRF:
907 unreachable("MRF registers are not allowed as sources");
908 }
909 return 0;
910 }
911
912 namespace {
913 /* Return the subset of flag registers that an instruction could
914 * potentially read or write based on the execution controls and flag
915 * subregister number of the instruction.
916 */
917 unsigned
918 flag_mask(const fs_inst *inst)
919 {
920 const unsigned start = inst->flag_subreg * 16 + inst->group;
921 const unsigned end = start + inst->exec_size;
922 return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
923 }
924 }
925
926 unsigned
927 fs_inst::flags_read(const brw_device_info *devinfo) const
928 {
929 /* XXX - This doesn't consider explicit uses of the flag register as source
930 * region.
931 */
932 if (predicate == BRW_PREDICATE_ALIGN1_ANYV ||
933 predicate == BRW_PREDICATE_ALIGN1_ALLV) {
934 /* The vertical predication modes combine corresponding bits from
935 * f0.0 and f1.0 on Gen7+, and f0.0 and f0.1 on older hardware.
936 */
937 const unsigned shift = devinfo->gen >= 7 ? 4 : 2;
938 return flag_mask(this) << shift | flag_mask(this);
939 } else if (predicate) {
940 return flag_mask(this);
941 } else {
942 return 0;
943 }
944 }
945
946 unsigned
947 fs_inst::flags_written() const
948 {
949 /* XXX - This doesn't consider explicit uses of the flag register as
950 * destination region.
951 */
952 if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
953 opcode != BRW_OPCODE_IF &&
954 opcode != BRW_OPCODE_WHILE)) ||
955 opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
956 return flag_mask(this);
957 } else {
958 return 0;
959 }
960 }
961
962 /**
963 * Returns how many MRFs an FS opcode will write over.
964 *
965 * Note that this is not the 0 or 1 implied writes in an actual gen
966 * instruction -- the FS opcodes often generate MOVs in addition.
967 */
968 int
969 fs_visitor::implied_mrf_writes(fs_inst *inst)
970 {
971 if (inst->mlen == 0)
972 return 0;
973
974 if (inst->base_mrf == -1)
975 return 0;
976
977 switch (inst->opcode) {
978 case SHADER_OPCODE_RCP:
979 case SHADER_OPCODE_RSQ:
980 case SHADER_OPCODE_SQRT:
981 case SHADER_OPCODE_EXP2:
982 case SHADER_OPCODE_LOG2:
983 case SHADER_OPCODE_SIN:
984 case SHADER_OPCODE_COS:
985 return 1 * dispatch_width / 8;
986 case SHADER_OPCODE_POW:
987 case SHADER_OPCODE_INT_QUOTIENT:
988 case SHADER_OPCODE_INT_REMAINDER:
989 return 2 * dispatch_width / 8;
990 case SHADER_OPCODE_TEX:
991 case FS_OPCODE_TXB:
992 case SHADER_OPCODE_TXD:
993 case SHADER_OPCODE_TXF:
994 case SHADER_OPCODE_TXF_LZ:
995 case SHADER_OPCODE_TXF_CMS:
996 case SHADER_OPCODE_TXF_CMS_W:
997 case SHADER_OPCODE_TXF_MCS:
998 case SHADER_OPCODE_TG4:
999 case SHADER_OPCODE_TG4_OFFSET:
1000 case SHADER_OPCODE_TXL:
1001 case SHADER_OPCODE_TXL_LZ:
1002 case SHADER_OPCODE_TXS:
1003 case SHADER_OPCODE_LOD:
1004 case SHADER_OPCODE_SAMPLEINFO:
1005 return 1;
1006 case FS_OPCODE_FB_WRITE:
1007 return 2;
1008 case FS_OPCODE_GET_BUFFER_SIZE:
1009 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
1010 case SHADER_OPCODE_GEN4_SCRATCH_READ:
1011 return 1;
1012 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
1013 return inst->mlen;
1014 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
1015 return inst->mlen;
1016 case SHADER_OPCODE_UNTYPED_ATOMIC:
1017 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
1018 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
1019 case SHADER_OPCODE_TYPED_ATOMIC:
1020 case SHADER_OPCODE_TYPED_SURFACE_READ:
1021 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
1022 case SHADER_OPCODE_URB_WRITE_SIMD8:
1023 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
1024 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
1025 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
1026 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
1027 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
1028 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
1029 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
1030 return 0;
1031 default:
1032 unreachable("not reached");
1033 }
1034 }
1035
1036 fs_reg
1037 fs_visitor::vgrf(const glsl_type *const type)
1038 {
1039 int reg_width = dispatch_width / 8;
1040 return fs_reg(VGRF, alloc.allocate(type_size_scalar(type) * reg_width),
1041 brw_type_for_base_type(type));
1042 }
1043
1044 fs_reg::fs_reg(enum brw_reg_file file, int nr)
1045 {
1046 init();
1047 this->file = file;
1048 this->nr = nr;
1049 this->type = BRW_REGISTER_TYPE_F;
1050 this->stride = (file == UNIFORM ? 0 : 1);
1051 }
1052
1053 fs_reg::fs_reg(enum brw_reg_file file, int nr, enum brw_reg_type type)
1054 {
1055 init();
1056 this->file = file;
1057 this->nr = nr;
1058 this->type = type;
1059 this->stride = (file == UNIFORM ? 0 : 1);
1060 }
1061
1062 /* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
1063 * This brings in those uniform definitions
1064 */
1065 void
1066 fs_visitor::import_uniforms(fs_visitor *v)
1067 {
1068 this->push_constant_loc = v->push_constant_loc;
1069 this->pull_constant_loc = v->pull_constant_loc;
1070 this->uniforms = v->uniforms;
1071 }
1072
1073 fs_reg *
1074 fs_visitor::emit_fragcoord_interpolation()
1075 {
1076 assert(stage == MESA_SHADER_FRAGMENT);
1077 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec4_type));
1078 fs_reg wpos = *reg;
1079
1080 /* gl_FragCoord.x */
1081 bld.MOV(wpos, this->pixel_x);
1082 wpos = offset(wpos, bld, 1);
1083
1084 /* gl_FragCoord.y */
1085 bld.MOV(wpos, this->pixel_y);
1086 wpos = offset(wpos, bld, 1);
1087
1088 /* gl_FragCoord.z */
1089 if (devinfo->gen >= 6) {
1090 bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
1091 } else {
1092 bld.emit(FS_OPCODE_LINTERP, wpos,
1093 this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
1094 interp_reg(VARYING_SLOT_POS, 2));
1095 }
1096 wpos = offset(wpos, bld, 1);
1097
1098 /* gl_FragCoord.w: Already set up in emit_interpolation */
1099 bld.MOV(wpos, this->wpos_w);
1100
1101 return reg;
1102 }
1103
1104 fs_inst *
1105 fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
1106 glsl_interp_qualifier interpolation_mode,
1107 bool is_centroid, bool is_sample)
1108 {
1109 brw_wm_barycentric_interp_mode barycoord_mode;
1110 if (devinfo->gen >= 6) {
1111 if (is_centroid) {
1112 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1113 barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
1114 else
1115 barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
1116 } else if (is_sample) {
1117 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1118 barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
1119 else
1120 barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
1121 } else {
1122 if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
1123 barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
1124 else
1125 barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
1126 }
1127 } else {
1128 /* On Ironlake and below, there is only one interpolation mode.
1129 * Centroid interpolation doesn't mean anything on this hardware --
1130 * there is no multisampling.
1131 */
1132 barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
1133 }
1134 return bld.emit(FS_OPCODE_LINTERP, attr,
1135 this->delta_xy[barycoord_mode], interp);
1136 }
1137
1138 void
1139 fs_visitor::emit_general_interpolation(fs_reg *attr, const char *name,
1140 const glsl_type *type,
1141 glsl_interp_qualifier interpolation_mode,
1142 int *location, bool mod_centroid,
1143 bool mod_sample)
1144 {
1145 assert(stage == MESA_SHADER_FRAGMENT);
1146 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1147 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1148
1149 if (interpolation_mode == INTERP_QUALIFIER_NONE) {
1150 bool is_gl_Color =
1151 *location == VARYING_SLOT_COL0 || *location == VARYING_SLOT_COL1;
1152 if (key->flat_shade && is_gl_Color) {
1153 interpolation_mode = INTERP_QUALIFIER_FLAT;
1154 } else {
1155 interpolation_mode = INTERP_QUALIFIER_SMOOTH;
1156 }
1157 }
1158
1159 if (type->is_array() || type->is_matrix()) {
1160 const glsl_type *elem_type = glsl_get_array_element(type);
1161 const unsigned length = glsl_get_length(type);
1162
1163 for (unsigned i = 0; i < length; i++) {
1164 emit_general_interpolation(attr, name, elem_type, interpolation_mode,
1165 location, mod_centroid, mod_sample);
1166 }
1167 } else if (type->is_record()) {
1168 for (unsigned i = 0; i < type->length; i++) {
1169 const glsl_type *field_type = type->fields.structure[i].type;
1170 emit_general_interpolation(attr, name, field_type, interpolation_mode,
1171 location, mod_centroid, mod_sample);
1172 }
1173 } else {
1174 assert(type->is_scalar() || type->is_vector());
1175
1176 if (prog_data->urb_setup[*location] == -1) {
1177 /* If there's no incoming setup data for this slot, don't
1178 * emit interpolation for it.
1179 */
1180 *attr = offset(*attr, bld, type->vector_elements);
1181 (*location)++;
1182 return;
1183 }
1184
1185 attr->type = brw_type_for_base_type(type->get_scalar_type());
1186
1187 if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
1188 /* Constant interpolation (flat shading) case. The SF has
1189 * handed us defined values in only the constant offset
1190 * field of the setup reg.
1191 */
1192 for (unsigned int i = 0; i < type->vector_elements; i++) {
1193 struct brw_reg interp = interp_reg(*location, i);
1194 interp = suboffset(interp, 3);
1195 interp.type = attr->type;
1196 bld.emit(FS_OPCODE_CINTERP, *attr, fs_reg(interp));
1197 *attr = offset(*attr, bld, 1);
1198 }
1199 } else {
1200 /* Smooth/noperspective interpolation case. */
1201 for (unsigned int i = 0; i < type->vector_elements; i++) {
1202 struct brw_reg interp = interp_reg(*location, i);
1203 if (devinfo->needs_unlit_centroid_workaround && mod_centroid) {
1204 /* Get the pixel/sample mask into f0 so that we know
1205 * which pixels are lit. Then, for each channel that is
1206 * unlit, replace the centroid data with non-centroid
1207 * data.
1208 */
1209 bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
1210
1211 fs_inst *inst;
1212 inst = emit_linterp(*attr, fs_reg(interp), interpolation_mode,
1213 false, false);
1214 inst->predicate = BRW_PREDICATE_NORMAL;
1215 inst->predicate_inverse = true;
1216 if (devinfo->has_pln)
1217 inst->no_dd_clear = true;
1218
1219 inst = emit_linterp(*attr, fs_reg(interp), interpolation_mode,
1220 mod_centroid && !key->persample_interp,
1221 mod_sample || key->persample_interp);
1222 inst->predicate = BRW_PREDICATE_NORMAL;
1223 inst->predicate_inverse = false;
1224 if (devinfo->has_pln)
1225 inst->no_dd_check = true;
1226
1227 } else {
1228 emit_linterp(*attr, fs_reg(interp), interpolation_mode,
1229 mod_centroid && !key->persample_interp,
1230 mod_sample || key->persample_interp);
1231 }
1232 if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
1233 bld.MUL(*attr, *attr, this->pixel_w);
1234 }
1235 *attr = offset(*attr, bld, 1);
1236 }
1237 }
1238 (*location)++;
1239 }
1240 }
1241
1242 fs_reg *
1243 fs_visitor::emit_frontfacing_interpolation()
1244 {
1245 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));
1246
1247 if (devinfo->gen >= 6) {
1248 /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
1249 * a boolean result from this (~0/true or 0/false).
1250 *
1251 * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
1252 * this task in only one instruction:
1253 * - a negation source modifier will flip the bit; and
1254 * - a W -> D type conversion will sign extend the bit into the high
1255 * word of the destination.
1256 *
1257 * An ASR 15 fills the low word of the destination.
1258 */
1259 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
1260 g0.negate = true;
1261
1262 bld.ASR(*reg, g0, brw_imm_d(15));
1263 } else {
1264 /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
1265 * a boolean result from this (1/true or 0/false).
1266 *
1267 * Like in the above case, since the bit is the MSB of g1.6:UD we can use
1268 * the negation source modifier to flip it. Unfortunately the SHR
1269 * instruction only operates on UD (or D with an abs source modifier)
1270 * sources without negation.
1271 *
1272 * Instead, use ASR (which will give ~0/true or 0/false).
1273 */
1274 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
1275 g1_6.negate = true;
1276
1277 bld.ASR(*reg, g1_6, brw_imm_d(31));
1278 }
1279
1280 return reg;
1281 }
1282
1283 void
1284 fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
1285 {
1286 assert(stage == MESA_SHADER_FRAGMENT);
1287 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
1288 assert(dst.type == BRW_REGISTER_TYPE_F);
1289
1290 if (wm_prog_data->persample_dispatch) {
1291 /* Convert int_sample_pos to floating point */
1292 bld.MOV(dst, int_sample_pos);
1293 /* Scale to the range [0, 1] */
1294 bld.MUL(dst, dst, brw_imm_f(1 / 16.0f));
1295 }
1296 else {
1297 /* From ARB_sample_shading specification:
1298 * "When rendering to a non-multisample buffer, or if multisample
1299 * rasterization is disabled, gl_SamplePosition will always be
1300 * (0.5, 0.5).
1301 */
1302 bld.MOV(dst, brw_imm_f(0.5f));
1303 }
1304 }
1305
1306 fs_reg *
1307 fs_visitor::emit_samplepos_setup()
1308 {
1309 assert(devinfo->gen >= 6);
1310
1311 const fs_builder abld = bld.annotate("compute sample position");
1312 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
1313 fs_reg pos = *reg;
1314 fs_reg int_sample_x = vgrf(glsl_type::int_type);
1315 fs_reg int_sample_y = vgrf(glsl_type::int_type);
1316
1317 /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
1318 * mode will be enabled.
1319 *
1320 * From the Ivy Bridge PRM, volume 2 part 1, page 344:
1321 * R31.1:0 Position Offset X/Y for Slot[3:0]
1322 * R31.3:2 Position Offset X/Y for Slot[7:4]
1323 * .....
1324 *
1325 * The X, Y sample positions come in as bytes in thread payload. So, read
1326 * the positions using vstride=16, width=8, hstride=2.
1327 */
1328 struct brw_reg sample_pos_reg =
1329 stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
1330 BRW_REGISTER_TYPE_B), 16, 8, 2);
1331
1332 if (dispatch_width == 8) {
1333 abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
1334 } else {
1335 abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
1336 abld.half(1).MOV(half(int_sample_x, 1),
1337 fs_reg(suboffset(sample_pos_reg, 16)));
1338 }
1339 /* Compute gl_SamplePosition.x */
1340 compute_sample_position(pos, int_sample_x);
1341 pos = offset(pos, abld, 1);
1342 if (dispatch_width == 8) {
1343 abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
1344 } else {
1345 abld.half(0).MOV(half(int_sample_y, 0),
1346 fs_reg(suboffset(sample_pos_reg, 1)));
1347 abld.half(1).MOV(half(int_sample_y, 1),
1348 fs_reg(suboffset(sample_pos_reg, 17)));
1349 }
1350 /* Compute gl_SamplePosition.y */
1351 compute_sample_position(pos, int_sample_y);
1352 return reg;
1353 }
1354
1355 fs_reg *
1356 fs_visitor::emit_sampleid_setup()
1357 {
1358 assert(stage == MESA_SHADER_FRAGMENT);
1359 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1360 assert(devinfo->gen >= 6);
1361
1362 const fs_builder abld = bld.annotate("compute sample id");
1363 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1364
1365 if (!key->multisample_fbo) {
1366 /* As per GL_ARB_sample_shading specification:
1367 * "When rendering to a non-multisample buffer, or if multisample
1368 * rasterization is disabled, gl_SampleID will always be zero."
1369 */
1370 abld.MOV(*reg, brw_imm_d(0));
1371 } else if (devinfo->gen >= 8) {
1372 /* Sample ID comes in as 4-bit numbers in g1.0:
1373 *
1374 * 15:12 Slot 3 SampleID (only used in SIMD16)
1375 * 11:8 Slot 2 SampleID (only used in SIMD16)
1376 * 7:4 Slot 1 SampleID
1377 * 3:0 Slot 0 SampleID
1378 *
1379 * Each slot corresponds to four channels, so we want to replicate each
1380 * half-byte value to 4 channels in a row:
1381 *
1382 * dst+0: .7 .6 .5 .4 .3 .2 .1 .0
1383 * 7:4 7:4 7:4 7:4 3:0 3:0 3:0 3:0
1384 *
1385 * dst+1: .7 .6 .5 .4 .3 .2 .1 .0 (if SIMD16)
1386 * 15:12 15:12 15:12 15:12 11:8 11:8 11:8 11:8
1387 *
1388 * First, we read g1.0 with a <1,8,0>UB region, causing the first 8
1389 * channels to read the first byte (7:0), and the second group of 8
1390 * channels to read the second byte (15:8). Then, we shift right by
1391 * a vector immediate of <4, 4, 4, 4, 0, 0, 0, 0>, moving the slot 1 / 3
1392 * values into place. Finally, we AND with 0xf to keep the low nibble.
1393 *
1394 * shr(16) tmp<1>W g1.0<1,8,0>B 0x44440000:V
1395 * and(16) dst<1>D tmp<8,8,1>W 0xf:W
1396 *
1397 * TODO: These payload bits exist on Gen7 too, but they appear to always
1398 * be zero, so this code fails to work. We should find out why.
1399 */
1400 fs_reg tmp(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);
1401
1402 abld.SHR(tmp, fs_reg(stride(retype(brw_vec1_grf(1, 0),
1403 BRW_REGISTER_TYPE_B), 1, 8, 0)),
1404 brw_imm_v(0x44440000));
1405 abld.AND(*reg, tmp, brw_imm_w(0xf));
1406 } else {
1407 fs_reg t1(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_D);
1408 t1.set_smear(0);
1409 fs_reg t2(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_W);
1410
1411 /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
1412 * 8x multisampling, subspan 0 will represent sample N (where N
1413 * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
1414 * 7. We can find the value of N by looking at R0.0 bits 7:6
1415 * ("Starting Sample Pair Index (SSPI)") and multiplying by two
1416 * (since samples are always delivered in pairs). That is, we
1417 * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
1418 * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
1419 * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
1420 * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
1421 * populating a temporary variable with the sequence (0, 1, 2, 3),
1422 * and then reading from it using vstride=1, width=4, hstride=0.
1423 * These computations hold good for 4x multisampling as well.
1424 *
1425 * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
1426 * the first four slots are sample 0 of subspan 0; the next four
1427 * are sample 1 of subspan 0; the third group is sample 0 of
1428 * subspan 1, and finally sample 1 of subspan 1.
1429 */
1430
1431 /* SKL+ has an extra bit for the Starting Sample Pair Index to
1432 * accomodate 16x MSAA.
1433 */
1434 abld.exec_all().group(1, 0)
1435 .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
1436 brw_imm_ud(0xc0));
1437 abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));
1438
1439 /* This works for both SIMD8 and SIMD16 */
1440 abld.exec_all().group(4, 0).MOV(t2, brw_imm_v(0x3210));
1441
1442 /* This special instruction takes care of setting vstride=1,
1443 * width=4, hstride=0 of t2 during an ADD instruction.
1444 */
1445 abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
1446 }
1447
1448 return reg;
1449 }
1450
1451 fs_reg *
1452 fs_visitor::emit_samplemaskin_setup()
1453 {
1454 assert(stage == MESA_SHADER_FRAGMENT);
1455 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
1456 assert(devinfo->gen >= 6);
1457
1458 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
1459
1460 fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
1461 BRW_REGISTER_TYPE_D));
1462
1463 if (wm_prog_data->persample_dispatch) {
1464 /* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
1465 * and a mask representing which sample is being processed by the
1466 * current shader invocation.
1467 *
1468 * From the OES_sample_variables specification:
1469 * "When per-sample shading is active due to the use of a fragment input
1470 * qualified by "sample" or due to the use of the gl_SampleID or
1471 * gl_SamplePosition variables, only the bit for the current sample is
1472 * set in gl_SampleMaskIn."
1473 */
1474 const fs_builder abld = bld.annotate("compute gl_SampleMaskIn");
1475
1476 if (nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
1477 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
1478
1479 fs_reg one = vgrf(glsl_type::int_type);
1480 fs_reg enabled_mask = vgrf(glsl_type::int_type);
1481 abld.MOV(one, brw_imm_d(1));
1482 abld.SHL(enabled_mask, one, nir_system_values[SYSTEM_VALUE_SAMPLE_ID]);
1483 abld.AND(*reg, enabled_mask, coverage_mask);
1484 } else {
1485 /* In per-pixel mode, the coverage mask is sufficient. */
1486 *reg = coverage_mask;
1487 }
1488 return reg;
1489 }
1490
1491 fs_reg
1492 fs_visitor::resolve_source_modifiers(const fs_reg &src)
1493 {
1494 if (!src.abs && !src.negate)
1495 return src;
1496
1497 fs_reg temp = bld.vgrf(src.type);
1498 bld.MOV(temp, src);
1499
1500 return temp;
1501 }
1502
1503 void
1504 fs_visitor::emit_discard_jump()
1505 {
1506 assert(((brw_wm_prog_data*) this->prog_data)->uses_kill);
1507
1508 /* For performance, after a discard, jump to the end of the
1509 * shader if all relevant channels have been discarded.
1510 */
1511 fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
1512 discard_jump->flag_subreg = 1;
1513
1514 discard_jump->predicate = (dispatch_width == 8)
1515 ? BRW_PREDICATE_ALIGN1_ANY8H
1516 : BRW_PREDICATE_ALIGN1_ANY16H;
1517 discard_jump->predicate_inverse = true;
1518 }
1519
1520 void
1521 fs_visitor::emit_gs_thread_end()
1522 {
1523 assert(stage == MESA_SHADER_GEOMETRY);
1524
1525 struct brw_gs_prog_data *gs_prog_data =
1526 (struct brw_gs_prog_data *) prog_data;
1527
1528 if (gs_compile->control_data_header_size_bits > 0) {
1529 emit_gs_control_data_bits(this->final_gs_vertex_count);
1530 }
1531
1532 const fs_builder abld = bld.annotate("thread end");
1533 fs_inst *inst;
1534
1535 if (gs_prog_data->static_vertex_count != -1) {
1536 foreach_in_list_reverse(fs_inst, prev, &this->instructions) {
1537 if (prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8 ||
1538 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
1539 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
1540 prev->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT) {
1541 prev->eot = true;
1542
1543 /* Delete now dead instructions. */
1544 foreach_in_list_reverse_safe(exec_node, dead, &this->instructions) {
1545 if (dead == prev)
1546 break;
1547 dead->remove();
1548 }
1549 return;
1550 } else if (prev->is_control_flow() || prev->has_side_effects()) {
1551 break;
1552 }
1553 }
1554 fs_reg hdr = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1555 abld.MOV(hdr, fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD)));
1556 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, hdr);
1557 inst->mlen = 1;
1558 } else {
1559 fs_reg payload = abld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1560 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
1561 sources[0] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1562 sources[1] = this->final_gs_vertex_count;
1563 abld.LOAD_PAYLOAD(payload, sources, 2, 2);
1564 inst = abld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
1565 inst->mlen = 2;
1566 }
1567 inst->eot = true;
1568 inst->offset = 0;
1569 }
1570
1571 void
1572 fs_visitor::assign_curb_setup()
1573 {
1574 prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;
1575
1576 /* Map the offsets in the UNIFORM file to fixed HW regs. */
1577 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1578 for (unsigned int i = 0; i < inst->sources; i++) {
1579 if (inst->src[i].file == UNIFORM) {
1580 int uniform_nr = inst->src[i].nr + inst->src[i].reg_offset;
1581 int constant_nr;
1582 if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
1583 constant_nr = push_constant_loc[uniform_nr];
1584 } else {
1585 /* Section 5.11 of the OpenGL 4.1 spec says:
1586 * "Out-of-bounds reads return undefined values, which include
1587 * values from other variables of the active program or zero."
1588 * Just return the first push constant.
1589 */
1590 constant_nr = 0;
1591 }
1592
1593 struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +
1594 constant_nr / 8,
1595 constant_nr % 8);
1596 brw_reg.abs = inst->src[i].abs;
1597 brw_reg.negate = inst->src[i].negate;
1598
1599 assert(inst->src[i].stride == 0);
1600 inst->src[i] = byte_offset(
1601 retype(brw_reg, inst->src[i].type),
1602 inst->src[i].subreg_offset);
1603 }
1604 }
1605 }
1606
1607 /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
1608 this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
1609 }
1610
1611 void
1612 fs_visitor::calculate_urb_setup()
1613 {
1614 assert(stage == MESA_SHADER_FRAGMENT);
1615 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1616 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1617
1618 memset(prog_data->urb_setup, -1,
1619 sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
1620
1621 int urb_next = 0;
1622 /* Figure out where each of the incoming setup attributes lands. */
1623 if (devinfo->gen >= 6) {
1624 if (_mesa_bitcount_64(nir->info.inputs_read &
1625 BRW_FS_VARYING_INPUT_MASK) <= 16) {
1626 /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
1627 * first 16 varying inputs, so we can put them wherever we want.
1628 * Just put them in order.
1629 *
1630 * This is useful because it means that (a) inputs not used by the
1631 * fragment shader won't take up valuable register space, and (b) we
1632 * won't have to recompile the fragment shader if it gets paired with
1633 * a different vertex (or geometry) shader.
1634 */
1635 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1636 if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1637 BITFIELD64_BIT(i)) {
1638 prog_data->urb_setup[i] = urb_next++;
1639 }
1640 }
1641 } else {
1642 bool include_vue_header =
1643 nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
1644
1645 /* We have enough input varyings that the SF/SBE pipeline stage can't
1646 * arbitrarily rearrange them to suit our whim; we have to put them
1647 * in an order that matches the output of the previous pipeline stage
1648 * (geometry or vertex shader).
1649 */
1650 struct brw_vue_map prev_stage_vue_map;
1651 brw_compute_vue_map(devinfo, &prev_stage_vue_map,
1652 key->input_slots_valid,
1653 nir->info.separate_shader);
1654 int first_slot =
1655 include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
1656
1657 assert(prev_stage_vue_map.num_slots <= first_slot + 32);
1658 for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
1659 slot++) {
1660 int varying = prev_stage_vue_map.slot_to_varying[slot];
1661 if (varying != BRW_VARYING_SLOT_PAD &&
1662 (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
1663 BITFIELD64_BIT(varying))) {
1664 prog_data->urb_setup[varying] = slot - first_slot;
1665 }
1666 }
1667 urb_next = prev_stage_vue_map.num_slots - first_slot;
1668 }
1669 } else {
1670 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
1671 for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
1672 /* Point size is packed into the header, not as a general attribute */
1673 if (i == VARYING_SLOT_PSIZ)
1674 continue;
1675
1676 if (key->input_slots_valid & BITFIELD64_BIT(i)) {
1677 /* The back color slot is skipped when the front color is
1678 * also written to. In addition, some slots can be
1679 * written in the vertex shader and not read in the
1680 * fragment shader. So the register number must always be
1681 * incremented, mapped or not.
1682 */
1683 if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
1684 prog_data->urb_setup[i] = urb_next;
1685 urb_next++;
1686 }
1687 }
1688
1689 /*
1690 * It's a FS only attribute, and we did interpolation for this attribute
1691 * in SF thread. So, count it here, too.
1692 *
1693 * See compile_sf_prog() for more info.
1694 */
1695 if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
1696 prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
1697 }
1698
1699 prog_data->num_varying_inputs = urb_next;
1700 }
1701
1702 void
1703 fs_visitor::assign_urb_setup()
1704 {
1705 assert(stage == MESA_SHADER_FRAGMENT);
1706 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
1707
1708 int urb_start = payload.num_regs + prog_data->base.curb_read_length;
1709
1710 /* Offset all the urb_setup[] index by the actual position of the
1711 * setup regs, now that the location of the constants has been chosen.
1712 */
1713 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1714 if (inst->opcode == FS_OPCODE_LINTERP) {
1715 assert(inst->src[1].file == FIXED_GRF);
1716 inst->src[1].nr += urb_start;
1717 }
1718
1719 if (inst->opcode == FS_OPCODE_CINTERP) {
1720 assert(inst->src[0].file == FIXED_GRF);
1721 inst->src[0].nr += urb_start;
1722 }
1723 }
1724
1725 /* Each attribute is 4 setup channels, each of which is half a reg. */
1726 this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
1727 }
1728
1729 void
1730 fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
1731 {
1732 for (int i = 0; i < inst->sources; i++) {
1733 if (inst->src[i].file == ATTR) {
1734 int grf = payload.num_regs +
1735 prog_data->curb_read_length +
1736 inst->src[i].nr +
1737 inst->src[i].reg_offset;
1738
1739 /* As explained at brw_reg_from_fs_reg, From the Haswell PRM:
1740 *
1741 * VertStride must be used to cross GRF register boundaries. This
1742 * rule implies that elements within a 'Width' cannot cross GRF
1743 * boundaries.
1744 *
1745 * So, for registers that are large enough, we have to split the exec
1746 * size in two and trust the compression state to sort it out.
1747 */
1748 unsigned total_size = inst->exec_size *
1749 inst->src[i].stride *
1750 type_sz(inst->src[i].type);
1751
1752 assert(total_size <= 2 * REG_SIZE);
1753 const unsigned exec_size =
1754 (total_size <= REG_SIZE) ? inst->exec_size : inst->exec_size / 2;
1755
1756 unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
1757 struct brw_reg reg =
1758 stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
1759 inst->src[i].subreg_offset),
1760 exec_size * inst->src[i].stride,
1761 width, inst->src[i].stride);
1762 reg.abs = inst->src[i].abs;
1763 reg.negate = inst->src[i].negate;
1764
1765 inst->src[i] = reg;
1766 }
1767 }
1768 }
1769
1770 void
1771 fs_visitor::assign_vs_urb_setup()
1772 {
1773 brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;
1774
1775 assert(stage == MESA_SHADER_VERTEX);
1776
1777 /* Each attribute is 4 regs. */
1778 this->first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;
1779
1780 assert(vs_prog_data->base.urb_read_length <= 15);
1781
1782 /* Rewrite all ATTR file references to the hw grf that they land in. */
1783 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1784 convert_attr_sources_to_hw_regs(inst);
1785 }
1786 }
1787
1788 void
1789 fs_visitor::assign_tcs_single_patch_urb_setup()
1790 {
1791 assert(stage == MESA_SHADER_TESS_CTRL);
1792
1793 /* Rewrite all ATTR file references to HW_REGs. */
1794 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1795 convert_attr_sources_to_hw_regs(inst);
1796 }
1797 }
1798
1799 void
1800 fs_visitor::assign_tes_urb_setup()
1801 {
1802 assert(stage == MESA_SHADER_TESS_EVAL);
1803
1804 brw_vue_prog_data *vue_prog_data = (brw_vue_prog_data *) prog_data;
1805
1806 first_non_payload_grf += 8 * vue_prog_data->urb_read_length;
1807
1808 /* Rewrite all ATTR file references to HW_REGs. */
1809 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1810 convert_attr_sources_to_hw_regs(inst);
1811 }
1812 }
1813
1814 void
1815 fs_visitor::assign_gs_urb_setup()
1816 {
1817 assert(stage == MESA_SHADER_GEOMETRY);
1818
1819 brw_vue_prog_data *vue_prog_data = (brw_vue_prog_data *) prog_data;
1820
1821 first_non_payload_grf +=
1822 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
1823
1824 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1825 /* Rewrite all ATTR file references to GRFs. */
1826 convert_attr_sources_to_hw_regs(inst);
1827 }
1828 }
1829
1830
1831 /**
1832 * Split large virtual GRFs into separate components if we can.
1833 *
1834 * This is mostly duplicated with what brw_fs_vector_splitting does,
1835 * but that's really conservative because it's afraid of doing
1836 * splitting that doesn't result in real progress after the rest of
1837 * the optimization phases, which would cause infinite looping in
1838 * optimization. We can do it once here, safely. This also has the
1839 * opportunity to split interpolated values, or maybe even uniforms,
1840 * which we don't have at the IR level.
1841 *
1842 * We want to split, because virtual GRFs are what we register
1843 * allocate and spill (due to contiguousness requirements for some
1844 * instructions), and they're what we naturally generate in the
1845 * codegen process, but most virtual GRFs don't actually need to be
1846 * contiguous sets of GRFs. If we split, we'll end up with reduced
1847 * live intervals and better dead code elimination and coalescing.
1848 */
1849 void
1850 fs_visitor::split_virtual_grfs()
1851 {
1852 int num_vars = this->alloc.count;
1853
1854 /* Count the total number of registers */
1855 int reg_count = 0;
1856 int vgrf_to_reg[num_vars];
1857 for (int i = 0; i < num_vars; i++) {
1858 vgrf_to_reg[i] = reg_count;
1859 reg_count += alloc.sizes[i];
1860 }
1861
1862 /* An array of "split points". For each register slot, this indicates
1863 * if this slot can be separated from the previous slot. Every time an
1864 * instruction uses multiple elements of a register (as a source or
1865 * destination), we mark the used slots as inseparable. Then we go
1866 * through and split the registers into the smallest pieces we can.
1867 */
1868 bool split_points[reg_count];
1869 memset(split_points, 0, sizeof(split_points));
1870
1871 /* Mark all used registers as fully splittable */
1872 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1873 if (inst->dst.file == VGRF) {
1874 int reg = vgrf_to_reg[inst->dst.nr];
1875 for (unsigned j = 1; j < this->alloc.sizes[inst->dst.nr]; j++)
1876 split_points[reg + j] = true;
1877 }
1878
1879 for (int i = 0; i < inst->sources; i++) {
1880 if (inst->src[i].file == VGRF) {
1881 int reg = vgrf_to_reg[inst->src[i].nr];
1882 for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].nr]; j++)
1883 split_points[reg + j] = true;
1884 }
1885 }
1886 }
1887
1888 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1889 if (inst->dst.file == VGRF) {
1890 int reg = vgrf_to_reg[inst->dst.nr] + inst->dst.reg_offset;
1891 for (int j = 1; j < inst->regs_written; j++)
1892 split_points[reg + j] = false;
1893 }
1894 for (int i = 0; i < inst->sources; i++) {
1895 if (inst->src[i].file == VGRF) {
1896 int reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].reg_offset;
1897 for (int j = 1; j < inst->regs_read(i); j++)
1898 split_points[reg + j] = false;
1899 }
1900 }
1901 }
1902
1903 int new_virtual_grf[reg_count];
1904 int new_reg_offset[reg_count];
1905
1906 int reg = 0;
1907 for (int i = 0; i < num_vars; i++) {
1908 /* The first one should always be 0 as a quick sanity check. */
1909 assert(split_points[reg] == false);
1910
1911 /* j = 0 case */
1912 new_reg_offset[reg] = 0;
1913 reg++;
1914 int offset = 1;
1915
1916 /* j > 0 case */
1917 for (unsigned j = 1; j < alloc.sizes[i]; j++) {
1918 /* If this is a split point, reset the offset to 0 and allocate a
1919 * new virtual GRF for the previous offset many registers
1920 */
1921 if (split_points[reg]) {
1922 assert(offset <= MAX_VGRF_SIZE);
1923 int grf = alloc.allocate(offset);
1924 for (int k = reg - offset; k < reg; k++)
1925 new_virtual_grf[k] = grf;
1926 offset = 0;
1927 }
1928 new_reg_offset[reg] = offset;
1929 offset++;
1930 reg++;
1931 }
1932
1933 /* The last one gets the original register number */
1934 assert(offset <= MAX_VGRF_SIZE);
1935 alloc.sizes[i] = offset;
1936 for (int k = reg - offset; k < reg; k++)
1937 new_virtual_grf[k] = i;
1938 }
1939 assert(reg == reg_count);
1940
1941 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1942 if (inst->dst.file == VGRF) {
1943 reg = vgrf_to_reg[inst->dst.nr] + inst->dst.reg_offset;
1944 inst->dst.nr = new_virtual_grf[reg];
1945 inst->dst.reg_offset = new_reg_offset[reg];
1946 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1947 }
1948 for (int i = 0; i < inst->sources; i++) {
1949 if (inst->src[i].file == VGRF) {
1950 reg = vgrf_to_reg[inst->src[i].nr] + inst->src[i].reg_offset;
1951 inst->src[i].nr = new_virtual_grf[reg];
1952 inst->src[i].reg_offset = new_reg_offset[reg];
1953 assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);
1954 }
1955 }
1956 }
1957 invalidate_live_intervals();
1958 }
1959
1960 /**
1961 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
1962 *
1963 * During code generation, we create tons of temporary variables, many of
1964 * which get immediately killed and are never used again. Yet, in later
1965 * optimization and analysis passes, such as compute_live_intervals, we need
1966 * to loop over all the virtual GRFs. Compacting them can save a lot of
1967 * overhead.
1968 */
1969 bool
1970 fs_visitor::compact_virtual_grfs()
1971 {
1972 bool progress = false;
1973 int remap_table[this->alloc.count];
1974 memset(remap_table, -1, sizeof(remap_table));
1975
1976 /* Mark which virtual GRFs are used. */
1977 foreach_block_and_inst(block, const fs_inst, inst, cfg) {
1978 if (inst->dst.file == VGRF)
1979 remap_table[inst->dst.nr] = 0;
1980
1981 for (int i = 0; i < inst->sources; i++) {
1982 if (inst->src[i].file == VGRF)
1983 remap_table[inst->src[i].nr] = 0;
1984 }
1985 }
1986
1987 /* Compact the GRF arrays. */
1988 int new_index = 0;
1989 for (unsigned i = 0; i < this->alloc.count; i++) {
1990 if (remap_table[i] == -1) {
1991 /* We just found an unused register. This means that we are
1992 * actually going to compact something.
1993 */
1994 progress = true;
1995 } else {
1996 remap_table[i] = new_index;
1997 alloc.sizes[new_index] = alloc.sizes[i];
1998 invalidate_live_intervals();
1999 ++new_index;
2000 }
2001 }
2002
2003 this->alloc.count = new_index;
2004
2005 /* Patch all the instructions to use the newly renumbered registers */
2006 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2007 if (inst->dst.file == VGRF)
2008 inst->dst.nr = remap_table[inst->dst.nr];
2009
2010 for (int i = 0; i < inst->sources; i++) {
2011 if (inst->src[i].file == VGRF)
2012 inst->src[i].nr = remap_table[inst->src[i].nr];
2013 }
2014 }
2015
2016 /* Patch all the references to delta_xy, since they're used in register
2017 * allocation. If they're unused, switch them to BAD_FILE so we don't
2018 * think some random VGRF is delta_xy.
2019 */
2020 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2021 if (delta_xy[i].file == VGRF) {
2022 if (remap_table[delta_xy[i].nr] != -1) {
2023 delta_xy[i].nr = remap_table[delta_xy[i].nr];
2024 } else {
2025 delta_xy[i].file = BAD_FILE;
2026 }
2027 }
2028 }
2029
2030 return progress;
2031 }
2032
2033 static void
2034 set_push_pull_constant_loc(unsigned uniform, int *chunk_start, bool contiguous,
2035 int *push_constant_loc, int *pull_constant_loc,
2036 unsigned *num_push_constants,
2037 unsigned *num_pull_constants,
2038 const unsigned max_push_components,
2039 const unsigned max_chunk_size,
2040 struct brw_stage_prog_data *stage_prog_data)
2041 {
2042 /* This is the first live uniform in the chunk */
2043 if (*chunk_start < 0)
2044 *chunk_start = uniform;
2045
2046 /* If this element does not need to be contiguous with the next, we
2047 * split at this point and everything between chunk_start and u forms a
2048 * single chunk.
2049 */
2050 if (!contiguous) {
2051 unsigned chunk_size = uniform - *chunk_start + 1;
2052
2053 /* Decide whether we should push or pull this parameter. In the
2054 * Vulkan driver, push constants are explicitly exposed via the API
2055 * so we push everything. In GL, we only push small arrays.
2056 */
2057 if (stage_prog_data->pull_param == NULL ||
2058 (*num_push_constants + chunk_size <= max_push_components &&
2059 chunk_size <= max_chunk_size)) {
2060 assert(*num_push_constants + chunk_size <= max_push_components);
2061 for (unsigned j = *chunk_start; j <= uniform; j++)
2062 push_constant_loc[j] = (*num_push_constants)++;
2063 } else {
2064 for (unsigned j = *chunk_start; j <= uniform; j++)
2065 pull_constant_loc[j] = (*num_pull_constants)++;
2066 }
2067
2068 *chunk_start = -1;
2069 }
2070 }
2071
2072 /**
2073 * Assign UNIFORM file registers to either push constants or pull constants.
2074 *
2075 * We allow a fragment shader to have more than the specified minimum
2076 * maximum number of fragment shader uniform components (64). If
2077 * there are too many of these, they'd fill up all of register space.
2078 * So, this will push some of them out to the pull constant buffer and
2079 * update the program to load them.
2080 */
2081 void
2082 fs_visitor::assign_constant_locations()
2083 {
2084 /* Only the first compile gets to decide on locations. */
2085 if (dispatch_width != min_dispatch_width)
2086 return;
2087
2088 bool is_live[uniforms];
2089 memset(is_live, 0, sizeof(is_live));
2090 bool is_live_64bit[uniforms];
2091 memset(is_live_64bit, 0, sizeof(is_live_64bit));
2092
2093 /* For each uniform slot, a value of true indicates that the given slot and
2094 * the next slot must remain contiguous. This is used to keep us from
2095 * splitting arrays apart.
2096 */
2097 bool contiguous[uniforms];
2098 memset(contiguous, 0, sizeof(contiguous));
2099
2100 /* First, we walk through the instructions and do two things:
2101 *
2102 * 1) Figure out which uniforms are live.
2103 *
2104 * 2) Mark any indirectly used ranges of registers as contiguous.
2105 *
2106 * Note that we don't move constant-indexed accesses to arrays. No
2107 * testing has been done of the performance impact of this choice.
2108 */
2109 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2110 for (int i = 0 ; i < inst->sources; i++) {
2111 if (inst->src[i].file != UNIFORM)
2112 continue;
2113
2114 int constant_nr = inst->src[i].nr + inst->src[i].reg_offset;
2115
2116 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0) {
2117 assert(inst->src[2].ud % 4 == 0);
2118 unsigned last = constant_nr + (inst->src[2].ud / 4) - 1;
2119 assert(last < uniforms);
2120
2121 for (unsigned j = constant_nr; j < last; j++) {
2122 is_live[j] = true;
2123 contiguous[j] = true;
2124 if (type_sz(inst->src[i].type) == 8) {
2125 is_live_64bit[j] = true;
2126 }
2127 }
2128 is_live[last] = true;
2129 } else {
2130 if (constant_nr >= 0 && constant_nr < (int) uniforms) {
2131 int regs_read = inst->components_read(i) *
2132 type_sz(inst->src[i].type) / 4;
2133 for (int j = 0; j < regs_read; j++) {
2134 is_live[constant_nr + j] = true;
2135 if (type_sz(inst->src[i].type) == 8) {
2136 is_live_64bit[constant_nr + j] = true;
2137 }
2138 }
2139 }
2140 }
2141 }
2142 }
2143
2144 /* Only allow 16 registers (128 uniform components) as push constants.
2145 *
2146 * Just demote the end of the list. We could probably do better
2147 * here, demoting things that are rarely used in the program first.
2148 *
2149 * If changing this value, note the limitation about total_regs in
2150 * brw_curbe.c.
2151 */
2152 const unsigned int max_push_components = 16 * 8;
2153
2154 /* We push small arrays, but no bigger than 16 floats. This is big enough
2155 * for a vec4 but hopefully not large enough to push out other stuff. We
2156 * should probably use a better heuristic at some point.
2157 */
2158 const unsigned int max_chunk_size = 16;
2159
2160 unsigned int num_push_constants = 0;
2161 unsigned int num_pull_constants = 0;
2162
2163 push_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2164 pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
2165
2166 /* Default to -1 meaning no location */
2167 memset(push_constant_loc, -1, uniforms * sizeof(*push_constant_loc));
2168 memset(pull_constant_loc, -1, uniforms * sizeof(*pull_constant_loc));
2169
2170 int chunk_start = -1;
2171
2172 /* First push 64-bit uniforms to ensure they are properly aligned */
2173 for (unsigned u = 0; u < uniforms; u++) {
2174 if (!is_live[u] || !is_live_64bit[u])
2175 continue;
2176
2177 set_push_pull_constant_loc(u, &chunk_start, contiguous[u],
2178 push_constant_loc, pull_constant_loc,
2179 &num_push_constants, &num_pull_constants,
2180 max_push_components, max_chunk_size,
2181 stage_prog_data);
2182
2183 }
2184
2185 /* Then push the rest of uniforms */
2186 for (unsigned u = 0; u < uniforms; u++) {
2187 if (!is_live[u] || is_live_64bit[u])
2188 continue;
2189
2190 set_push_pull_constant_loc(u, &chunk_start, contiguous[u],
2191 push_constant_loc, pull_constant_loc,
2192 &num_push_constants, &num_pull_constants,
2193 max_push_components, max_chunk_size,
2194 stage_prog_data);
2195 }
2196
2197 /* As the uniforms are going to be reordered, take the data from a temporary
2198 * copy of the original param[].
2199 */
2200 gl_constant_value **param = ralloc_array(NULL, gl_constant_value*,
2201 stage_prog_data->nr_params);
2202 memcpy(param, stage_prog_data->param,
2203 sizeof(gl_constant_value*) * stage_prog_data->nr_params);
2204 stage_prog_data->nr_params = num_push_constants;
2205 stage_prog_data->nr_pull_params = num_pull_constants;
2206
2207 /* Up until now, the param[] array has been indexed by reg + reg_offset
2208 * of UNIFORM registers. Move pull constants into pull_param[] and
2209 * condense param[] to only contain the uniforms we chose to push.
2210 *
2211 * NOTE: Because we are condensing the params[] array, we know that
2212 * push_constant_loc[i] <= i and we can do it in one smooth loop without
2213 * having to make a copy.
2214 */
2215 for (unsigned int i = 0; i < uniforms; i++) {
2216 const gl_constant_value *value = param[i];
2217
2218 if (pull_constant_loc[i] != -1) {
2219 stage_prog_data->pull_param[pull_constant_loc[i]] = value;
2220 } else if (push_constant_loc[i] != -1) {
2221 stage_prog_data->param[push_constant_loc[i]] = value;
2222 }
2223 }
2224 ralloc_free(param);
2225 }
2226
2227 /**
2228 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
2229 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
2230 */
2231 void
2232 fs_visitor::lower_constant_loads()
2233 {
2234 const unsigned index = stage_prog_data->binding_table.pull_constants_start;
2235
2236 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2237 /* Set up the annotation tracking for new generated instructions. */
2238 const fs_builder ibld(this, block, inst);
2239
2240 for (int i = 0; i < inst->sources; i++) {
2241 if (inst->src[i].file != UNIFORM)
2242 continue;
2243
2244 /* We'll handle this case later */
2245 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT && i == 0)
2246 continue;
2247
2248 unsigned location = inst->src[i].nr + inst->src[i].reg_offset;
2249 if (location >= uniforms)
2250 continue; /* Out of bounds access */
2251
2252 int pull_index = pull_constant_loc[location];
2253
2254 if (pull_index == -1)
2255 continue;
2256
2257 const unsigned index = stage_prog_data->binding_table.pull_constants_start;
2258 fs_reg dst;
2259
2260 if (type_sz(inst->src[i].type) <= 4)
2261 dst = vgrf(glsl_type::float_type);
2262 else
2263 dst = vgrf(glsl_type::double_type);
2264
2265 assert(inst->src[i].stride == 0);
2266
2267 const fs_builder ubld = ibld.exec_all().group(8, 0);
2268 struct brw_reg offset = brw_imm_ud((unsigned)(pull_index * 4) & ~15);
2269 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
2270 dst, brw_imm_ud(index), offset);
2271
2272 /* Rewrite the instruction to use the temporary VGRF. */
2273 inst->src[i].file = VGRF;
2274 inst->src[i].nr = dst.nr;
2275 inst->src[i].reg_offset = 0;
2276 inst->src[i].set_smear((pull_index & 3) * 4 /
2277 type_sz(inst->src[i].type));
2278
2279 brw_mark_surface_used(prog_data, index);
2280 }
2281
2282 if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
2283 inst->src[0].file == UNIFORM) {
2284
2285 unsigned location = inst->src[0].nr + inst->src[0].reg_offset;
2286 if (location >= uniforms)
2287 continue; /* Out of bounds access */
2288
2289 int pull_index = pull_constant_loc[location];
2290
2291 if (pull_index == -1)
2292 continue;
2293
2294 VARYING_PULL_CONSTANT_LOAD(ibld, inst->dst,
2295 brw_imm_ud(index),
2296 inst->src[1],
2297 pull_index * 4);
2298 inst->remove(block);
2299
2300 brw_mark_surface_used(prog_data, index);
2301 }
2302 }
2303 invalidate_live_intervals();
2304 }
2305
2306 bool
2307 fs_visitor::opt_algebraic()
2308 {
2309 bool progress = false;
2310
2311 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2312 switch (inst->opcode) {
2313 case BRW_OPCODE_MOV:
2314 if (inst->src[0].file != IMM)
2315 break;
2316
2317 if (inst->saturate) {
2318 if (inst->dst.type != inst->src[0].type)
2319 assert(!"unimplemented: saturate mixed types");
2320
2321 if (brw_saturate_immediate(inst->dst.type,
2322 &inst->src[0].as_brw_reg())) {
2323 inst->saturate = false;
2324 progress = true;
2325 }
2326 }
2327 break;
2328
2329 case BRW_OPCODE_MUL:
2330 if (inst->src[1].file != IMM)
2331 continue;
2332
2333 /* a * 1.0 = a */
2334 if (inst->src[1].is_one()) {
2335 inst->opcode = BRW_OPCODE_MOV;
2336 inst->src[1] = reg_undef;
2337 progress = true;
2338 break;
2339 }
2340
2341 /* a * -1.0 = -a */
2342 if (inst->src[1].is_negative_one()) {
2343 inst->opcode = BRW_OPCODE_MOV;
2344 inst->src[0].negate = !inst->src[0].negate;
2345 inst->src[1] = reg_undef;
2346 progress = true;
2347 break;
2348 }
2349
2350 /* a * 0.0 = 0.0 */
2351 if (inst->src[1].is_zero()) {
2352 inst->opcode = BRW_OPCODE_MOV;
2353 inst->src[0] = inst->src[1];
2354 inst->src[1] = reg_undef;
2355 progress = true;
2356 break;
2357 }
2358
2359 if (inst->src[0].file == IMM) {
2360 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2361 inst->opcode = BRW_OPCODE_MOV;
2362 inst->src[0].f *= inst->src[1].f;
2363 inst->src[1] = reg_undef;
2364 progress = true;
2365 break;
2366 }
2367 break;
2368 case BRW_OPCODE_ADD:
2369 if (inst->src[1].file != IMM)
2370 continue;
2371
2372 /* a + 0.0 = a */
2373 if (inst->src[1].is_zero()) {
2374 inst->opcode = BRW_OPCODE_MOV;
2375 inst->src[1] = reg_undef;
2376 progress = true;
2377 break;
2378 }
2379
2380 if (inst->src[0].file == IMM) {
2381 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2382 inst->opcode = BRW_OPCODE_MOV;
2383 inst->src[0].f += inst->src[1].f;
2384 inst->src[1] = reg_undef;
2385 progress = true;
2386 break;
2387 }
2388 break;
2389 case BRW_OPCODE_OR:
2390 if (inst->src[0].equals(inst->src[1])) {
2391 inst->opcode = BRW_OPCODE_MOV;
2392 inst->src[1] = reg_undef;
2393 progress = true;
2394 break;
2395 }
2396 break;
2397 case BRW_OPCODE_LRP:
2398 if (inst->src[1].equals(inst->src[2])) {
2399 inst->opcode = BRW_OPCODE_MOV;
2400 inst->src[0] = inst->src[1];
2401 inst->src[1] = reg_undef;
2402 inst->src[2] = reg_undef;
2403 progress = true;
2404 break;
2405 }
2406 break;
2407 case BRW_OPCODE_CMP:
2408 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2409 inst->src[0].abs &&
2410 inst->src[0].negate &&
2411 inst->src[1].is_zero()) {
2412 inst->src[0].abs = false;
2413 inst->src[0].negate = false;
2414 inst->conditional_mod = BRW_CONDITIONAL_Z;
2415 progress = true;
2416 break;
2417 }
2418 break;
2419 case BRW_OPCODE_SEL:
2420 if (inst->src[0].equals(inst->src[1])) {
2421 inst->opcode = BRW_OPCODE_MOV;
2422 inst->src[1] = reg_undef;
2423 inst->predicate = BRW_PREDICATE_NONE;
2424 inst->predicate_inverse = false;
2425 progress = true;
2426 } else if (inst->saturate && inst->src[1].file == IMM) {
2427 switch (inst->conditional_mod) {
2428 case BRW_CONDITIONAL_LE:
2429 case BRW_CONDITIONAL_L:
2430 switch (inst->src[1].type) {
2431 case BRW_REGISTER_TYPE_F:
2432 if (inst->src[1].f >= 1.0f) {
2433 inst->opcode = BRW_OPCODE_MOV;
2434 inst->src[1] = reg_undef;
2435 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2436 progress = true;
2437 }
2438 break;
2439 default:
2440 break;
2441 }
2442 break;
2443 case BRW_CONDITIONAL_GE:
2444 case BRW_CONDITIONAL_G:
2445 switch (inst->src[1].type) {
2446 case BRW_REGISTER_TYPE_F:
2447 if (inst->src[1].f <= 0.0f) {
2448 inst->opcode = BRW_OPCODE_MOV;
2449 inst->src[1] = reg_undef;
2450 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2451 progress = true;
2452 }
2453 break;
2454 default:
2455 break;
2456 }
2457 default:
2458 break;
2459 }
2460 }
2461 break;
2462 case BRW_OPCODE_MAD:
2463 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2464 inst->opcode = BRW_OPCODE_MOV;
2465 inst->src[1] = reg_undef;
2466 inst->src[2] = reg_undef;
2467 progress = true;
2468 } else if (inst->src[0].is_zero()) {
2469 inst->opcode = BRW_OPCODE_MUL;
2470 inst->src[0] = inst->src[2];
2471 inst->src[2] = reg_undef;
2472 progress = true;
2473 } else if (inst->src[1].is_one()) {
2474 inst->opcode = BRW_OPCODE_ADD;
2475 inst->src[1] = inst->src[2];
2476 inst->src[2] = reg_undef;
2477 progress = true;
2478 } else if (inst->src[2].is_one()) {
2479 inst->opcode = BRW_OPCODE_ADD;
2480 inst->src[2] = reg_undef;
2481 progress = true;
2482 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2483 inst->opcode = BRW_OPCODE_ADD;
2484 inst->src[1].f *= inst->src[2].f;
2485 inst->src[2] = reg_undef;
2486 progress = true;
2487 }
2488 break;
2489 case SHADER_OPCODE_BROADCAST:
2490 if (is_uniform(inst->src[0])) {
2491 inst->opcode = BRW_OPCODE_MOV;
2492 inst->sources = 1;
2493 inst->force_writemask_all = true;
2494 progress = true;
2495 } else if (inst->src[1].file == IMM) {
2496 inst->opcode = BRW_OPCODE_MOV;
2497 inst->src[0] = component(inst->src[0],
2498 inst->src[1].ud);
2499 inst->sources = 1;
2500 inst->force_writemask_all = true;
2501 progress = true;
2502 }
2503 break;
2504
2505 default:
2506 break;
2507 }
2508
2509 /* Swap if src[0] is immediate. */
2510 if (progress && inst->is_commutative()) {
2511 if (inst->src[0].file == IMM) {
2512 fs_reg tmp = inst->src[1];
2513 inst->src[1] = inst->src[0];
2514 inst->src[0] = tmp;
2515 }
2516 }
2517 }
2518 return progress;
2519 }
2520
2521 /**
2522 * Optimize sample messages that have constant zero values for the trailing
2523 * texture coordinates. We can just reduce the message length for these
2524 * instructions instead of reserving a register for it. Trailing parameters
2525 * that aren't sent default to zero anyway. This will cause the dead code
2526 * eliminator to remove the MOV instruction that would otherwise be emitted to
2527 * set up the zero value.
2528 */
2529 bool
2530 fs_visitor::opt_zero_samples()
2531 {
2532 /* Gen4 infers the texturing opcode based on the message length so we can't
2533 * change it.
2534 */
2535 if (devinfo->gen < 5)
2536 return false;
2537
2538 bool progress = false;
2539
2540 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2541 if (!inst->is_tex())
2542 continue;
2543
2544 fs_inst *load_payload = (fs_inst *) inst->prev;
2545
2546 if (load_payload->is_head_sentinel() ||
2547 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2548 continue;
2549
2550 /* We don't want to remove the message header or the first parameter.
2551 * Removing the first parameter is not allowed, see the Haswell PRM
2552 * volume 7, page 149:
2553 *
2554 * "Parameter 0 is required except for the sampleinfo message, which
2555 * has no parameter 0"
2556 */
2557 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2558 load_payload->src[(inst->mlen - inst->header_size) /
2559 (inst->exec_size / 8) +
2560 inst->header_size - 1].is_zero()) {
2561 inst->mlen -= inst->exec_size / 8;
2562 progress = true;
2563 }
2564 }
2565
2566 if (progress)
2567 invalidate_live_intervals();
2568
2569 return progress;
2570 }
2571
2572 /**
2573 * Optimize sample messages which are followed by the final RT write.
2574 *
2575 * CHV, and GEN9+ can mark a texturing SEND instruction with EOT to have its
2576 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2577 * final texturing results copied to the framebuffer write payload and modify
2578 * them to write to the framebuffer directly.
2579 */
2580 bool
2581 fs_visitor::opt_sampler_eot()
2582 {
2583 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2584
2585 if (stage != MESA_SHADER_FRAGMENT)
2586 return false;
2587
2588 if (devinfo->gen < 9 && !devinfo->is_cherryview)
2589 return false;
2590
2591 /* FINISHME: It should be possible to implement this optimization when there
2592 * are multiple drawbuffers.
2593 */
2594 if (key->nr_color_regions != 1)
2595 return false;
2596
2597 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2598 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2599 fs_inst *fb_write = (fs_inst *)block->end();
2600 assert(fb_write->eot);
2601 assert(fb_write->opcode == FS_OPCODE_FB_WRITE);
2602
2603 /* There wasn't one; nothing to do. */
2604 if (unlikely(fb_write->prev->is_head_sentinel()))
2605 return false;
2606
2607 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2608
2609 /* 3D Sampler » Messages » Message Format
2610 *
2611 * “Response Length of zero is allowed on all SIMD8* and SIMD16* sampler
2612 * messages except sample+killpix, resinfo, sampleinfo, LOD, and gather4*”
2613 */
2614 if (!tex_inst->is_tex() ||
2615 tex_inst->opcode == SHADER_OPCODE_TXS ||
2616 tex_inst->opcode == SHADER_OPCODE_SAMPLEINFO ||
2617 tex_inst->opcode == SHADER_OPCODE_LOD ||
2618 tex_inst->opcode == SHADER_OPCODE_TG4 ||
2619 tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
2620 return false;
2621
2622 /* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
2623 * It's very likely to be the previous instruction.
2624 */
2625 if (tex_inst->prev->is_head_sentinel())
2626 return false;
2627
2628 fs_inst *load_payload = (fs_inst *) tex_inst->prev;
2629 if (load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2630 return false;
2631
2632 assert(!tex_inst->eot); /* We can't get here twice */
2633 assert((tex_inst->offset & (0xff << 24)) == 0);
2634
2635 const fs_builder ibld(this, block, tex_inst);
2636
2637 tex_inst->offset |= fb_write->target << 24;
2638 tex_inst->eot = true;
2639 tex_inst->dst = ibld.null_reg_ud();
2640 tex_inst->regs_written = 0;
2641 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2642
2643 /* If a header is present, marking the eot is sufficient. Otherwise, we need
2644 * to create a new LOAD_PAYLOAD command with the same sources and a space
2645 * saved for the header. Using a new destination register not only makes sure
2646 * we have enough space, but it will make sure the dead code eliminator kills
2647 * the instruction that this will replace.
2648 */
2649 if (tex_inst->header_size != 0) {
2650 invalidate_live_intervals();
2651 return true;
2652 }
2653
2654 fs_reg send_header = ibld.vgrf(BRW_REGISTER_TYPE_F,
2655 load_payload->sources + 1);
2656 fs_reg *new_sources =
2657 ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
2658
2659 new_sources[0] = fs_reg();
2660 for (int i = 0; i < load_payload->sources; i++)
2661 new_sources[i+1] = load_payload->src[i];
2662
2663 /* The LOAD_PAYLOAD helper seems like the obvious choice here. However, it
2664 * requires a lot of information about the sources to appropriately figure
2665 * out the number of registers needed to be used. Given this stage in our
2666 * optimization, we may not have the appropriate GRFs required by
2667 * LOAD_PAYLOAD at this point (copy propagation). Therefore, we need to
2668 * manually emit the instruction.
2669 */
2670 fs_inst *new_load_payload = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD,
2671 load_payload->exec_size,
2672 send_header,
2673 new_sources,
2674 load_payload->sources + 1);
2675
2676 new_load_payload->regs_written = load_payload->regs_written + 1;
2677 new_load_payload->header_size = 1;
2678 tex_inst->mlen++;
2679 tex_inst->header_size = 1;
2680 tex_inst->insert_before(cfg->blocks[cfg->num_blocks - 1], new_load_payload);
2681 tex_inst->src[0] = send_header;
2682
2683 invalidate_live_intervals();
2684 return true;
2685 }
2686
2687 bool
2688 fs_visitor::opt_register_renaming()
2689 {
2690 bool progress = false;
2691 int depth = 0;
2692
2693 int remap[alloc.count];
2694 memset(remap, -1, sizeof(int) * alloc.count);
2695
2696 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2697 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2698 depth++;
2699 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2700 inst->opcode == BRW_OPCODE_WHILE) {
2701 depth--;
2702 }
2703
2704 /* Rewrite instruction sources. */
2705 for (int i = 0; i < inst->sources; i++) {
2706 if (inst->src[i].file == VGRF &&
2707 remap[inst->src[i].nr] != -1 &&
2708 remap[inst->src[i].nr] != inst->src[i].nr) {
2709 inst->src[i].nr = remap[inst->src[i].nr];
2710 progress = true;
2711 }
2712 }
2713
2714 const int dst = inst->dst.nr;
2715
2716 if (depth == 0 &&
2717 inst->dst.file == VGRF &&
2718 alloc.sizes[inst->dst.nr] == inst->regs_written &&
2719 !inst->is_partial_write()) {
2720 if (remap[dst] == -1) {
2721 remap[dst] = dst;
2722 } else {
2723 remap[dst] = alloc.allocate(inst->regs_written);
2724 inst->dst.nr = remap[dst];
2725 progress = true;
2726 }
2727 } else if (inst->dst.file == VGRF &&
2728 remap[dst] != -1 &&
2729 remap[dst] != dst) {
2730 inst->dst.nr = remap[dst];
2731 progress = true;
2732 }
2733 }
2734
2735 if (progress) {
2736 invalidate_live_intervals();
2737
2738 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2739 if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
2740 delta_xy[i].nr = remap[delta_xy[i].nr];
2741 }
2742 }
2743 }
2744
2745 return progress;
2746 }
2747
2748 /**
2749 * Remove redundant or useless discard jumps.
2750 *
2751 * For example, we can eliminate jumps in the following sequence:
2752 *
2753 * discard-jump (redundant with the next jump)
2754 * discard-jump (useless; jumps to the next instruction)
2755 * placeholder-halt
2756 */
2757 bool
2758 fs_visitor::opt_redundant_discard_jumps()
2759 {
2760 bool progress = false;
2761
2762 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2763
2764 fs_inst *placeholder_halt = NULL;
2765 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2766 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2767 placeholder_halt = inst;
2768 break;
2769 }
2770 }
2771
2772 if (!placeholder_halt)
2773 return false;
2774
2775 /* Delete any HALTs immediately before the placeholder halt. */
2776 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2777 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2778 prev = (fs_inst *) placeholder_halt->prev) {
2779 prev->remove(last_bblock);
2780 progress = true;
2781 }
2782
2783 if (progress)
2784 invalidate_live_intervals();
2785
2786 return progress;
2787 }
2788
2789 bool
2790 fs_visitor::compute_to_mrf()
2791 {
2792 bool progress = false;
2793 int next_ip = 0;
2794
2795 /* No MRFs on Gen >= 7. */
2796 if (devinfo->gen >= 7)
2797 return false;
2798
2799 calculate_live_intervals();
2800
2801 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2802 int ip = next_ip;
2803 next_ip++;
2804
2805 if (inst->opcode != BRW_OPCODE_MOV ||
2806 inst->is_partial_write() ||
2807 inst->dst.file != MRF || inst->src[0].file != VGRF ||
2808 inst->dst.type != inst->src[0].type ||
2809 inst->src[0].abs || inst->src[0].negate ||
2810 !inst->src[0].is_contiguous() ||
2811 inst->src[0].subreg_offset)
2812 continue;
2813
2814 /* Work out which hardware MRF registers are written by this
2815 * instruction.
2816 */
2817 int mrf_low = inst->dst.nr & ~BRW_MRF_COMPR4;
2818 int mrf_high;
2819 if (inst->dst.nr & BRW_MRF_COMPR4) {
2820 mrf_high = mrf_low + 4;
2821 } else if (inst->exec_size == 16) {
2822 mrf_high = mrf_low + 1;
2823 } else {
2824 mrf_high = mrf_low;
2825 }
2826
2827 /* Can't compute-to-MRF this GRF if someone else was going to
2828 * read it later.
2829 */
2830 if (this->virtual_grf_end[inst->src[0].nr] > ip)
2831 continue;
2832
2833 /* Found a move of a GRF to a MRF. Let's see if we can go
2834 * rewrite the thing that made this GRF to write into the MRF.
2835 */
2836 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
2837 if (scan_inst->dst.file == VGRF &&
2838 scan_inst->dst.nr == inst->src[0].nr) {
2839 /* Found the last thing to write our reg we want to turn
2840 * into a compute-to-MRF.
2841 */
2842
2843 /* If this one instruction didn't populate all the
2844 * channels, bail. We might be able to rewrite everything
2845 * that writes that reg, but it would require smarter
2846 * tracking to delay the rewriting until complete success.
2847 */
2848 if (scan_inst->is_partial_write())
2849 break;
2850
2851 /* Things returning more than one register would need us to
2852 * understand coalescing out more than one MOV at a time.
2853 */
2854 if (scan_inst->regs_written > scan_inst->exec_size / 8)
2855 break;
2856
2857 /* SEND instructions can't have MRF as a destination. */
2858 if (scan_inst->mlen)
2859 break;
2860
2861 if (devinfo->gen == 6) {
2862 /* gen6 math instructions must have the destination be
2863 * GRF, so no compute-to-MRF for them.
2864 */
2865 if (scan_inst->is_math()) {
2866 break;
2867 }
2868 }
2869
2870 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
2871 /* Found the creator of our MRF's source value. */
2872 scan_inst->dst.file = MRF;
2873 scan_inst->dst.nr = inst->dst.nr;
2874 scan_inst->dst.reg_offset = 0;
2875 scan_inst->saturate |= inst->saturate;
2876 inst->remove(block);
2877 progress = true;
2878 }
2879 break;
2880 }
2881
2882 /* We don't handle control flow here. Most computation of
2883 * values that end up in MRFs are shortly before the MRF
2884 * write anyway.
2885 */
2886 if (block->start() == scan_inst)
2887 break;
2888
2889 /* You can't read from an MRF, so if someone else reads our
2890 * MRF's source GRF that we wanted to rewrite, that stops us.
2891 */
2892 bool interfered = false;
2893 for (int i = 0; i < scan_inst->sources; i++) {
2894 if (scan_inst->src[i].file == VGRF &&
2895 scan_inst->src[i].nr == inst->src[0].nr &&
2896 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
2897 interfered = true;
2898 }
2899 }
2900 if (interfered)
2901 break;
2902
2903 if (scan_inst->dst.file == MRF) {
2904 /* If somebody else writes our MRF here, we can't
2905 * compute-to-MRF before that.
2906 */
2907 int scan_mrf_low = scan_inst->dst.nr & ~BRW_MRF_COMPR4;
2908 int scan_mrf_high;
2909
2910 if (scan_inst->dst.nr & BRW_MRF_COMPR4) {
2911 scan_mrf_high = scan_mrf_low + 4;
2912 } else if (scan_inst->exec_size == 16) {
2913 scan_mrf_high = scan_mrf_low + 1;
2914 } else {
2915 scan_mrf_high = scan_mrf_low;
2916 }
2917
2918 if (mrf_low == scan_mrf_low ||
2919 mrf_low == scan_mrf_high ||
2920 mrf_high == scan_mrf_low ||
2921 mrf_high == scan_mrf_high) {
2922 break;
2923 }
2924 }
2925
2926 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
2927 /* Found a SEND instruction, which means that there are
2928 * live values in MRFs from base_mrf to base_mrf +
2929 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2930 * above it.
2931 */
2932 if (mrf_low >= scan_inst->base_mrf &&
2933 mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
2934 break;
2935 }
2936 if (mrf_high >= scan_inst->base_mrf &&
2937 mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
2938 break;
2939 }
2940 }
2941 }
2942 }
2943
2944 if (progress)
2945 invalidate_live_intervals();
2946
2947 return progress;
2948 }
2949
2950 /**
2951 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2952 * flow. We could probably do better here with some form of divergence
2953 * analysis.
2954 */
2955 bool
2956 fs_visitor::eliminate_find_live_channel()
2957 {
2958 bool progress = false;
2959 unsigned depth = 0;
2960
2961 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2962 switch (inst->opcode) {
2963 case BRW_OPCODE_IF:
2964 case BRW_OPCODE_DO:
2965 depth++;
2966 break;
2967
2968 case BRW_OPCODE_ENDIF:
2969 case BRW_OPCODE_WHILE:
2970 depth--;
2971 break;
2972
2973 case FS_OPCODE_DISCARD_JUMP:
2974 /* This can potentially make control flow non-uniform until the end
2975 * of the program.
2976 */
2977 return progress;
2978
2979 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2980 if (depth == 0) {
2981 inst->opcode = BRW_OPCODE_MOV;
2982 inst->src[0] = brw_imm_ud(0u);
2983 inst->sources = 1;
2984 inst->force_writemask_all = true;
2985 progress = true;
2986 }
2987 break;
2988
2989 default:
2990 break;
2991 }
2992 }
2993
2994 return progress;
2995 }
2996
2997 /**
2998 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2999 * instructions to FS_OPCODE_REP_FB_WRITE.
3000 */
3001 void
3002 fs_visitor::emit_repclear_shader()
3003 {
3004 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3005 int base_mrf = 0;
3006 int color_mrf = base_mrf + 2;
3007 fs_inst *mov;
3008
3009 if (uniforms > 0) {
3010 mov = bld.exec_all().group(4, 0)
3011 .MOV(brw_message_reg(color_mrf),
3012 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
3013 } else {
3014 struct brw_reg reg =
3015 brw_reg(BRW_GENERAL_REGISTER_FILE, 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
3016 BRW_VERTICAL_STRIDE_8, BRW_WIDTH_2, BRW_HORIZONTAL_STRIDE_4,
3017 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
3018
3019 mov = bld.exec_all().group(4, 0)
3020 .MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg));
3021 }
3022
3023 fs_inst *write;
3024 if (key->nr_color_regions == 1) {
3025 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3026 write->saturate = key->clamp_fragment_color;
3027 write->base_mrf = color_mrf;
3028 write->target = 0;
3029 write->header_size = 0;
3030 write->mlen = 1;
3031 } else {
3032 assume(key->nr_color_regions > 0);
3033 for (int i = 0; i < key->nr_color_regions; ++i) {
3034 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
3035 write->saturate = key->clamp_fragment_color;
3036 write->base_mrf = base_mrf;
3037 write->target = i;
3038 write->header_size = 2;
3039 write->mlen = 3;
3040 }
3041 }
3042 write->eot = true;
3043
3044 calculate_cfg();
3045
3046 assign_constant_locations();
3047 assign_curb_setup();
3048
3049 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
3050 if (uniforms > 0) {
3051 assert(mov->src[0].file == FIXED_GRF);
3052 mov->src[0] = brw_vec4_grf(mov->src[0].nr, 0);
3053 }
3054 }
3055
3056 /**
3057 * Walks through basic blocks, looking for repeated MRF writes and
3058 * removing the later ones.
3059 */
3060 bool
3061 fs_visitor::remove_duplicate_mrf_writes()
3062 {
3063 fs_inst *last_mrf_move[BRW_MAX_MRF(devinfo->gen)];
3064 bool progress = false;
3065
3066 /* Need to update the MRF tracking for compressed instructions. */
3067 if (dispatch_width >= 16)
3068 return false;
3069
3070 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3071
3072 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3073 if (inst->is_control_flow()) {
3074 memset(last_mrf_move, 0, sizeof(last_mrf_move));
3075 }
3076
3077 if (inst->opcode == BRW_OPCODE_MOV &&
3078 inst->dst.file == MRF) {
3079 fs_inst *prev_inst = last_mrf_move[inst->dst.nr];
3080 if (prev_inst && inst->equals(prev_inst)) {
3081 inst->remove(block);
3082 progress = true;
3083 continue;
3084 }
3085 }
3086
3087 /* Clear out the last-write records for MRFs that were overwritten. */
3088 if (inst->dst.file == MRF) {
3089 last_mrf_move[inst->dst.nr] = NULL;
3090 }
3091
3092 if (inst->mlen > 0 && inst->base_mrf != -1) {
3093 /* Found a SEND instruction, which will include two or fewer
3094 * implied MRF writes. We could do better here.
3095 */
3096 for (int i = 0; i < implied_mrf_writes(inst); i++) {
3097 last_mrf_move[inst->base_mrf + i] = NULL;
3098 }
3099 }
3100
3101 /* Clear out any MRF move records whose sources got overwritten. */
3102 if (inst->dst.file == VGRF) {
3103 for (unsigned int i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
3104 if (last_mrf_move[i] &&
3105 last_mrf_move[i]->src[0].nr == inst->dst.nr) {
3106 last_mrf_move[i] = NULL;
3107 }
3108 }
3109 }
3110
3111 if (inst->opcode == BRW_OPCODE_MOV &&
3112 inst->dst.file == MRF &&
3113 inst->src[0].file == VGRF &&
3114 !inst->is_partial_write()) {
3115 last_mrf_move[inst->dst.nr] = inst;
3116 }
3117 }
3118
3119 if (progress)
3120 invalidate_live_intervals();
3121
3122 return progress;
3123 }
3124
3125 static void
3126 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
3127 {
3128 /* Clear the flag for registers that actually got read (as expected). */
3129 for (int i = 0; i < inst->sources; i++) {
3130 int grf;
3131 if (inst->src[i].file == VGRF || inst->src[i].file == FIXED_GRF) {
3132 grf = inst->src[i].nr;
3133 } else {
3134 continue;
3135 }
3136
3137 if (grf >= first_grf &&
3138 grf < first_grf + grf_len) {
3139 deps[grf - first_grf] = false;
3140 if (inst->exec_size == 16)
3141 deps[grf - first_grf + 1] = false;
3142 }
3143 }
3144 }
3145
3146 /**
3147 * Implements this workaround for the original 965:
3148 *
3149 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
3150 * check for post destination dependencies on this instruction, software
3151 * must ensure that there is no destination hazard for the case of ‘write
3152 * followed by a posted write’ shown in the following example.
3153 *
3154 * 1. mov r3 0
3155 * 2. send r3.xy <rest of send instruction>
3156 * 3. mov r2 r3
3157 *
3158 * Due to no post-destination dependency check on the ‘send’, the above
3159 * code sequence could have two instructions (1 and 2) in flight at the
3160 * same time that both consider ‘r3’ as the target of their final writes.
3161 */
3162 void
3163 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
3164 fs_inst *inst)
3165 {
3166 int write_len = inst->regs_written;
3167 int first_write_grf = inst->dst.nr;
3168 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3169 assert(write_len < (int)sizeof(needs_dep) - 1);
3170
3171 memset(needs_dep, false, sizeof(needs_dep));
3172 memset(needs_dep, true, write_len);
3173
3174 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
3175
3176 /* Walk backwards looking for writes to registers we're writing which
3177 * aren't read since being written. If we hit the start of the program,
3178 * we assume that there are no outstanding dependencies on entry to the
3179 * program.
3180 */
3181 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
3182 /* If we hit control flow, assume that there *are* outstanding
3183 * dependencies, and force their cleanup before our instruction.
3184 */
3185 if (block->start() == scan_inst) {
3186 for (int i = 0; i < write_len; i++) {
3187 if (needs_dep[i])
3188 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
3189 first_write_grf + i);
3190 }
3191 return;
3192 }
3193
3194 /* We insert our reads as late as possible on the assumption that any
3195 * instruction but a MOV that might have left us an outstanding
3196 * dependency has more latency than a MOV.
3197 */
3198 if (scan_inst->dst.file == VGRF) {
3199 for (int i = 0; i < scan_inst->regs_written; i++) {
3200 int reg = scan_inst->dst.nr + i;
3201
3202 if (reg >= first_write_grf &&
3203 reg < first_write_grf + write_len &&
3204 needs_dep[reg - first_write_grf]) {
3205 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
3206 needs_dep[reg - first_write_grf] = false;
3207 if (scan_inst->exec_size == 16)
3208 needs_dep[reg - first_write_grf + 1] = false;
3209 }
3210 }
3211 }
3212
3213 /* Clear the flag for registers that actually got read (as expected). */
3214 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3215
3216 /* Continue the loop only if we haven't resolved all the dependencies */
3217 int i;
3218 for (i = 0; i < write_len; i++) {
3219 if (needs_dep[i])
3220 break;
3221 }
3222 if (i == write_len)
3223 return;
3224 }
3225 }
3226
3227 /**
3228 * Implements this workaround for the original 965:
3229 *
3230 * "[DevBW, DevCL] Errata: A destination register from a send can not be
3231 * used as a destination register until after it has been sourced by an
3232 * instruction with a different destination register.
3233 */
3234 void
3235 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
3236 {
3237 int write_len = inst->regs_written;
3238 int first_write_grf = inst->dst.nr;
3239 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
3240 assert(write_len < (int)sizeof(needs_dep) - 1);
3241
3242 memset(needs_dep, false, sizeof(needs_dep));
3243 memset(needs_dep, true, write_len);
3244 /* Walk forwards looking for writes to registers we're writing which aren't
3245 * read before being written.
3246 */
3247 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst) {
3248 /* If we hit control flow, force resolve all remaining dependencies. */
3249 if (block->end() == scan_inst) {
3250 for (int i = 0; i < write_len; i++) {
3251 if (needs_dep[i])
3252 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3253 first_write_grf + i);
3254 }
3255 return;
3256 }
3257
3258 /* Clear the flag for registers that actually got read (as expected). */
3259 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
3260
3261 /* We insert our reads as late as possible since they're reading the
3262 * result of a SEND, which has massive latency.
3263 */
3264 if (scan_inst->dst.file == VGRF &&
3265 scan_inst->dst.nr >= first_write_grf &&
3266 scan_inst->dst.nr < first_write_grf + write_len &&
3267 needs_dep[scan_inst->dst.nr - first_write_grf]) {
3268 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
3269 scan_inst->dst.nr);
3270 needs_dep[scan_inst->dst.nr - first_write_grf] = false;
3271 }
3272
3273 /* Continue the loop only if we haven't resolved all the dependencies */
3274 int i;
3275 for (i = 0; i < write_len; i++) {
3276 if (needs_dep[i])
3277 break;
3278 }
3279 if (i == write_len)
3280 return;
3281 }
3282 }
3283
3284 void
3285 fs_visitor::insert_gen4_send_dependency_workarounds()
3286 {
3287 if (devinfo->gen != 4 || devinfo->is_g4x)
3288 return;
3289
3290 bool progress = false;
3291
3292 /* Note that we're done with register allocation, so GRF fs_regs always
3293 * have a .reg_offset of 0.
3294 */
3295
3296 foreach_block_and_inst(block, fs_inst, inst, cfg) {
3297 if (inst->mlen != 0 && inst->dst.file == VGRF) {
3298 insert_gen4_pre_send_dependency_workarounds(block, inst);
3299 insert_gen4_post_send_dependency_workarounds(block, inst);
3300 progress = true;
3301 }
3302 }
3303
3304 if (progress)
3305 invalidate_live_intervals();
3306 }
3307
3308 /**
3309 * Turns the generic expression-style uniform pull constant load instruction
3310 * into a hardware-specific series of instructions for loading a pull
3311 * constant.
3312 *
3313 * The expression style allows the CSE pass before this to optimize out
3314 * repeated loads from the same offset, and gives the pre-register-allocation
3315 * scheduling full flexibility, while the conversion to native instructions
3316 * allows the post-register-allocation scheduler the best information
3317 * possible.
3318 *
3319 * Note that execution masking for setting up pull constant loads is special:
3320 * the channels that need to be written are unrelated to the current execution
3321 * mask, since a later instruction will use one of the result channels as a
3322 * source operand for all 8 or 16 of its channels.
3323 */
3324 void
3325 fs_visitor::lower_uniform_pull_constant_loads()
3326 {
3327 foreach_block_and_inst (block, fs_inst, inst, cfg) {
3328 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
3329 continue;
3330
3331 if (devinfo->gen >= 7) {
3332 /* The offset arg is a vec4-aligned immediate byte offset. */
3333 fs_reg const_offset_reg = inst->src[1];
3334 assert(const_offset_reg.file == IMM &&
3335 const_offset_reg.type == BRW_REGISTER_TYPE_UD);
3336 assert(const_offset_reg.ud % 16 == 0);
3337
3338 fs_reg payload, offset;
3339 if (devinfo->gen >= 9) {
3340 /* We have to use a message header on Skylake to get SIMD4x2
3341 * mode. Reserve space for the register.
3342 */
3343 offset = payload = fs_reg(VGRF, alloc.allocate(2));
3344 offset.reg_offset++;
3345 inst->mlen = 2;
3346 } else {
3347 offset = payload = fs_reg(VGRF, alloc.allocate(1));
3348 inst->mlen = 1;
3349 }
3350
3351 /* This is actually going to be a MOV, but since only the first dword
3352 * is accessed, we have a special opcode to do just that one. Note
3353 * that this needs to be an operation that will be considered a def
3354 * by live variable analysis, or register allocation will explode.
3355 */
3356 fs_inst *setup = new(mem_ctx) fs_inst(FS_OPCODE_SET_SIMD4X2_OFFSET,
3357 8, offset, const_offset_reg);
3358 setup->force_writemask_all = true;
3359
3360 setup->ir = inst->ir;
3361 setup->annotation = inst->annotation;
3362 inst->insert_before(block, setup);
3363
3364 /* Similarly, this will only populate the first 4 channels of the
3365 * result register (since we only use smear values from 0-3), but we
3366 * don't tell the optimizer.
3367 */
3368 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3369 inst->src[1] = payload;
3370 inst->base_mrf = -1;
3371
3372 invalidate_live_intervals();
3373 } else {
3374 /* Before register allocation, we didn't tell the scheduler about the
3375 * MRF we use. We know it's safe to use this MRF because nothing
3376 * else does except for register spill/unspill, which generates and
3377 * uses its MRF within a single IR instruction.
3378 */
3379 inst->base_mrf = FIRST_PULL_LOAD_MRF(devinfo->gen) + 1;
3380 inst->mlen = 1;
3381 }
3382 }
3383 }
3384
3385 bool
3386 fs_visitor::lower_load_payload()
3387 {
3388 bool progress = false;
3389
3390 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3391 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3392 continue;
3393
3394 assert(inst->dst.file == MRF || inst->dst.file == VGRF);
3395 assert(inst->saturate == false);
3396 fs_reg dst = inst->dst;
3397
3398 /* Get rid of COMPR4. We'll add it back in if we need it */
3399 if (dst.file == MRF)
3400 dst.nr = dst.nr & ~BRW_MRF_COMPR4;
3401
3402 const fs_builder ibld(this, block, inst);
3403 const fs_builder hbld = ibld.exec_all().group(8, 0);
3404
3405 for (uint8_t i = 0; i < inst->header_size; i++) {
3406 if (inst->src[i].file != BAD_FILE) {
3407 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3408 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3409 hbld.MOV(mov_dst, mov_src);
3410 }
3411 dst = offset(dst, hbld, 1);
3412 }
3413
3414 if (inst->dst.file == MRF && (inst->dst.nr & BRW_MRF_COMPR4) &&
3415 inst->exec_size > 8) {
3416 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3417 * a straightforward copy. Instead, the result of the
3418 * LOAD_PAYLOAD is treated as interleaved and the first four
3419 * non-header sources are unpacked as:
3420 *
3421 * m + 0: r0
3422 * m + 1: g0
3423 * m + 2: b0
3424 * m + 3: a0
3425 * m + 4: r1
3426 * m + 5: g1
3427 * m + 6: b1
3428 * m + 7: a1
3429 *
3430 * This is used for gen <= 5 fb writes.
3431 */
3432 assert(inst->exec_size == 16);
3433 assert(inst->header_size + 4 <= inst->sources);
3434 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3435 if (inst->src[i].file != BAD_FILE) {
3436 if (devinfo->has_compr4) {
3437 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3438 compr4_dst.nr |= BRW_MRF_COMPR4;
3439 ibld.MOV(compr4_dst, inst->src[i]);
3440 } else {
3441 /* Platform doesn't have COMPR4. We have to fake it */
3442 fs_reg mov_dst = retype(dst, inst->src[i].type);
3443 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3444 mov_dst.nr += 4;
3445 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3446 }
3447 }
3448
3449 dst.nr++;
3450 }
3451
3452 /* The loop above only ever incremented us through the first set
3453 * of 4 registers. However, thanks to the magic of COMPR4, we
3454 * actually wrote to the first 8 registers, so we need to take
3455 * that into account now.
3456 */
3457 dst.nr += 4;
3458
3459 /* The COMPR4 code took care of the first 4 sources. We'll let
3460 * the regular path handle any remaining sources. Yes, we are
3461 * modifying the instruction but we're about to delete it so
3462 * this really doesn't hurt anything.
3463 */
3464 inst->header_size += 4;
3465 }
3466
3467 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3468 if (inst->src[i].file != BAD_FILE)
3469 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3470 dst = offset(dst, ibld, 1);
3471 }
3472
3473 inst->remove(block);
3474 progress = true;
3475 }
3476
3477 if (progress)
3478 invalidate_live_intervals();
3479
3480 return progress;
3481 }
3482
3483 bool
3484 fs_visitor::lower_integer_multiplication()
3485 {
3486 bool progress = false;
3487
3488 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3489 const fs_builder ibld(this, block, inst);
3490
3491 if (inst->opcode == BRW_OPCODE_MUL) {
3492 if (inst->dst.is_accumulator() ||
3493 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3494 inst->dst.type != BRW_REGISTER_TYPE_UD))
3495 continue;
3496
3497 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3498 * operation directly, but CHV/BXT cannot.
3499 */
3500 if (devinfo->gen >= 8 &&
3501 !devinfo->is_cherryview && !devinfo->is_broxton)
3502 continue;
3503
3504 if (inst->src[1].file == IMM &&
3505 inst->src[1].ud < (1 << 16)) {
3506 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3507 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3508 * src1 are used.
3509 *
3510 * If multiplying by an immediate value that fits in 16-bits, do a
3511 * single MUL instruction with that value in the proper location.
3512 */
3513 if (devinfo->gen < 7) {
3514 fs_reg imm(VGRF, alloc.allocate(dispatch_width / 8),
3515 inst->dst.type);
3516 ibld.MOV(imm, inst->src[1]);
3517 ibld.MUL(inst->dst, imm, inst->src[0]);
3518 } else {
3519 ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
3520 }
3521 } else {
3522 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3523 * do 32-bit integer multiplication in one instruction, but instead
3524 * must do a sequence (which actually calculates a 64-bit result):
3525 *
3526 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3527 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3528 * mov(8) g2<1>D acc0<8,8,1>D
3529 *
3530 * But on Gen > 6, the ability to use second accumulator register
3531 * (acc1) for non-float data types was removed, preventing a simple
3532 * implementation in SIMD16. A 16-channel result can be calculated by
3533 * executing the three instructions twice in SIMD8, once with quarter
3534 * control of 1Q for the first eight channels and again with 2Q for
3535 * the second eight channels.
3536 *
3537 * Which accumulator register is implicitly accessed (by AccWrEnable
3538 * for instance) is determined by the quarter control. Unfortunately
3539 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3540 * implicit accumulator access by an instruction with 2Q will access
3541 * acc1 regardless of whether the data type is usable in acc1.
3542 *
3543 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3544 * integer data types.
3545 *
3546 * Since we only want the low 32-bits of the result, we can do two
3547 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3548 * adjust the high result and add them (like the mach is doing):
3549 *
3550 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3551 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3552 * shl(8) g9<1>D g8<8,8,1>D 16D
3553 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3554 *
3555 * We avoid the shl instruction by realizing that we only want to add
3556 * the low 16-bits of the "high" result to the high 16-bits of the
3557 * "low" result and using proper regioning on the add:
3558 *
3559 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3560 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3561 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3562 *
3563 * Since it does not use the (single) accumulator register, we can
3564 * schedule multi-component multiplications much better.
3565 */
3566
3567 fs_reg orig_dst = inst->dst;
3568 if (orig_dst.is_null() || orig_dst.file == MRF) {
3569 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
3570 inst->dst.type);
3571 }
3572 fs_reg low = inst->dst;
3573 fs_reg high(VGRF, alloc.allocate(dispatch_width / 8),
3574 inst->dst.type);
3575
3576 if (devinfo->gen >= 7) {
3577 fs_reg src1_0_w = inst->src[1];
3578 fs_reg src1_1_w = inst->src[1];
3579
3580 if (inst->src[1].file == IMM) {
3581 src1_0_w.ud &= 0xffff;
3582 src1_1_w.ud >>= 16;
3583 } else {
3584 src1_0_w.type = BRW_REGISTER_TYPE_UW;
3585 if (src1_0_w.stride != 0) {
3586 assert(src1_0_w.stride == 1);
3587 src1_0_w.stride = 2;
3588 }
3589
3590 src1_1_w.type = BRW_REGISTER_TYPE_UW;
3591 if (src1_1_w.stride != 0) {
3592 assert(src1_1_w.stride == 1);
3593 src1_1_w.stride = 2;
3594 }
3595 src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3596 }
3597 ibld.MUL(low, inst->src[0], src1_0_w);
3598 ibld.MUL(high, inst->src[0], src1_1_w);
3599 } else {
3600 fs_reg src0_0_w = inst->src[0];
3601 fs_reg src0_1_w = inst->src[0];
3602
3603 src0_0_w.type = BRW_REGISTER_TYPE_UW;
3604 if (src0_0_w.stride != 0) {
3605 assert(src0_0_w.stride == 1);
3606 src0_0_w.stride = 2;
3607 }
3608
3609 src0_1_w.type = BRW_REGISTER_TYPE_UW;
3610 if (src0_1_w.stride != 0) {
3611 assert(src0_1_w.stride == 1);
3612 src0_1_w.stride = 2;
3613 }
3614 src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3615
3616 ibld.MUL(low, src0_0_w, inst->src[1]);
3617 ibld.MUL(high, src0_1_w, inst->src[1]);
3618 }
3619
3620 fs_reg dst = inst->dst;
3621 dst.type = BRW_REGISTER_TYPE_UW;
3622 dst.subreg_offset = 2;
3623 dst.stride = 2;
3624
3625 high.type = BRW_REGISTER_TYPE_UW;
3626 high.stride = 2;
3627
3628 low.type = BRW_REGISTER_TYPE_UW;
3629 low.subreg_offset = 2;
3630 low.stride = 2;
3631
3632 ibld.ADD(dst, low, high);
3633
3634 if (inst->conditional_mod || orig_dst.file == MRF) {
3635 set_condmod(inst->conditional_mod,
3636 ibld.MOV(orig_dst, inst->dst));
3637 }
3638 }
3639
3640 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3641 /* Should have been lowered to 8-wide. */
3642 assert(inst->exec_size <= 8);
3643 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3644 inst->dst.type);
3645 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3646 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3647
3648 if (devinfo->gen >= 8) {
3649 /* Until Gen8, integer multiplies read 32-bits from one source,
3650 * and 16-bits from the other, and relying on the MACH instruction
3651 * to generate the high bits of the result.
3652 *
3653 * On Gen8, the multiply instruction does a full 32x32-bit
3654 * multiply, but in order to do a 64-bit multiply we can simulate
3655 * the previous behavior and then use a MACH instruction.
3656 *
3657 * FINISHME: Don't use source modifiers on src1.
3658 */
3659 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3660 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3661 mul->src[1].type = BRW_REGISTER_TYPE_UW;
3662 mul->src[1].stride *= 2;
3663
3664 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3665 inst->group > 0) {
3666 /* Among other things the quarter control bits influence which
3667 * accumulator register is used by the hardware for instructions
3668 * that access the accumulator implicitly (e.g. MACH). A
3669 * second-half instruction would normally map to acc1, which
3670 * doesn't exist on Gen7 and up (the hardware does emulate it for
3671 * floating-point instructions *only* by taking advantage of the
3672 * extra precision of acc0 not normally used for floating point
3673 * arithmetic).
3674 *
3675 * HSW and up are careful enough not to try to access an
3676 * accumulator register that doesn't exist, but on earlier Gen7
3677 * hardware we need to make sure that the quarter control bits are
3678 * zero to avoid non-deterministic behaviour and emit an extra MOV
3679 * to get the result masked correctly according to the current
3680 * channel enables.
3681 */
3682 mach->group = 0;
3683 mach->force_writemask_all = true;
3684 mach->dst = ibld.vgrf(inst->dst.type);
3685 ibld.MOV(inst->dst, mach->dst);
3686 }
3687 } else {
3688 continue;
3689 }
3690
3691 inst->remove(block);
3692 progress = true;
3693 }
3694
3695 if (progress)
3696 invalidate_live_intervals();
3697
3698 return progress;
3699 }
3700
3701 bool
3702 fs_visitor::lower_minmax()
3703 {
3704 assert(devinfo->gen < 6);
3705
3706 bool progress = false;
3707
3708 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3709 const fs_builder ibld(this, block, inst);
3710
3711 if (inst->opcode == BRW_OPCODE_SEL &&
3712 inst->predicate == BRW_PREDICATE_NONE) {
3713 /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
3714 * the original SEL.L/GE instruction
3715 */
3716 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
3717 inst->conditional_mod);
3718 inst->predicate = BRW_PREDICATE_NORMAL;
3719 inst->conditional_mod = BRW_CONDITIONAL_NONE;
3720
3721 progress = true;
3722 }
3723 }
3724
3725 if (progress)
3726 invalidate_live_intervals();
3727
3728 return progress;
3729 }
3730
3731 static void
3732 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3733 fs_reg *dst, fs_reg color, unsigned components)
3734 {
3735 if (key->clamp_fragment_color) {
3736 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3737 assert(color.type == BRW_REGISTER_TYPE_F);
3738
3739 for (unsigned i = 0; i < components; i++)
3740 set_saturate(true,
3741 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3742
3743 color = tmp;
3744 }
3745
3746 for (unsigned i = 0; i < components; i++)
3747 dst[i] = offset(color, bld, i);
3748 }
3749
3750 static void
3751 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3752 const brw_wm_prog_data *prog_data,
3753 const brw_wm_prog_key *key,
3754 const fs_visitor::thread_payload &payload)
3755 {
3756 assert(inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].file == IMM);
3757 const brw_device_info *devinfo = bld.shader->devinfo;
3758 const fs_reg &color0 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR0];
3759 const fs_reg &color1 = inst->src[FB_WRITE_LOGICAL_SRC_COLOR1];
3760 const fs_reg &src0_alpha = inst->src[FB_WRITE_LOGICAL_SRC_SRC0_ALPHA];
3761 const fs_reg &src_depth = inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH];
3762 const fs_reg &dst_depth = inst->src[FB_WRITE_LOGICAL_SRC_DST_DEPTH];
3763 const fs_reg &src_stencil = inst->src[FB_WRITE_LOGICAL_SRC_SRC_STENCIL];
3764 fs_reg sample_mask = inst->src[FB_WRITE_LOGICAL_SRC_OMASK];
3765 const unsigned components =
3766 inst->src[FB_WRITE_LOGICAL_SRC_COMPONENTS].ud;
3767
3768 /* We can potentially have a message length of up to 15, so we have to set
3769 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3770 */
3771 fs_reg sources[15];
3772 int header_size = 2, payload_header_size;
3773 unsigned length = 0;
3774
3775 /* From the Sandy Bridge PRM, volume 4, page 198:
3776 *
3777 * "Dispatched Pixel Enables. One bit per pixel indicating
3778 * which pixels were originally enabled when the thread was
3779 * dispatched. This field is only required for the end-of-
3780 * thread message and on all dual-source messages."
3781 */
3782 if (devinfo->gen >= 6 &&
3783 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3784 color1.file == BAD_FILE &&
3785 key->nr_color_regions == 1) {
3786 header_size = 0;
3787 }
3788
3789 if (header_size != 0) {
3790 assert(header_size == 2);
3791 /* Allocate 2 registers for a header */
3792 length += 2;
3793 }
3794
3795 if (payload.aa_dest_stencil_reg) {
3796 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
3797 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3798 .MOV(sources[length],
3799 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3800 length++;
3801 }
3802
3803 if (sample_mask.file != BAD_FILE) {
3804 sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1),
3805 BRW_REGISTER_TYPE_UD);
3806
3807 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3808 * relevant. Since it's unsigned single words one vgrf is always
3809 * 16-wide, but only the lower or higher 8 channels will be used by the
3810 * hardware when doing a SIMD8 write depending on whether we have
3811 * selected the subspans for the first or second half respectively.
3812 */
3813 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3814 sample_mask.type = BRW_REGISTER_TYPE_UW;
3815 sample_mask.stride *= 2;
3816
3817 bld.exec_all().annotate("FB write oMask")
3818 .MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
3819 inst->group),
3820 sample_mask);
3821 length++;
3822 }
3823
3824 payload_header_size = length;
3825
3826 if (src0_alpha.file != BAD_FILE) {
3827 /* FIXME: This is being passed at the wrong location in the payload and
3828 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3829 * It's supposed to be immediately before oMask but there seems to be no
3830 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3831 * requires header sources to form a contiguous segment at the beginning
3832 * of the message and src0_alpha has per-channel semantics.
3833 */
3834 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3835 length++;
3836 }
3837
3838 setup_color_payload(bld, key, &sources[length], color0, components);
3839 length += 4;
3840
3841 if (color1.file != BAD_FILE) {
3842 setup_color_payload(bld, key, &sources[length], color1, components);
3843 length += 4;
3844 }
3845
3846 if (src_depth.file != BAD_FILE) {
3847 sources[length] = src_depth;
3848 length++;
3849 }
3850
3851 if (dst_depth.file != BAD_FILE) {
3852 sources[length] = dst_depth;
3853 length++;
3854 }
3855
3856 if (src_stencil.file != BAD_FILE) {
3857 assert(devinfo->gen >= 9);
3858 assert(bld.dispatch_width() != 16);
3859
3860 /* XXX: src_stencil is only available on gen9+. dst_depth is never
3861 * available on gen9+. As such it's impossible to have both enabled at the
3862 * same time and therefore length cannot overrun the array.
3863 */
3864 assert(length < 15);
3865
3866 sources[length] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3867 bld.exec_all().annotate("FB write OS")
3868 .MOV(retype(sources[length], BRW_REGISTER_TYPE_UB),
3869 subscript(src_stencil, BRW_REGISTER_TYPE_UB, 0));
3870 length++;
3871 }
3872
3873 fs_inst *load;
3874 if (devinfo->gen >= 7) {
3875 /* Send from the GRF */
3876 fs_reg payload = fs_reg(VGRF, -1, BRW_REGISTER_TYPE_F);
3877 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3878 payload.nr = bld.shader->alloc.allocate(load->regs_written);
3879 load->dst = payload;
3880
3881 inst->src[0] = payload;
3882 inst->resize_sources(1);
3883 inst->base_mrf = -1;
3884 } else {
3885 /* Send from the MRF */
3886 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3887 sources, length, payload_header_size);
3888
3889 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3890 * will do this for us if we just give it a COMPR4 destination.
3891 */
3892 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3893 load->dst.nr |= BRW_MRF_COMPR4;
3894
3895 inst->resize_sources(0);
3896 inst->base_mrf = 1;
3897 }
3898
3899 inst->opcode = FS_OPCODE_FB_WRITE;
3900 inst->mlen = load->regs_written;
3901 inst->header_size = header_size;
3902 }
3903
3904 static void
3905 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3906 const fs_reg &coordinate,
3907 const fs_reg &shadow_c,
3908 const fs_reg &lod, const fs_reg &lod2,
3909 const fs_reg &surface,
3910 const fs_reg &sampler,
3911 unsigned coord_components,
3912 unsigned grad_components)
3913 {
3914 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3915 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3916 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3917 fs_reg msg_end = msg_begin;
3918
3919 /* g0 header. */
3920 msg_end = offset(msg_end, bld.group(8, 0), 1);
3921
3922 for (unsigned i = 0; i < coord_components; i++)
3923 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3924 offset(coordinate, bld, i));
3925
3926 msg_end = offset(msg_end, bld, coord_components);
3927
3928 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
3929 * require all three components to be present and zero if they are unused.
3930 */
3931 if (coord_components > 0 &&
3932 (has_lod || shadow_c.file != BAD_FILE ||
3933 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3934 for (unsigned i = coord_components; i < 3; i++)
3935 bld.MOV(offset(msg_end, bld, i), brw_imm_f(0.0f));
3936
3937 msg_end = offset(msg_end, bld, 3 - coord_components);
3938 }
3939
3940 if (op == SHADER_OPCODE_TXD) {
3941 /* TXD unsupported in SIMD16 mode. */
3942 assert(bld.dispatch_width() == 8);
3943
3944 /* the slots for u and v are always present, but r is optional */
3945 if (coord_components < 2)
3946 msg_end = offset(msg_end, bld, 2 - coord_components);
3947
3948 /* P = u, v, r
3949 * dPdx = dudx, dvdx, drdx
3950 * dPdy = dudy, dvdy, drdy
3951 *
3952 * 1-arg: Does not exist.
3953 *
3954 * 2-arg: dudx dvdx dudy dvdy
3955 * dPdx.x dPdx.y dPdy.x dPdy.y
3956 * m4 m5 m6 m7
3957 *
3958 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3959 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3960 * m5 m6 m7 m8 m9 m10
3961 */
3962 for (unsigned i = 0; i < grad_components; i++)
3963 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3964
3965 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3966
3967 for (unsigned i = 0; i < grad_components; i++)
3968 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3969
3970 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3971 }
3972
3973 if (has_lod) {
3974 /* Bias/LOD with shadow comparitor is unsupported in SIMD16 -- *Without*
3975 * shadow comparitor (including RESINFO) it's unsupported in SIMD8 mode.
3976 */
3977 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3978 bld.dispatch_width() == 16);
3979
3980 const brw_reg_type type =
3981 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3982 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3983 bld.MOV(retype(msg_end, type), lod);
3984 msg_end = offset(msg_end, bld, 1);
3985 }
3986
3987 if (shadow_c.file != BAD_FILE) {
3988 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3989 /* There's no plain shadow compare message, so we use shadow
3990 * compare with a bias of 0.0.
3991 */
3992 bld.MOV(msg_end, brw_imm_f(0.0f));
3993 msg_end = offset(msg_end, bld, 1);
3994 }
3995
3996 bld.MOV(msg_end, shadow_c);
3997 msg_end = offset(msg_end, bld, 1);
3998 }
3999
4000 inst->opcode = op;
4001 inst->src[0] = reg_undef;
4002 inst->src[1] = surface;
4003 inst->src[2] = sampler;
4004 inst->resize_sources(3);
4005 inst->base_mrf = msg_begin.nr;
4006 inst->mlen = msg_end.nr - msg_begin.nr;
4007 inst->header_size = 1;
4008 }
4009
4010 static void
4011 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
4012 const fs_reg &coordinate,
4013 const fs_reg &shadow_c,
4014 const fs_reg &lod, const fs_reg &lod2,
4015 const fs_reg &sample_index,
4016 const fs_reg &surface,
4017 const fs_reg &sampler,
4018 const fs_reg &offset_value,
4019 unsigned coord_components,
4020 unsigned grad_components)
4021 {
4022 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
4023 fs_reg msg_coords = message;
4024 unsigned header_size = 0;
4025
4026 if (offset_value.file != BAD_FILE) {
4027 /* The offsets set up by the visitor are in the m1 header, so we can't
4028 * go headerless.
4029 */
4030 header_size = 1;
4031 message.nr--;
4032 }
4033
4034 for (unsigned i = 0; i < coord_components; i++)
4035 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type),
4036 offset(coordinate, bld, i));
4037
4038 fs_reg msg_end = offset(msg_coords, bld, coord_components);
4039 fs_reg msg_lod = offset(msg_coords, bld, 4);
4040
4041 if (shadow_c.file != BAD_FILE) {
4042 fs_reg msg_shadow = msg_lod;
4043 bld.MOV(msg_shadow, shadow_c);
4044 msg_lod = offset(msg_shadow, bld, 1);
4045 msg_end = msg_lod;
4046 }
4047
4048 switch (op) {
4049 case SHADER_OPCODE_TXL:
4050 case FS_OPCODE_TXB:
4051 bld.MOV(msg_lod, lod);
4052 msg_end = offset(msg_lod, bld, 1);
4053 break;
4054 case SHADER_OPCODE_TXD:
4055 /**
4056 * P = u, v, r
4057 * dPdx = dudx, dvdx, drdx
4058 * dPdy = dudy, dvdy, drdy
4059 *
4060 * Load up these values:
4061 * - dudx dudy dvdx dvdy drdx drdy
4062 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
4063 */
4064 msg_end = msg_lod;
4065 for (unsigned i = 0; i < grad_components; i++) {
4066 bld.MOV(msg_end, offset(lod, bld, i));
4067 msg_end = offset(msg_end, bld, 1);
4068
4069 bld.MOV(msg_end, offset(lod2, bld, i));
4070 msg_end = offset(msg_end, bld, 1);
4071 }
4072 break;
4073 case SHADER_OPCODE_TXS:
4074 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
4075 bld.MOV(msg_lod, lod);
4076 msg_end = offset(msg_lod, bld, 1);
4077 break;
4078 case SHADER_OPCODE_TXF:
4079 msg_lod = offset(msg_coords, bld, 3);
4080 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
4081 msg_end = offset(msg_lod, bld, 1);
4082 break;
4083 case SHADER_OPCODE_TXF_CMS:
4084 msg_lod = offset(msg_coords, bld, 3);
4085 /* lod */
4086 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
4087 /* sample index */
4088 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
4089 msg_end = offset(msg_lod, bld, 2);
4090 break;
4091 default:
4092 break;
4093 }
4094
4095 inst->opcode = op;
4096 inst->src[0] = reg_undef;
4097 inst->src[1] = surface;
4098 inst->src[2] = sampler;
4099 inst->resize_sources(3);
4100 inst->base_mrf = message.nr;
4101 inst->mlen = msg_end.nr - message.nr;
4102 inst->header_size = header_size;
4103
4104 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4105 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4106 }
4107
4108 static bool
4109 is_high_sampler(const struct brw_device_info *devinfo, const fs_reg &sampler)
4110 {
4111 if (devinfo->gen < 8 && !devinfo->is_haswell)
4112 return false;
4113
4114 return sampler.file != IMM || sampler.ud >= 16;
4115 }
4116
4117 static void
4118 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
4119 const fs_reg &coordinate,
4120 const fs_reg &shadow_c,
4121 fs_reg lod, const fs_reg &lod2,
4122 const fs_reg &sample_index,
4123 const fs_reg &mcs,
4124 const fs_reg &surface,
4125 const fs_reg &sampler,
4126 const fs_reg &offset_value,
4127 unsigned coord_components,
4128 unsigned grad_components)
4129 {
4130 const brw_device_info *devinfo = bld.shader->devinfo;
4131 int reg_width = bld.dispatch_width() / 8;
4132 unsigned header_size = 0, length = 0;
4133 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
4134 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
4135 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
4136
4137 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
4138 offset_value.file != BAD_FILE ||
4139 op == SHADER_OPCODE_SAMPLEINFO ||
4140 is_high_sampler(devinfo, sampler)) {
4141 /* For general texture offsets (no txf workaround), we need a header to
4142 * put them in. Note that we're only reserving space for it in the
4143 * message payload as it will be initialized implicitly by the
4144 * generator.
4145 *
4146 * TG4 needs to place its channel select in the header, for interaction
4147 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
4148 * larger sampler numbers we need to offset the Sampler State Pointer in
4149 * the header.
4150 */
4151 header_size = 1;
4152 sources[0] = fs_reg();
4153 length++;
4154
4155 /* If we're requesting fewer than four channels worth of response,
4156 * and we have an explicit header, we need to set up the sampler
4157 * writemask. It's reversed from normal: 1 means "don't write".
4158 */
4159 if (inst->regs_written != 4 * reg_width) {
4160 assert((inst->regs_written % reg_width) == 0);
4161 unsigned mask = ~((1 << (inst->regs_written / reg_width)) - 1) & 0xf;
4162 inst->offset |= mask << 12;
4163 }
4164 }
4165
4166 if (shadow_c.file != BAD_FILE) {
4167 bld.MOV(sources[length], shadow_c);
4168 length++;
4169 }
4170
4171 bool coordinate_done = false;
4172
4173 /* The sampler can only meaningfully compute LOD for fragment shader
4174 * messages. For all other stages, we change the opcode to TXL and
4175 * hardcode the LOD to 0.
4176 */
4177 if (bld.shader->stage != MESA_SHADER_FRAGMENT &&
4178 op == SHADER_OPCODE_TEX) {
4179 op = SHADER_OPCODE_TXL;
4180 lod = brw_imm_f(0.0f);
4181 }
4182
4183 /* Set up the LOD info */
4184 switch (op) {
4185 case FS_OPCODE_TXB:
4186 case SHADER_OPCODE_TXL:
4187 if (devinfo->gen >= 9 && op == SHADER_OPCODE_TXL && lod.is_zero()) {
4188 op = SHADER_OPCODE_TXL_LZ;
4189 break;
4190 }
4191 bld.MOV(sources[length], lod);
4192 length++;
4193 break;
4194 case SHADER_OPCODE_TXD:
4195 /* TXD should have been lowered in SIMD16 mode. */
4196 assert(bld.dispatch_width() == 8);
4197
4198 /* Load dPdx and the coordinate together:
4199 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
4200 */
4201 for (unsigned i = 0; i < coord_components; i++) {
4202 bld.MOV(sources[length++], offset(coordinate, bld, i));
4203
4204 /* For cube map array, the coordinate is (u,v,r,ai) but there are
4205 * only derivatives for (u, v, r).
4206 */
4207 if (i < grad_components) {
4208 bld.MOV(sources[length++], offset(lod, bld, i));
4209 bld.MOV(sources[length++], offset(lod2, bld, i));
4210 }
4211 }
4212
4213 coordinate_done = true;
4214 break;
4215 case SHADER_OPCODE_TXS:
4216 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
4217 length++;
4218 break;
4219 case SHADER_OPCODE_TXF:
4220 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
4221 * On Gen9 they are u, v, lod, r
4222 */
4223 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D), coordinate);
4224
4225 if (devinfo->gen >= 9) {
4226 if (coord_components >= 2) {
4227 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D),
4228 offset(coordinate, bld, 1));
4229 }
4230 length++;
4231 }
4232
4233 if (devinfo->gen >= 9 && lod.is_zero()) {
4234 op = SHADER_OPCODE_TXF_LZ;
4235 } else {
4236 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
4237 length++;
4238 }
4239
4240 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++)
4241 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4242 offset(coordinate, bld, i));
4243
4244 coordinate_done = true;
4245 break;
4246
4247 case SHADER_OPCODE_TXF_CMS:
4248 case SHADER_OPCODE_TXF_CMS_W:
4249 case SHADER_OPCODE_TXF_UMS:
4250 case SHADER_OPCODE_TXF_MCS:
4251 if (op == SHADER_OPCODE_TXF_UMS ||
4252 op == SHADER_OPCODE_TXF_CMS ||
4253 op == SHADER_OPCODE_TXF_CMS_W) {
4254 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
4255 length++;
4256 }
4257
4258 if (op == SHADER_OPCODE_TXF_CMS || op == SHADER_OPCODE_TXF_CMS_W) {
4259 /* Data from the multisample control surface. */
4260 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
4261 length++;
4262
4263 /* On Gen9+ we'll use ld2dms_w instead which has two registers for
4264 * the MCS data.
4265 */
4266 if (op == SHADER_OPCODE_TXF_CMS_W) {
4267 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD),
4268 mcs.file == IMM ?
4269 mcs :
4270 offset(mcs, bld, 1));
4271 length++;
4272 }
4273 }
4274
4275 /* There is no offsetting for this message; just copy in the integer
4276 * texture coordinates.
4277 */
4278 for (unsigned i = 0; i < coord_components; i++)
4279 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4280 offset(coordinate, bld, i));
4281
4282 coordinate_done = true;
4283 break;
4284 case SHADER_OPCODE_TG4_OFFSET:
4285 /* gather4_po_c should have been lowered in SIMD16 mode. */
4286 assert(bld.dispatch_width() == 8 || shadow_c.file == BAD_FILE);
4287
4288 /* More crazy intermixing */
4289 for (unsigned i = 0; i < 2; i++) /* u, v */
4290 bld.MOV(sources[length++], offset(coordinate, bld, i));
4291
4292 for (unsigned i = 0; i < 2; i++) /* offu, offv */
4293 bld.MOV(retype(sources[length++], BRW_REGISTER_TYPE_D),
4294 offset(offset_value, bld, i));
4295
4296 if (coord_components == 3) /* r if present */
4297 bld.MOV(sources[length++], offset(coordinate, bld, 2));
4298
4299 coordinate_done = true;
4300 break;
4301 default:
4302 break;
4303 }
4304
4305 /* Set up the coordinate (except for cases where it was done above) */
4306 if (!coordinate_done) {
4307 for (unsigned i = 0; i < coord_components; i++)
4308 bld.MOV(sources[length++], offset(coordinate, bld, i));
4309 }
4310
4311 int mlen;
4312 if (reg_width == 2)
4313 mlen = length * reg_width - header_size;
4314 else
4315 mlen = length * reg_width;
4316
4317 const fs_reg src_payload = fs_reg(VGRF, bld.shader->alloc.allocate(mlen),
4318 BRW_REGISTER_TYPE_F);
4319 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
4320
4321 /* Generate the SEND. */
4322 inst->opcode = op;
4323 inst->src[0] = src_payload;
4324 inst->src[1] = surface;
4325 inst->src[2] = sampler;
4326 inst->resize_sources(3);
4327 inst->base_mrf = -1;
4328 inst->mlen = mlen;
4329 inst->header_size = header_size;
4330
4331 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
4332 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
4333 }
4334
4335 static void
4336 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
4337 {
4338 const brw_device_info *devinfo = bld.shader->devinfo;
4339 const fs_reg &coordinate = inst->src[TEX_LOGICAL_SRC_COORDINATE];
4340 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4341 const fs_reg &lod = inst->src[TEX_LOGICAL_SRC_LOD];
4342 const fs_reg &lod2 = inst->src[TEX_LOGICAL_SRC_LOD2];
4343 const fs_reg &sample_index = inst->src[TEX_LOGICAL_SRC_SAMPLE_INDEX];
4344 const fs_reg &mcs = inst->src[TEX_LOGICAL_SRC_MCS];
4345 const fs_reg &surface = inst->src[TEX_LOGICAL_SRC_SURFACE];
4346 const fs_reg &sampler = inst->src[TEX_LOGICAL_SRC_SAMPLER];
4347 const fs_reg &offset_value = inst->src[TEX_LOGICAL_SRC_OFFSET_VALUE];
4348 assert(inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].file == IMM);
4349 const unsigned coord_components = inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4350 assert(inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].file == IMM);
4351 const unsigned grad_components = inst->src[TEX_LOGICAL_SRC_GRAD_COMPONENTS].ud;
4352
4353 if (devinfo->gen >= 7) {
4354 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
4355 shadow_c, lod, lod2, sample_index,
4356 mcs, surface, sampler, offset_value,
4357 coord_components, grad_components);
4358 } else if (devinfo->gen >= 5) {
4359 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
4360 shadow_c, lod, lod2, sample_index,
4361 surface, sampler, offset_value,
4362 coord_components, grad_components);
4363 } else {
4364 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
4365 shadow_c, lod, lod2,
4366 surface, sampler,
4367 coord_components, grad_components);
4368 }
4369 }
4370
4371 /**
4372 * Initialize the header present in some typed and untyped surface
4373 * messages.
4374 */
4375 static fs_reg
4376 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
4377 {
4378 fs_builder ubld = bld.exec_all().group(8, 0);
4379 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4380 ubld.MOV(dst, brw_imm_d(0));
4381 ubld.MOV(component(dst, 7), sample_mask);
4382 return dst;
4383 }
4384
4385 static void
4386 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
4387 const fs_reg &sample_mask)
4388 {
4389 /* Get the logical send arguments. */
4390 const fs_reg &addr = inst->src[0];
4391 const fs_reg &src = inst->src[1];
4392 const fs_reg &surface = inst->src[2];
4393 const UNUSED fs_reg &dims = inst->src[3];
4394 const fs_reg &arg = inst->src[4];
4395
4396 /* Calculate the total number of components of the payload. */
4397 const unsigned addr_sz = inst->components_read(0);
4398 const unsigned src_sz = inst->components_read(1);
4399 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
4400 const unsigned sz = header_sz + addr_sz + src_sz;
4401
4402 /* Allocate space for the payload. */
4403 fs_reg *const components = new fs_reg[sz];
4404 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
4405 unsigned n = 0;
4406
4407 /* Construct the payload. */
4408 if (header_sz)
4409 components[n++] = emit_surface_header(bld, sample_mask);
4410
4411 for (unsigned i = 0; i < addr_sz; i++)
4412 components[n++] = offset(addr, bld, i);
4413
4414 for (unsigned i = 0; i < src_sz; i++)
4415 components[n++] = offset(src, bld, i);
4416
4417 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
4418
4419 /* Update the original instruction. */
4420 inst->opcode = op;
4421 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
4422 inst->header_size = header_sz;
4423
4424 inst->src[0] = payload;
4425 inst->src[1] = surface;
4426 inst->src[2] = arg;
4427 inst->resize_sources(3);
4428
4429 delete[] components;
4430 }
4431
4432 static void
4433 lower_varying_pull_constant_logical_send(const fs_builder &bld, fs_inst *inst)
4434 {
4435 const brw_device_info *devinfo = bld.shader->devinfo;
4436
4437 if (devinfo->gen >= 7) {
4438 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
4439
4440 } else {
4441 const fs_reg payload(MRF, FIRST_PULL_LOAD_MRF(devinfo->gen),
4442 BRW_REGISTER_TYPE_UD);
4443
4444 bld.MOV(byte_offset(payload, REG_SIZE), inst->src[1]);
4445
4446 inst->opcode = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4;
4447 inst->resize_sources(1);
4448 inst->base_mrf = payload.nr;
4449 inst->header_size = 1;
4450 inst->mlen = 1 + inst->exec_size / 8;
4451 }
4452 }
4453
4454 static void
4455 lower_math_logical_send(const fs_builder &bld, fs_inst *inst)
4456 {
4457 assert(bld.shader->devinfo->gen < 6);
4458
4459 inst->base_mrf = 2;
4460 inst->mlen = inst->sources * inst->exec_size / 8;
4461
4462 if (inst->sources > 1) {
4463 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
4464 * "Message Payload":
4465 *
4466 * "Operand0[7]. For the INT DIV functions, this operand is the
4467 * denominator."
4468 * ...
4469 * "Operand1[7]. For the INT DIV functions, this operand is the
4470 * numerator."
4471 */
4472 const bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
4473 const fs_reg src0 = is_int_div ? inst->src[1] : inst->src[0];
4474 const fs_reg src1 = is_int_div ? inst->src[0] : inst->src[1];
4475
4476 inst->resize_sources(1);
4477 inst->src[0] = src0;
4478
4479 assert(inst->exec_size == 8);
4480 bld.MOV(fs_reg(MRF, inst->base_mrf + 1, src1.type), src1);
4481 }
4482 }
4483
4484 bool
4485 fs_visitor::lower_logical_sends()
4486 {
4487 bool progress = false;
4488
4489 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4490 const fs_builder ibld(this, block, inst);
4491
4492 switch (inst->opcode) {
4493 case FS_OPCODE_FB_WRITE_LOGICAL:
4494 assert(stage == MESA_SHADER_FRAGMENT);
4495 lower_fb_write_logical_send(ibld, inst,
4496 (const brw_wm_prog_data *)prog_data,
4497 (const brw_wm_prog_key *)key,
4498 payload);
4499 break;
4500
4501 case SHADER_OPCODE_TEX_LOGICAL:
4502 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4503 break;
4504
4505 case SHADER_OPCODE_TXD_LOGICAL:
4506 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4507 break;
4508
4509 case SHADER_OPCODE_TXF_LOGICAL:
4510 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4511 break;
4512
4513 case SHADER_OPCODE_TXL_LOGICAL:
4514 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4515 break;
4516
4517 case SHADER_OPCODE_TXS_LOGICAL:
4518 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4519 break;
4520
4521 case FS_OPCODE_TXB_LOGICAL:
4522 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4523 break;
4524
4525 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4526 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4527 break;
4528
4529 case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
4530 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS_W);
4531 break;
4532
4533 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4534 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4535 break;
4536
4537 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4538 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4539 break;
4540
4541 case SHADER_OPCODE_LOD_LOGICAL:
4542 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4543 break;
4544
4545 case SHADER_OPCODE_TG4_LOGICAL:
4546 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4547 break;
4548
4549 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4550 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4551 break;
4552
4553 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4554 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_SAMPLEINFO);
4555 break;
4556
4557 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4558 lower_surface_logical_send(ibld, inst,
4559 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4560 fs_reg());
4561 break;
4562
4563 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4564 lower_surface_logical_send(ibld, inst,
4565 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4566 ibld.sample_mask_reg());
4567 break;
4568
4569 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4570 lower_surface_logical_send(ibld, inst,
4571 SHADER_OPCODE_UNTYPED_ATOMIC,
4572 ibld.sample_mask_reg());
4573 break;
4574
4575 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4576 lower_surface_logical_send(ibld, inst,
4577 SHADER_OPCODE_TYPED_SURFACE_READ,
4578 brw_imm_d(0xffff));
4579 break;
4580
4581 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4582 lower_surface_logical_send(ibld, inst,
4583 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4584 ibld.sample_mask_reg());
4585 break;
4586
4587 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4588 lower_surface_logical_send(ibld, inst,
4589 SHADER_OPCODE_TYPED_ATOMIC,
4590 ibld.sample_mask_reg());
4591 break;
4592
4593 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4594 lower_varying_pull_constant_logical_send(ibld, inst);
4595 break;
4596
4597 case SHADER_OPCODE_RCP:
4598 case SHADER_OPCODE_RSQ:
4599 case SHADER_OPCODE_SQRT:
4600 case SHADER_OPCODE_EXP2:
4601 case SHADER_OPCODE_LOG2:
4602 case SHADER_OPCODE_SIN:
4603 case SHADER_OPCODE_COS:
4604 case SHADER_OPCODE_POW:
4605 case SHADER_OPCODE_INT_QUOTIENT:
4606 case SHADER_OPCODE_INT_REMAINDER:
4607 /* The math opcodes are overloaded for the send-like and
4608 * expression-like instructions which seems kind of icky. Gen6+ has
4609 * a native (but rather quirky) MATH instruction so we don't need to
4610 * do anything here. On Gen4-5 we'll have to lower the Gen6-like
4611 * logical instructions (which we can easily recognize because they
4612 * have mlen = 0) into send-like virtual instructions.
4613 */
4614 if (devinfo->gen < 6 && inst->mlen == 0) {
4615 lower_math_logical_send(ibld, inst);
4616 break;
4617
4618 } else {
4619 continue;
4620 }
4621
4622 default:
4623 continue;
4624 }
4625
4626 progress = true;
4627 }
4628
4629 if (progress)
4630 invalidate_live_intervals();
4631
4632 return progress;
4633 }
4634
4635 /**
4636 * Get the closest allowed SIMD width for instruction \p inst accounting for
4637 * some common regioning and execution control restrictions that apply to FPU
4638 * instructions. These restrictions don't necessarily have any relevance to
4639 * instructions not executed by the FPU pipeline like extended math, control
4640 * flow or send message instructions.
4641 *
4642 * For virtual opcodes it's really up to the instruction -- In some cases
4643 * (e.g. where a virtual instruction unrolls into a simple sequence of FPU
4644 * instructions) it may simplify virtual instruction lowering if we can
4645 * enforce FPU-like regioning restrictions already on the virtual instruction,
4646 * in other cases (e.g. virtual send-like instructions) this may be
4647 * excessively restrictive.
4648 */
4649 static unsigned
4650 get_fpu_lowered_simd_width(const struct brw_device_info *devinfo,
4651 const fs_inst *inst)
4652 {
4653 /* Maximum execution size representable in the instruction controls. */
4654 unsigned max_width = MIN2(32, inst->exec_size);
4655
4656 /* According to the PRMs:
4657 * "A. In Direct Addressing mode, a source cannot span more than 2
4658 * adjacent GRF registers.
4659 * B. A destination cannot span more than 2 adjacent GRF registers."
4660 *
4661 * Look for the source or destination with the largest register region
4662 * which is the one that is going to limit the overall execution size of
4663 * the instruction due to this rule.
4664 */
4665 unsigned reg_count = inst->regs_written;
4666
4667 for (unsigned i = 0; i < inst->sources; i++)
4668 reg_count = MAX2(reg_count, (unsigned)inst->regs_read(i));
4669
4670 /* Calculate the maximum execution size of the instruction based on the
4671 * factor by which it goes over the hardware limit of 2 GRFs.
4672 */
4673 if (reg_count > 2)
4674 max_width = MIN2(max_width, inst->exec_size / DIV_ROUND_UP(reg_count, 2));
4675
4676 /* According to the IVB PRMs:
4677 * "When destination spans two registers, the source MUST span two
4678 * registers. The exception to the above rule:
4679 *
4680 * - When source is scalar, the source registers are not incremented.
4681 * - When source is packed integer Word and destination is packed
4682 * integer DWord, the source register is not incremented but the
4683 * source sub register is incremented."
4684 *
4685 * The hardware specs from Gen4 to Gen7.5 mention similar regioning
4686 * restrictions. The code below intentionally doesn't check whether the
4687 * destination type is integer because empirically the hardware doesn't
4688 * seem to care what the actual type is as long as it's dword-aligned.
4689 */
4690 if (devinfo->gen < 8) {
4691 for (unsigned i = 0; i < inst->sources; i++) {
4692 if (inst->regs_written == 2 &&
4693 inst->regs_read(i) != 0 && inst->regs_read(i) != 2 &&
4694 !is_uniform(inst->src[i]) &&
4695 !(type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
4696 type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1))
4697 max_width = MIN2(max_width, inst->exec_size /
4698 inst->regs_written);
4699 }
4700 }
4701
4702 /* From the IVB PRMs:
4703 * "When an instruction is SIMD32, the low 16 bits of the execution mask
4704 * are applied for both halves of the SIMD32 instruction. If different
4705 * execution mask channels are required, split the instruction into two
4706 * SIMD16 instructions."
4707 *
4708 * There is similar text in the HSW PRMs. Gen4-6 don't even implement
4709 * 32-wide control flow support in hardware and will behave similarly.
4710 */
4711 if (devinfo->gen < 8 && !inst->force_writemask_all)
4712 max_width = MIN2(max_width, 16);
4713
4714 /* From the IVB PRMs (applies to HSW too):
4715 * "Instructions with condition modifiers must not use SIMD32."
4716 *
4717 * From the BDW PRMs (applies to later hardware too):
4718 * "Ternary instruction with condition modifiers must not use SIMD32."
4719 */
4720 if (inst->conditional_mod && (devinfo->gen < 8 || inst->is_3src(devinfo)))
4721 max_width = MIN2(max_width, 16);
4722
4723 /* From the IVB PRMs (applies to other devices that don't have the
4724 * brw_device_info::supports_simd16_3src flag set):
4725 * "In Align16 access mode, SIMD16 is not allowed for DW operations and
4726 * SIMD8 is not allowed for DF operations."
4727 */
4728 if (inst->is_3src(devinfo) && !devinfo->supports_simd16_3src)
4729 max_width = MIN2(max_width, inst->exec_size / reg_count);
4730
4731 /* Only power-of-two execution sizes are representable in the instruction
4732 * control fields.
4733 */
4734 return 1 << _mesa_logbase2(max_width);
4735 }
4736
4737 /**
4738 * Get the closest native SIMD width supported by the hardware for instruction
4739 * \p inst. The instruction will be left untouched by
4740 * fs_visitor::lower_simd_width() if the returned value is equal to the
4741 * original execution size.
4742 */
4743 static unsigned
4744 get_lowered_simd_width(const struct brw_device_info *devinfo,
4745 const fs_inst *inst)
4746 {
4747 switch (inst->opcode) {
4748 case BRW_OPCODE_MOV:
4749 case BRW_OPCODE_SEL:
4750 case BRW_OPCODE_NOT:
4751 case BRW_OPCODE_AND:
4752 case BRW_OPCODE_OR:
4753 case BRW_OPCODE_XOR:
4754 case BRW_OPCODE_SHR:
4755 case BRW_OPCODE_SHL:
4756 case BRW_OPCODE_ASR:
4757 case BRW_OPCODE_CMPN:
4758 case BRW_OPCODE_CSEL:
4759 case BRW_OPCODE_F32TO16:
4760 case BRW_OPCODE_F16TO32:
4761 case BRW_OPCODE_BFREV:
4762 case BRW_OPCODE_BFE:
4763 case BRW_OPCODE_ADD:
4764 case BRW_OPCODE_MUL:
4765 case BRW_OPCODE_AVG:
4766 case BRW_OPCODE_FRC:
4767 case BRW_OPCODE_RNDU:
4768 case BRW_OPCODE_RNDD:
4769 case BRW_OPCODE_RNDE:
4770 case BRW_OPCODE_RNDZ:
4771 case BRW_OPCODE_LZD:
4772 case BRW_OPCODE_FBH:
4773 case BRW_OPCODE_FBL:
4774 case BRW_OPCODE_CBIT:
4775 case BRW_OPCODE_SAD2:
4776 case BRW_OPCODE_MAD:
4777 case BRW_OPCODE_LRP:
4778 case FS_OPCODE_PACK:
4779 return get_fpu_lowered_simd_width(devinfo, inst);
4780
4781 case BRW_OPCODE_CMP: {
4782 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says that
4783 * when the destination is a GRF the dependency-clear bit on the flag
4784 * register is cleared early.
4785 *
4786 * Suggested workarounds are to disable coissuing CMP instructions
4787 * or to split CMP(16) instructions into two CMP(8) instructions.
4788 *
4789 * We choose to split into CMP(8) instructions since disabling
4790 * coissuing would affect CMP instructions not otherwise affected by
4791 * the errata.
4792 */
4793 const unsigned max_width = (devinfo->gen == 7 && !devinfo->is_haswell &&
4794 !inst->dst.is_null() ? 8 : ~0);
4795 return MIN2(max_width, get_fpu_lowered_simd_width(devinfo, inst));
4796 }
4797 case BRW_OPCODE_BFI1:
4798 case BRW_OPCODE_BFI2:
4799 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
4800 * should
4801 * "Force BFI instructions to be executed always in SIMD8."
4802 */
4803 return MIN2(devinfo->is_haswell ? 8 : ~0u,
4804 get_fpu_lowered_simd_width(devinfo, inst));
4805
4806 case BRW_OPCODE_IF:
4807 assert(inst->src[0].file == BAD_FILE || inst->exec_size <= 16);
4808 return inst->exec_size;
4809
4810 case SHADER_OPCODE_RCP:
4811 case SHADER_OPCODE_RSQ:
4812 case SHADER_OPCODE_SQRT:
4813 case SHADER_OPCODE_EXP2:
4814 case SHADER_OPCODE_LOG2:
4815 case SHADER_OPCODE_SIN:
4816 case SHADER_OPCODE_COS:
4817 /* Unary extended math instructions are limited to SIMD8 on Gen4 and
4818 * Gen6.
4819 */
4820 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4821 devinfo->gen == 5 || devinfo->is_g4x ? MIN2(16, inst->exec_size) :
4822 MIN2(8, inst->exec_size));
4823
4824 case SHADER_OPCODE_POW:
4825 /* SIMD16 is only allowed on Gen7+. */
4826 return (devinfo->gen >= 7 ? MIN2(16, inst->exec_size) :
4827 MIN2(8, inst->exec_size));
4828
4829 case SHADER_OPCODE_INT_QUOTIENT:
4830 case SHADER_OPCODE_INT_REMAINDER:
4831 /* Integer division is limited to SIMD8 on all generations. */
4832 return MIN2(8, inst->exec_size);
4833
4834 case FS_OPCODE_LINTERP:
4835 case FS_OPCODE_GET_BUFFER_SIZE:
4836 case FS_OPCODE_DDX_COARSE:
4837 case FS_OPCODE_DDX_FINE:
4838 case FS_OPCODE_DDY_COARSE:
4839 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
4840 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
4841 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
4842 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
4843 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
4844 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
4845 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
4846 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
4847 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
4848 return MIN2(16, inst->exec_size);
4849
4850 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
4851 /* Pre-ILK hardware doesn't have a SIMD8 variant of the texel fetch
4852 * message used to implement varying pull constant loads, so expand it
4853 * to SIMD16. An alternative with longer message payload length but
4854 * shorter return payload would be to use the SIMD8 sampler message that
4855 * takes (header, u, v, r) as parameters instead of (header, u).
4856 */
4857 return (devinfo->gen == 4 ? 16 : MIN2(16, inst->exec_size));
4858
4859 case FS_OPCODE_DDY_FINE:
4860 /* The implementation of this virtual opcode may require emitting
4861 * compressed Align16 instructions, which are severely limited on some
4862 * generations.
4863 *
4864 * From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
4865 * Region Restrictions):
4866 *
4867 * "In Align16 access mode, SIMD16 is not allowed for DW operations
4868 * and SIMD8 is not allowed for DF operations."
4869 *
4870 * In this context, "DW operations" means "operations acting on 32-bit
4871 * values", so it includes operations on floats.
4872 *
4873 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
4874 * (Instruction Compression -> Rules and Restrictions):
4875 *
4876 * "A compressed instruction must be in Align1 access mode. Align16
4877 * mode instructions cannot be compressed."
4878 *
4879 * Similar text exists in the g45 PRM.
4880 *
4881 * Empirically, compressed align16 instructions using odd register
4882 * numbers don't appear to work on Sandybridge either.
4883 */
4884 return (devinfo->gen == 4 || devinfo->gen == 6 ||
4885 (devinfo->gen == 7 && !devinfo->is_haswell) ?
4886 MIN2(8, inst->exec_size) : MIN2(16, inst->exec_size));
4887
4888 case SHADER_OPCODE_MULH:
4889 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4890 * is 8-wide on Gen7+.
4891 */
4892 return (devinfo->gen >= 7 ? 8 :
4893 get_fpu_lowered_simd_width(devinfo, inst));
4894
4895 case FS_OPCODE_FB_WRITE_LOGICAL:
4896 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4897 * here.
4898 */
4899 assert(devinfo->gen != 6 ||
4900 inst->src[FB_WRITE_LOGICAL_SRC_SRC_DEPTH].file == BAD_FILE ||
4901 inst->exec_size == 8);
4902 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4903 return (inst->src[FB_WRITE_LOGICAL_SRC_COLOR1].file != BAD_FILE ?
4904 8 : MIN2(16, inst->exec_size));
4905
4906 case SHADER_OPCODE_TEX_LOGICAL:
4907 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4908 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4909 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4910 case SHADER_OPCODE_LOD_LOGICAL:
4911 case SHADER_OPCODE_TG4_LOGICAL:
4912 case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
4913 return MIN2(16, inst->exec_size);
4914
4915 case SHADER_OPCODE_TXD_LOGICAL:
4916 /* TXD is unsupported in SIMD16 mode. */
4917 return 8;
4918
4919 case SHADER_OPCODE_TG4_OFFSET_LOGICAL: {
4920 /* gather4_po_c is unsupported in SIMD16 mode. */
4921 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4922 return (shadow_c.file != BAD_FILE ? 8 : MIN2(16, inst->exec_size));
4923 }
4924 case SHADER_OPCODE_TXL_LOGICAL:
4925 case FS_OPCODE_TXB_LOGICAL: {
4926 /* Gen4 doesn't have SIMD8 non-shadow-compare bias/LOD instructions, and
4927 * Gen4-6 can't support TXL and TXB with shadow comparison in SIMD16
4928 * mode because the message exceeds the maximum length of 11.
4929 */
4930 const fs_reg &shadow_c = inst->src[TEX_LOGICAL_SRC_SHADOW_C];
4931 if (devinfo->gen == 4 && shadow_c.file == BAD_FILE)
4932 return 16;
4933 else if (devinfo->gen < 7 && shadow_c.file != BAD_FILE)
4934 return 8;
4935 else
4936 return MIN2(16, inst->exec_size);
4937 }
4938 case SHADER_OPCODE_TXF_LOGICAL:
4939 case SHADER_OPCODE_TXS_LOGICAL:
4940 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
4941 * messages. Use SIMD16 instead.
4942 */
4943 if (devinfo->gen == 4)
4944 return 16;
4945 else
4946 return MIN2(16, inst->exec_size);
4947
4948 case SHADER_OPCODE_TXF_CMS_W_LOGICAL: {
4949 /* This opcode can take up to 6 arguments which means that in some
4950 * circumstances it can end up with a message that is too long in SIMD16
4951 * mode.
4952 */
4953 const unsigned coord_components =
4954 inst->src[TEX_LOGICAL_SRC_COORD_COMPONENTS].ud;
4955 /* First three arguments are the sample index and the two arguments for
4956 * the MCS data.
4957 */
4958 if ((coord_components + 3) * 2 > MAX_SAMPLER_MESSAGE_SIZE)
4959 return 8;
4960 else
4961 return MIN2(16, inst->exec_size);
4962 }
4963
4964 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4965 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4966 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4967 return 8;
4968
4969 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4970 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4971 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4972 return MIN2(16, inst->exec_size);
4973
4974 case SHADER_OPCODE_URB_READ_SIMD8:
4975 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
4976 case SHADER_OPCODE_URB_WRITE_SIMD8:
4977 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
4978 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
4979 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
4980 return MIN2(8, inst->exec_size);
4981
4982 case SHADER_OPCODE_MOV_INDIRECT:
4983 /* Prior to Broadwell, we only have 8 address subregisters */
4984 return MIN3(devinfo->gen >= 8 ? 16 : 8,
4985 2 * REG_SIZE / (inst->dst.stride * type_sz(inst->dst.type)),
4986 inst->exec_size);
4987
4988 case SHADER_OPCODE_LOAD_PAYLOAD: {
4989 const unsigned reg_count =
4990 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
4991
4992 if (reg_count > 2) {
4993 /* Only LOAD_PAYLOAD instructions with per-channel destination region
4994 * can be easily lowered (which excludes headers and heterogeneous
4995 * types).
4996 */
4997 assert(!inst->header_size);
4998 for (unsigned i = 0; i < inst->sources; i++)
4999 assert(type_sz(inst->dst.type) == type_sz(inst->src[i].type) ||
5000 inst->src[i].file == BAD_FILE);
5001
5002 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
5003 } else {
5004 return inst->exec_size;
5005 }
5006 }
5007 default:
5008 return inst->exec_size;
5009 }
5010 }
5011
5012 bool
5013 fs_visitor::lower_simd_width()
5014 {
5015 bool progress = false;
5016
5017 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5018 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
5019
5020 if (lower_width != inst->exec_size) {
5021 /* Builder matching the original instruction. We may also need to
5022 * emit an instruction of width larger than the original, set the
5023 * execution size of the builder to the highest of both for now so
5024 * we're sure that both cases can be handled.
5025 */
5026 const unsigned max_width = MAX2(inst->exec_size, lower_width);
5027 const fs_builder ibld = bld.at(block, inst)
5028 .exec_all(inst->force_writemask_all)
5029 .group(max_width, inst->group / max_width);
5030
5031 /* Split the copies in chunks of the execution width of either the
5032 * original or the lowered instruction, whichever is lower.
5033 */
5034 const unsigned copy_width = MIN2(lower_width, inst->exec_size);
5035 const unsigned n = inst->exec_size / copy_width;
5036 const unsigned dst_size = inst->regs_written * REG_SIZE /
5037 inst->dst.component_size(inst->exec_size);
5038 fs_reg dsts[4];
5039
5040 assert(n > 0 && n <= ARRAY_SIZE(dsts) &&
5041 !inst->writes_accumulator && !inst->mlen);
5042
5043 for (unsigned i = 0; i < n; i++) {
5044 /* Emit a copy of the original instruction with the lowered width.
5045 * If the EOT flag was set throw it away except for the last
5046 * instruction to avoid killing the thread prematurely.
5047 */
5048 fs_inst split_inst = *inst;
5049 split_inst.exec_size = lower_width;
5050 split_inst.eot = inst->eot && i == n - 1;
5051
5052 /* Select the correct channel enables for the i-th group, then
5053 * transform the sources and destination and emit the lowered
5054 * instruction.
5055 */
5056 const fs_builder lbld = ibld.group(lower_width, i);
5057 const fs_builder cbld = lbld.group(copy_width, 0);
5058
5059 for (unsigned j = 0; j < inst->sources; j++) {
5060 if (inst->src[j].file != BAD_FILE &&
5061 !is_periodic(inst->src[j], lower_width)) {
5062 /* Get the i-th copy_width-wide chunk of the source. */
5063 const fs_reg src = offset(inst->src[j], cbld, i);
5064 const unsigned src_size = inst->components_read(j);
5065
5066 /* Copy one every n copy_width-wide components of the
5067 * register into a temporary passed as source to the lowered
5068 * instruction.
5069 */
5070 split_inst.src[j] = lbld.vgrf(inst->src[j].type, src_size);
5071
5072 for (unsigned k = 0; k < src_size; ++k)
5073 cbld.MOV(offset(split_inst.src[j], lbld, k),
5074 offset(src, cbld, n * k));
5075 }
5076 }
5077
5078 if (inst->regs_written) {
5079 /* Allocate enough space to hold the result of the lowered
5080 * instruction and fix up the number of registers written.
5081 */
5082 split_inst.dst = dsts[i] =
5083 lbld.vgrf(inst->dst.type, dst_size);
5084 split_inst.regs_written =
5085 DIV_ROUND_UP(type_sz(inst->dst.type) * dst_size * lower_width,
5086 REG_SIZE);
5087
5088 if (inst->predicate) {
5089 /* Handle predication by copying the original contents of
5090 * the destination into the temporary before emitting the
5091 * lowered instruction.
5092 */
5093 for (unsigned k = 0; k < dst_size; ++k)
5094 cbld.MOV(offset(split_inst.dst, lbld, k),
5095 offset(inst->dst, cbld, n * k + i));
5096 }
5097 }
5098
5099 lbld.emit(split_inst);
5100 }
5101
5102 if (inst->regs_written) {
5103 const fs_builder lbld = ibld.group(lower_width, 0);
5104
5105 /* Interleave the components of the result from the lowered
5106 * instructions.
5107 */
5108 for (unsigned i = 0; i < dst_size; ++i) {
5109 for (unsigned j = 0; j < n; ++j) {
5110 const fs_builder cbld = ibld.group(copy_width, j);
5111 cbld.MOV(offset(inst->dst, cbld, n * i + j),
5112 offset(dsts[j], lbld, i));
5113 }
5114 }
5115 }
5116
5117 inst->remove(block);
5118 progress = true;
5119 }
5120 }
5121
5122 if (progress)
5123 invalidate_live_intervals();
5124
5125 return progress;
5126 }
5127
5128 void
5129 fs_visitor::dump_instructions()
5130 {
5131 dump_instructions(NULL);
5132 }
5133
5134 void
5135 fs_visitor::dump_instructions(const char *name)
5136 {
5137 FILE *file = stderr;
5138 if (name && geteuid() != 0) {
5139 file = fopen(name, "w");
5140 if (!file)
5141 file = stderr;
5142 }
5143
5144 if (cfg) {
5145 calculate_register_pressure();
5146 int ip = 0, max_pressure = 0;
5147 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
5148 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
5149 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
5150 dump_instruction(inst, file);
5151 ip++;
5152 }
5153 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
5154 } else {
5155 int ip = 0;
5156 foreach_in_list(backend_instruction, inst, &instructions) {
5157 fprintf(file, "%4d: ", ip++);
5158 dump_instruction(inst, file);
5159 }
5160 }
5161
5162 if (file != stderr) {
5163 fclose(file);
5164 }
5165 }
5166
5167 void
5168 fs_visitor::dump_instruction(backend_instruction *be_inst)
5169 {
5170 dump_instruction(be_inst, stderr);
5171 }
5172
5173 void
5174 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
5175 {
5176 fs_inst *inst = (fs_inst *)be_inst;
5177
5178 if (inst->predicate) {
5179 fprintf(file, "(%cf0.%d) ",
5180 inst->predicate_inverse ? '-' : '+',
5181 inst->flag_subreg);
5182 }
5183
5184 fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
5185 if (inst->saturate)
5186 fprintf(file, ".sat");
5187 if (inst->conditional_mod) {
5188 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
5189 if (!inst->predicate &&
5190 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
5191 inst->opcode != BRW_OPCODE_IF &&
5192 inst->opcode != BRW_OPCODE_WHILE))) {
5193 fprintf(file, ".f0.%d", inst->flag_subreg);
5194 }
5195 }
5196 fprintf(file, "(%d) ", inst->exec_size);
5197
5198 if (inst->mlen) {
5199 fprintf(file, "(mlen: %d) ", inst->mlen);
5200 }
5201
5202 switch (inst->dst.file) {
5203 case VGRF:
5204 fprintf(file, "vgrf%d", inst->dst.nr);
5205 if (alloc.sizes[inst->dst.nr] != inst->regs_written ||
5206 inst->dst.subreg_offset)
5207 fprintf(file, "+%d.%d",
5208 inst->dst.reg_offset, inst->dst.subreg_offset);
5209 break;
5210 case FIXED_GRF:
5211 fprintf(file, "g%d", inst->dst.nr);
5212 break;
5213 case MRF:
5214 fprintf(file, "m%d", inst->dst.nr);
5215 break;
5216 case BAD_FILE:
5217 fprintf(file, "(null)");
5218 break;
5219 case UNIFORM:
5220 fprintf(file, "***u%d***", inst->dst.nr + inst->dst.reg_offset);
5221 break;
5222 case ATTR:
5223 fprintf(file, "***attr%d***", inst->dst.nr + inst->dst.reg_offset);
5224 break;
5225 case ARF:
5226 switch (inst->dst.nr) {
5227 case BRW_ARF_NULL:
5228 fprintf(file, "null");
5229 break;
5230 case BRW_ARF_ADDRESS:
5231 fprintf(file, "a0.%d", inst->dst.subnr);
5232 break;
5233 case BRW_ARF_ACCUMULATOR:
5234 fprintf(file, "acc%d", inst->dst.subnr);
5235 break;
5236 case BRW_ARF_FLAG:
5237 fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5238 break;
5239 default:
5240 fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
5241 break;
5242 }
5243 if (inst->dst.subnr)
5244 fprintf(file, "+%d", inst->dst.subnr);
5245 break;
5246 case IMM:
5247 unreachable("not reached");
5248 }
5249 if (inst->dst.stride != 1)
5250 fprintf(file, "<%u>", inst->dst.stride);
5251 fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
5252
5253 for (int i = 0; i < inst->sources; i++) {
5254 if (inst->src[i].negate)
5255 fprintf(file, "-");
5256 if (inst->src[i].abs)
5257 fprintf(file, "|");
5258 switch (inst->src[i].file) {
5259 case VGRF:
5260 fprintf(file, "vgrf%d", inst->src[i].nr);
5261 if (alloc.sizes[inst->src[i].nr] != (unsigned)inst->regs_read(i) ||
5262 inst->src[i].subreg_offset)
5263 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
5264 inst->src[i].subreg_offset);
5265 break;
5266 case FIXED_GRF:
5267 fprintf(file, "g%d", inst->src[i].nr);
5268 break;
5269 case MRF:
5270 fprintf(file, "***m%d***", inst->src[i].nr);
5271 break;
5272 case ATTR:
5273 fprintf(file, "attr%d+%d", inst->src[i].nr, inst->src[i].reg_offset);
5274 break;
5275 case UNIFORM:
5276 fprintf(file, "u%d", inst->src[i].nr + inst->src[i].reg_offset);
5277 if (inst->src[i].subreg_offset) {
5278 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
5279 inst->src[i].subreg_offset);
5280 }
5281 break;
5282 case BAD_FILE:
5283 fprintf(file, "(null)");
5284 break;
5285 case IMM:
5286 switch (inst->src[i].type) {
5287 case BRW_REGISTER_TYPE_F:
5288 fprintf(file, "%-gf", inst->src[i].f);
5289 break;
5290 case BRW_REGISTER_TYPE_DF:
5291 fprintf(file, "%fdf", inst->src[i].df);
5292 break;
5293 case BRW_REGISTER_TYPE_W:
5294 case BRW_REGISTER_TYPE_D:
5295 fprintf(file, "%dd", inst->src[i].d);
5296 break;
5297 case BRW_REGISTER_TYPE_UW:
5298 case BRW_REGISTER_TYPE_UD:
5299 fprintf(file, "%uu", inst->src[i].ud);
5300 break;
5301 case BRW_REGISTER_TYPE_VF:
5302 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
5303 brw_vf_to_float((inst->src[i].ud >> 0) & 0xff),
5304 brw_vf_to_float((inst->src[i].ud >> 8) & 0xff),
5305 brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
5306 brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
5307 break;
5308 default:
5309 fprintf(file, "???");
5310 break;
5311 }
5312 break;
5313 case ARF:
5314 switch (inst->src[i].nr) {
5315 case BRW_ARF_NULL:
5316 fprintf(file, "null");
5317 break;
5318 case BRW_ARF_ADDRESS:
5319 fprintf(file, "a0.%d", inst->src[i].subnr);
5320 break;
5321 case BRW_ARF_ACCUMULATOR:
5322 fprintf(file, "acc%d", inst->src[i].subnr);
5323 break;
5324 case BRW_ARF_FLAG:
5325 fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5326 break;
5327 default:
5328 fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
5329 break;
5330 }
5331 if (inst->src[i].subnr)
5332 fprintf(file, "+%d", inst->src[i].subnr);
5333 break;
5334 }
5335 if (inst->src[i].abs)
5336 fprintf(file, "|");
5337
5338 if (inst->src[i].file != IMM) {
5339 unsigned stride;
5340 if (inst->src[i].file == ARF || inst->src[i].file == FIXED_GRF) {
5341 unsigned hstride = inst->src[i].hstride;
5342 stride = (hstride == 0 ? 0 : (1 << (hstride - 1)));
5343 } else {
5344 stride = inst->src[i].stride;
5345 }
5346 if (stride != 1)
5347 fprintf(file, "<%u>", stride);
5348
5349 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
5350 }
5351
5352 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
5353 fprintf(file, ", ");
5354 }
5355
5356 fprintf(file, " ");
5357
5358 if (inst->force_writemask_all)
5359 fprintf(file, "NoMask ");
5360
5361 if (inst->exec_size != dispatch_width)
5362 fprintf(file, "group%d ", inst->group);
5363
5364 fprintf(file, "\n");
5365 }
5366
5367 /**
5368 * Possibly returns an instruction that set up @param reg.
5369 *
5370 * Sometimes we want to take the result of some expression/variable
5371 * dereference tree and rewrite the instruction generating the result
5372 * of the tree. When processing the tree, we know that the
5373 * instructions generated are all writing temporaries that are dead
5374 * outside of this tree. So, if we have some instructions that write
5375 * a temporary, we're free to point that temp write somewhere else.
5376 *
5377 * Note that this doesn't guarantee that the instruction generated
5378 * only reg -- it might be the size=4 destination of a texture instruction.
5379 */
5380 fs_inst *
5381 fs_visitor::get_instruction_generating_reg(fs_inst *start,
5382 fs_inst *end,
5383 const fs_reg &reg)
5384 {
5385 if (end == start ||
5386 end->is_partial_write() ||
5387 !reg.equals(end->dst)) {
5388 return NULL;
5389 } else {
5390 return end;
5391 }
5392 }
5393
5394 void
5395 fs_visitor::setup_fs_payload_gen6()
5396 {
5397 assert(stage == MESA_SHADER_FRAGMENT);
5398 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
5399
5400 unsigned barycentric_interp_modes =
5401 (stage == MESA_SHADER_FRAGMENT) ?
5402 ((brw_wm_prog_data*) this->prog_data)->barycentric_interp_modes : 0;
5403
5404 assert(devinfo->gen >= 6);
5405
5406 /* R0-1: masks, pixel X/Y coordinates. */
5407 payload.num_regs = 2;
5408 /* R2: only for 32-pixel dispatch.*/
5409
5410 /* R3-26: barycentric interpolation coordinates. These appear in the
5411 * same order that they appear in the brw_wm_barycentric_interp_mode
5412 * enum. Each set of coordinates occupies 2 registers if dispatch width
5413 * == 8 and 4 registers if dispatch width == 16. Coordinates only
5414 * appear if they were enabled using the "Barycentric Interpolation
5415 * Mode" bits in WM_STATE.
5416 */
5417 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
5418 if (barycentric_interp_modes & (1 << i)) {
5419 payload.barycentric_coord_reg[i] = payload.num_regs;
5420 payload.num_regs += 2;
5421 if (dispatch_width == 16) {
5422 payload.num_regs += 2;
5423 }
5424 }
5425 }
5426
5427 /* R27: interpolated depth if uses source depth */
5428 prog_data->uses_src_depth =
5429 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5430 if (prog_data->uses_src_depth) {
5431 payload.source_depth_reg = payload.num_regs;
5432 payload.num_regs++;
5433 if (dispatch_width == 16) {
5434 /* R28: interpolated depth if not SIMD8. */
5435 payload.num_regs++;
5436 }
5437 }
5438
5439 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
5440 prog_data->uses_src_w =
5441 (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
5442 if (prog_data->uses_src_w) {
5443 payload.source_w_reg = payload.num_regs;
5444 payload.num_regs++;
5445 if (dispatch_width == 16) {
5446 /* R30: interpolated W if not SIMD8. */
5447 payload.num_regs++;
5448 }
5449 }
5450
5451 /* R31: MSAA position offsets. */
5452 if (prog_data->persample_dispatch &&
5453 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
5454 /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
5455 *
5456 * "MSDISPMODE_PERSAMPLE is required in order to select
5457 * POSOFFSET_SAMPLE"
5458 *
5459 * So we can only really get sample positions if we are doing real
5460 * per-sample dispatch. If we need gl_SamplePosition and we don't have
5461 * persample dispatch, we hard-code it to 0.5.
5462 */
5463 prog_data->uses_pos_offset = true;
5464 payload.sample_pos_reg = payload.num_regs;
5465 payload.num_regs++;
5466 }
5467
5468 /* R32: MSAA input coverage mask */
5469 prog_data->uses_sample_mask =
5470 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
5471 if (prog_data->uses_sample_mask) {
5472 assert(devinfo->gen >= 7);
5473 payload.sample_mask_in_reg = payload.num_regs;
5474 payload.num_regs++;
5475 if (dispatch_width == 16) {
5476 /* R33: input coverage mask if not SIMD8. */
5477 payload.num_regs++;
5478 }
5479 }
5480
5481 /* R34-: bary for 32-pixel. */
5482 /* R58-59: interp W for 32-pixel. */
5483
5484 if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
5485 source_depth_to_render_target = true;
5486 }
5487 }
5488
5489 void
5490 fs_visitor::setup_vs_payload()
5491 {
5492 /* R0: thread header, R1: urb handles */
5493 payload.num_regs = 2;
5494 }
5495
5496 /**
5497 * We are building the local ID push constant data using the simplest possible
5498 * method. We simply push the local IDs directly as they should appear in the
5499 * registers for the uvec3 gl_LocalInvocationID variable.
5500 *
5501 * Therefore, for SIMD8, we use 3 full registers, and for SIMD16 we use 6
5502 * registers worth of push constant space.
5503 *
5504 * Note: Any updates to brw_cs_prog_local_id_payload_dwords,
5505 * fill_local_id_payload or fs_visitor::emit_cs_local_invocation_id_setup need
5506 * to coordinated.
5507 *
5508 * FINISHME: There are a few easy optimizations to consider.
5509 *
5510 * 1. If gl_WorkGroupSize x, y or z is 1, we can just use zero, and there is
5511 * no need for using push constant space for that dimension.
5512 *
5513 * 2. Since GL_MAX_COMPUTE_WORK_GROUP_SIZE is currently 1024 or less, we can
5514 * easily use 16-bit words rather than 32-bit dwords in the push constant
5515 * data.
5516 *
5517 * 3. If gl_WorkGroupSize x, y or z is small, then we can use bytes for
5518 * conveying the data, and thereby reduce push constant usage.
5519 *
5520 */
5521 void
5522 fs_visitor::setup_gs_payload()
5523 {
5524 assert(stage == MESA_SHADER_GEOMETRY);
5525
5526 struct brw_gs_prog_data *gs_prog_data =
5527 (struct brw_gs_prog_data *) prog_data;
5528 struct brw_vue_prog_data *vue_prog_data =
5529 (struct brw_vue_prog_data *) prog_data;
5530
5531 /* R0: thread header, R1: output URB handles */
5532 payload.num_regs = 2;
5533
5534 if (gs_prog_data->include_primitive_id) {
5535 /* R2: Primitive ID 0..7 */
5536 payload.num_regs++;
5537 }
5538
5539 /* Use a maximum of 24 registers for push-model inputs. */
5540 const unsigned max_push_components = 24;
5541
5542 /* If pushing our inputs would take too many registers, reduce the URB read
5543 * length (which is in HWords, or 8 registers), and resort to pulling.
5544 *
5545 * Note that the GS reads <URB Read Length> HWords for every vertex - so we
5546 * have to multiply by VerticesIn to obtain the total storage requirement.
5547 */
5548 if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
5549 max_push_components) {
5550 gs_prog_data->base.include_vue_handles = true;
5551
5552 /* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
5553 payload.num_regs += nir->info.gs.vertices_in;
5554
5555 vue_prog_data->urb_read_length =
5556 ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
5557 }
5558 }
5559
5560 void
5561 fs_visitor::setup_cs_payload()
5562 {
5563 assert(devinfo->gen >= 7);
5564 brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
5565
5566 payload.num_regs = 1;
5567
5568 if (nir->info.system_values_read & SYSTEM_BIT_LOCAL_INVOCATION_ID) {
5569 prog_data->local_invocation_id_regs = dispatch_width * 3 / 8;
5570 payload.local_invocation_id_reg = payload.num_regs;
5571 payload.num_regs += prog_data->local_invocation_id_regs;
5572 }
5573 }
5574
5575 void
5576 fs_visitor::calculate_register_pressure()
5577 {
5578 invalidate_live_intervals();
5579 calculate_live_intervals();
5580
5581 unsigned num_instructions = 0;
5582 foreach_block(block, cfg)
5583 num_instructions += block->instructions.length();
5584
5585 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
5586
5587 for (unsigned reg = 0; reg < alloc.count; reg++) {
5588 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
5589 regs_live_at_ip[ip] += alloc.sizes[reg];
5590 }
5591 }
5592
5593 /**
5594 * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
5595 *
5596 * The needs_unlit_centroid_workaround ends up producing one of these per
5597 * channel of centroid input, so it's good to clean them up.
5598 *
5599 * An assumption here is that nothing ever modifies the dispatched pixels
5600 * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
5601 * dictates that anyway.
5602 */
5603 bool
5604 fs_visitor::opt_drop_redundant_mov_to_flags()
5605 {
5606 bool flag_mov_found[2] = {false};
5607 bool progress = false;
5608
5609 /* Instructions removed by this pass can only be added if this were true */
5610 if (!devinfo->needs_unlit_centroid_workaround)
5611 return false;
5612
5613 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
5614 if (inst->is_control_flow()) {
5615 memset(flag_mov_found, 0, sizeof(flag_mov_found));
5616 } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
5617 if (!flag_mov_found[inst->flag_subreg]) {
5618 flag_mov_found[inst->flag_subreg] = true;
5619 } else {
5620 inst->remove(block);
5621 progress = true;
5622 }
5623 } else if (inst->flags_written()) {
5624 flag_mov_found[inst->flag_subreg] = false;
5625 }
5626 }
5627
5628 return progress;
5629 }
5630
5631 void
5632 fs_visitor::optimize()
5633 {
5634 /* Start by validating the shader we currently have. */
5635 validate();
5636
5637 /* bld is the common builder object pointing at the end of the program we
5638 * used to translate it into i965 IR. For the optimization and lowering
5639 * passes coming next, any code added after the end of the program without
5640 * having explicitly called fs_builder::at() clearly points at a mistake.
5641 * Ideally optimization passes wouldn't be part of the visitor so they
5642 * wouldn't have access to bld at all, but they do, so just in case some
5643 * pass forgets to ask for a location explicitly set it to NULL here to
5644 * make it trip. The dispatch width is initialized to a bogus value to
5645 * make sure that optimizations set the execution controls explicitly to
5646 * match the code they are manipulating instead of relying on the defaults.
5647 */
5648 bld = fs_builder(this, 64);
5649
5650 assign_constant_locations();
5651 lower_constant_loads();
5652
5653 validate();
5654
5655 split_virtual_grfs();
5656 validate();
5657
5658 #define OPT(pass, args...) ({ \
5659 pass_num++; \
5660 bool this_progress = pass(args); \
5661 \
5662 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
5663 char filename[64]; \
5664 snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
5665 stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
5666 \
5667 backend_shader::dump_instructions(filename); \
5668 } \
5669 \
5670 validate(); \
5671 \
5672 progress = progress || this_progress; \
5673 this_progress; \
5674 })
5675
5676 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
5677 char filename[64];
5678 snprintf(filename, 64, "%s%d-%s-00-00-start",
5679 stage_abbrev, dispatch_width, nir->info.name);
5680
5681 backend_shader::dump_instructions(filename);
5682 }
5683
5684 bool progress = false;
5685 int iteration = 0;
5686 int pass_num = 0;
5687
5688 OPT(opt_drop_redundant_mov_to_flags);
5689
5690 do {
5691 progress = false;
5692 pass_num = 0;
5693 iteration++;
5694
5695 OPT(remove_duplicate_mrf_writes);
5696
5697 OPT(opt_algebraic);
5698 OPT(opt_cse);
5699 OPT(opt_copy_propagate);
5700 OPT(opt_predicated_break, this);
5701 OPT(opt_cmod_propagation);
5702 OPT(dead_code_eliminate);
5703 OPT(opt_peephole_sel);
5704 OPT(dead_control_flow_eliminate, this);
5705 OPT(opt_register_renaming);
5706 OPT(opt_saturate_propagation);
5707 OPT(register_coalesce);
5708 OPT(compute_to_mrf);
5709 OPT(eliminate_find_live_channel);
5710
5711 OPT(compact_virtual_grfs);
5712 } while (progress);
5713
5714 progress = false;
5715 pass_num = 0;
5716
5717 OPT(lower_simd_width);
5718 OPT(lower_logical_sends);
5719
5720 if (progress) {
5721 OPT(opt_copy_propagate);
5722 /* Only run after logical send lowering because it's easier to implement
5723 * in terms of physical sends.
5724 */
5725 if (OPT(opt_zero_samples))
5726 OPT(opt_copy_propagate);
5727 /* Run after logical send lowering to give it a chance to CSE the
5728 * LOAD_PAYLOAD instructions created to construct the payloads of
5729 * e.g. texturing messages in cases where it wasn't possible to CSE the
5730 * whole logical instruction.
5731 */
5732 OPT(opt_cse);
5733 OPT(register_coalesce);
5734 OPT(compute_to_mrf);
5735 OPT(dead_code_eliminate);
5736 OPT(remove_duplicate_mrf_writes);
5737 OPT(opt_peephole_sel);
5738 }
5739
5740 OPT(opt_redundant_discard_jumps);
5741 OPT(opt_sampler_eot);
5742
5743 if (OPT(lower_load_payload)) {
5744 split_virtual_grfs();
5745 OPT(register_coalesce);
5746 OPT(compute_to_mrf);
5747 OPT(dead_code_eliminate);
5748 }
5749
5750 if (OPT(lower_pack)) {
5751 OPT(register_coalesce);
5752 OPT(dead_code_eliminate);
5753 }
5754
5755 if (OPT(lower_d2x)) {
5756 OPT(opt_copy_propagate);
5757 OPT(dead_code_eliminate);
5758 }
5759
5760 OPT(opt_combine_constants);
5761 OPT(lower_integer_multiplication);
5762
5763 if (devinfo->gen <= 5 && OPT(lower_minmax)) {
5764 OPT(opt_cmod_propagation);
5765 OPT(opt_cse);
5766 OPT(opt_copy_propagate);
5767 OPT(dead_code_eliminate);
5768 }
5769
5770 lower_uniform_pull_constant_loads();
5771
5772 validate();
5773 }
5774
5775 /**
5776 * Three source instruction must have a GRF/MRF destination register.
5777 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
5778 */
5779 void
5780 fs_visitor::fixup_3src_null_dest()
5781 {
5782 bool progress = false;
5783
5784 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
5785 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
5786 inst->dst = fs_reg(VGRF, alloc.allocate(dispatch_width / 8),
5787 inst->dst.type);
5788 progress = true;
5789 }
5790 }
5791
5792 if (progress)
5793 invalidate_live_intervals();
5794 }
5795
5796 void
5797 fs_visitor::allocate_registers(bool allow_spilling)
5798 {
5799 bool allocated_without_spills;
5800
5801 static const enum instruction_scheduler_mode pre_modes[] = {
5802 SCHEDULE_PRE,
5803 SCHEDULE_PRE_NON_LIFO,
5804 SCHEDULE_PRE_LIFO,
5805 };
5806
5807 bool spill_all = allow_spilling && (INTEL_DEBUG & DEBUG_SPILL_FS);
5808
5809 /* Try each scheduling heuristic to see if it can successfully register
5810 * allocate without spilling. They should be ordered by decreasing
5811 * performance but increasing likelihood of allocating.
5812 */
5813 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
5814 schedule_instructions(pre_modes[i]);
5815
5816 if (0) {
5817 assign_regs_trivial();
5818 allocated_without_spills = true;
5819 } else {
5820 allocated_without_spills = assign_regs(false, spill_all);
5821 }
5822 if (allocated_without_spills)
5823 break;
5824 }
5825
5826 if (!allocated_without_spills) {
5827 /* We assume that any spilling is worse than just dropping back to
5828 * SIMD8. There's probably actually some intermediate point where
5829 * SIMD16 with a couple of spills is still better.
5830 */
5831 if (dispatch_width > min_dispatch_width) {
5832 fail("Failure to register allocate. Reduce number of "
5833 "live scalar values to avoid this.");
5834 } else {
5835 compiler->shader_perf_log(log_data,
5836 "%s shader triggered register spilling. "
5837 "Try reducing the number of live scalar "
5838 "values to improve performance.\n",
5839 stage_name);
5840 }
5841
5842 /* Since we're out of heuristics, just go spill registers until we
5843 * get an allocation.
5844 */
5845 while (!assign_regs(true, spill_all)) {
5846 if (failed)
5847 break;
5848 }
5849 }
5850
5851 assert(last_scratch == 0 || allow_spilling);
5852
5853 /* This must come after all optimization and register allocation, since
5854 * it inserts dead code that happens to have side effects, and it does
5855 * so based on the actual physical registers in use.
5856 */
5857 insert_gen4_send_dependency_workarounds();
5858
5859 if (failed)
5860 return;
5861
5862 schedule_instructions(SCHEDULE_POST);
5863
5864 if (last_scratch > 0)
5865 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
5866 }
5867
5868 bool
5869 fs_visitor::run_vs(gl_clip_plane *clip_planes)
5870 {
5871 assert(stage == MESA_SHADER_VERTEX);
5872
5873 setup_vs_payload();
5874
5875 if (shader_time_index >= 0)
5876 emit_shader_time_begin();
5877
5878 emit_nir_code();
5879
5880 if (failed)
5881 return false;
5882
5883 compute_clip_distance(clip_planes);
5884
5885 emit_urb_writes();
5886
5887 if (shader_time_index >= 0)
5888 emit_shader_time_end();
5889
5890 calculate_cfg();
5891
5892 optimize();
5893
5894 assign_curb_setup();
5895 assign_vs_urb_setup();
5896
5897 fixup_3src_null_dest();
5898 allocate_registers(true);
5899
5900 return !failed;
5901 }
5902
5903 bool
5904 fs_visitor::run_tcs_single_patch()
5905 {
5906 assert(stage == MESA_SHADER_TESS_CTRL);
5907
5908 struct brw_tcs_prog_data *tcs_prog_data =
5909 (struct brw_tcs_prog_data *) prog_data;
5910
5911 /* r1-r4 contain the ICP handles. */
5912 payload.num_regs = 5;
5913
5914 if (shader_time_index >= 0)
5915 emit_shader_time_begin();
5916
5917 /* Initialize gl_InvocationID */
5918 fs_reg channels_uw = bld.vgrf(BRW_REGISTER_TYPE_UW);
5919 fs_reg channels_ud = bld.vgrf(BRW_REGISTER_TYPE_UD);
5920 bld.MOV(channels_uw, fs_reg(brw_imm_uv(0x76543210)));
5921 bld.MOV(channels_ud, channels_uw);
5922
5923 if (tcs_prog_data->instances == 1) {
5924 invocation_id = channels_ud;
5925 } else {
5926 invocation_id = bld.vgrf(BRW_REGISTER_TYPE_UD);
5927
5928 /* Get instance number from g0.2 bits 23:17, and multiply it by 8. */
5929 fs_reg t = bld.vgrf(BRW_REGISTER_TYPE_UD);
5930 fs_reg instance_times_8 = bld.vgrf(BRW_REGISTER_TYPE_UD);
5931 bld.AND(t, fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD)),
5932 brw_imm_ud(INTEL_MASK(23, 17)));
5933 bld.SHR(instance_times_8, t, brw_imm_ud(17 - 3));
5934
5935 bld.ADD(invocation_id, instance_times_8, channels_ud);
5936 }
5937
5938 /* Fix the disptach mask */
5939 if (nir->info.tcs.vertices_out % 8) {
5940 bld.CMP(bld.null_reg_ud(), invocation_id,
5941 brw_imm_ud(nir->info.tcs.vertices_out), BRW_CONDITIONAL_L);
5942 bld.IF(BRW_PREDICATE_NORMAL);
5943 }
5944
5945 emit_nir_code();
5946
5947 if (nir->info.tcs.vertices_out % 8) {
5948 bld.emit(BRW_OPCODE_ENDIF);
5949 }
5950
5951 /* Emit EOT write; set TR DS Cache bit */
5952 fs_reg srcs[3] = {
5953 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
5954 fs_reg(brw_imm_ud(WRITEMASK_X << 16)),
5955 fs_reg(brw_imm_ud(0)),
5956 };
5957 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
5958 bld.LOAD_PAYLOAD(payload, srcs, 3, 2);
5959
5960 fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
5961 bld.null_reg_ud(), payload);
5962 inst->mlen = 3;
5963 inst->base_mrf = -1;
5964 inst->eot = true;
5965
5966 if (shader_time_index >= 0)
5967 emit_shader_time_end();
5968
5969 if (failed)
5970 return false;
5971
5972 calculate_cfg();
5973
5974 optimize();
5975
5976 assign_curb_setup();
5977 assign_tcs_single_patch_urb_setup();
5978
5979 fixup_3src_null_dest();
5980 allocate_registers(true);
5981
5982 return !failed;
5983 }
5984
5985 bool
5986 fs_visitor::run_tes()
5987 {
5988 assert(stage == MESA_SHADER_TESS_EVAL);
5989
5990 /* R0: thread header, R1-3: gl_TessCoord.xyz, R4: URB handles */
5991 payload.num_regs = 5;
5992
5993 if (shader_time_index >= 0)
5994 emit_shader_time_begin();
5995
5996 emit_nir_code();
5997
5998 if (failed)
5999 return false;
6000
6001 emit_urb_writes();
6002
6003 if (shader_time_index >= 0)
6004 emit_shader_time_end();
6005
6006 calculate_cfg();
6007
6008 optimize();
6009
6010 assign_curb_setup();
6011 assign_tes_urb_setup();
6012
6013 fixup_3src_null_dest();
6014 allocate_registers(true);
6015
6016 return !failed;
6017 }
6018
6019 bool
6020 fs_visitor::run_gs()
6021 {
6022 assert(stage == MESA_SHADER_GEOMETRY);
6023
6024 setup_gs_payload();
6025
6026 this->final_gs_vertex_count = vgrf(glsl_type::uint_type);
6027
6028 if (gs_compile->control_data_header_size_bits > 0) {
6029 /* Create a VGRF to store accumulated control data bits. */
6030 this->control_data_bits = vgrf(glsl_type::uint_type);
6031
6032 /* If we're outputting more than 32 control data bits, then EmitVertex()
6033 * will set control_data_bits to 0 after emitting the first vertex.
6034 * Otherwise, we need to initialize it to 0 here.
6035 */
6036 if (gs_compile->control_data_header_size_bits <= 32) {
6037 const fs_builder abld = bld.annotate("initialize control data bits");
6038 abld.MOV(this->control_data_bits, brw_imm_ud(0u));
6039 }
6040 }
6041
6042 if (shader_time_index >= 0)
6043 emit_shader_time_begin();
6044
6045 emit_nir_code();
6046
6047 emit_gs_thread_end();
6048
6049 if (shader_time_index >= 0)
6050 emit_shader_time_end();
6051
6052 if (failed)
6053 return false;
6054
6055 calculate_cfg();
6056
6057 optimize();
6058
6059 assign_curb_setup();
6060 assign_gs_urb_setup();
6061
6062 fixup_3src_null_dest();
6063 allocate_registers(true);
6064
6065 return !failed;
6066 }
6067
6068 bool
6069 fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
6070 {
6071 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
6072 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
6073
6074 assert(stage == MESA_SHADER_FRAGMENT);
6075
6076 if (devinfo->gen >= 6)
6077 setup_fs_payload_gen6();
6078 else
6079 setup_fs_payload_gen4();
6080
6081 if (0) {
6082 emit_dummy_fs();
6083 } else if (do_rep_send) {
6084 assert(dispatch_width == 16);
6085 emit_repclear_shader();
6086 } else {
6087 if (shader_time_index >= 0)
6088 emit_shader_time_begin();
6089
6090 calculate_urb_setup();
6091 if (nir->info.inputs_read > 0) {
6092 if (devinfo->gen < 6)
6093 emit_interpolation_setup_gen4();
6094 else
6095 emit_interpolation_setup_gen6();
6096 }
6097
6098 /* We handle discards by keeping track of the still-live pixels in f0.1.
6099 * Initialize it with the dispatched pixels.
6100 */
6101 if (wm_prog_data->uses_kill) {
6102 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
6103 discard_init->flag_subreg = 1;
6104 }
6105
6106 /* Generate FS IR for main(). (the visitor only descends into
6107 * functions called "main").
6108 */
6109 emit_nir_code();
6110
6111 if (failed)
6112 return false;
6113
6114 if (wm_prog_data->uses_kill)
6115 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
6116
6117 if (wm_key->alpha_test_func)
6118 emit_alpha_test();
6119
6120 emit_fb_writes();
6121
6122 if (shader_time_index >= 0)
6123 emit_shader_time_end();
6124
6125 calculate_cfg();
6126
6127 optimize();
6128
6129 assign_curb_setup();
6130 assign_urb_setup();
6131
6132 fixup_3src_null_dest();
6133 allocate_registers(allow_spilling);
6134
6135 if (failed)
6136 return false;
6137 }
6138
6139 return !failed;
6140 }
6141
6142 bool
6143 fs_visitor::run_cs()
6144 {
6145 assert(stage == MESA_SHADER_COMPUTE);
6146
6147 setup_cs_payload();
6148
6149 if (shader_time_index >= 0)
6150 emit_shader_time_begin();
6151
6152 if (devinfo->is_haswell && prog_data->total_shared > 0) {
6153 /* Move SLM index from g0.0[27:24] to sr0.1[11:8] */
6154 const fs_builder abld = bld.exec_all().group(1, 0);
6155 abld.MOV(retype(suboffset(brw_sr0_reg(), 1), BRW_REGISTER_TYPE_UW),
6156 suboffset(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW), 1));
6157 }
6158
6159 emit_nir_code();
6160
6161 if (failed)
6162 return false;
6163
6164 emit_cs_terminate();
6165
6166 if (shader_time_index >= 0)
6167 emit_shader_time_end();
6168
6169 calculate_cfg();
6170
6171 optimize();
6172
6173 assign_curb_setup();
6174
6175 fixup_3src_null_dest();
6176 allocate_registers(true);
6177
6178 if (failed)
6179 return false;
6180
6181 return !failed;
6182 }
6183
6184 /**
6185 * Return a bitfield where bit n is set if barycentric interpolation mode n
6186 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
6187 */
6188 static unsigned
6189 brw_compute_barycentric_interp_modes(const struct brw_device_info *devinfo,
6190 bool shade_model_flat,
6191 bool persample_shading,
6192 const nir_shader *shader)
6193 {
6194 unsigned barycentric_interp_modes = 0;
6195
6196 nir_foreach_variable(var, &shader->inputs) {
6197 enum glsl_interp_qualifier interp_qualifier =
6198 (enum glsl_interp_qualifier)var->data.interpolation;
6199 bool is_centroid = var->data.centroid && !persample_shading;
6200 bool is_sample = var->data.sample || persample_shading;
6201 bool is_gl_Color = (var->data.location == VARYING_SLOT_COL0) ||
6202 (var->data.location == VARYING_SLOT_COL1);
6203
6204 /* Ignore WPOS and FACE, because they don't require interpolation. */
6205 if (var->data.location == VARYING_SLOT_POS ||
6206 var->data.location == VARYING_SLOT_FACE)
6207 continue;
6208
6209 /* Determine the set (or sets) of barycentric coordinates needed to
6210 * interpolate this variable. Note that when
6211 * brw->needs_unlit_centroid_workaround is set, centroid interpolation
6212 * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
6213 * for lit pixels, so we need both sets of barycentric coordinates.
6214 */
6215 if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
6216 if (is_centroid) {
6217 barycentric_interp_modes |=
6218 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
6219 } else if (is_sample) {
6220 barycentric_interp_modes |=
6221 1 << BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
6222 }
6223 if ((!is_centroid && !is_sample) ||
6224 devinfo->needs_unlit_centroid_workaround) {
6225 barycentric_interp_modes |=
6226 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
6227 }
6228 } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
6229 (!(shade_model_flat && is_gl_Color) &&
6230 interp_qualifier == INTERP_QUALIFIER_NONE)) {
6231 if (is_centroid) {
6232 barycentric_interp_modes |=
6233 1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
6234 } else if (is_sample) {
6235 barycentric_interp_modes |=
6236 1 << BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
6237 }
6238 if ((!is_centroid && !is_sample) ||
6239 devinfo->needs_unlit_centroid_workaround) {
6240 barycentric_interp_modes |=
6241 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
6242 }
6243 }
6244 }
6245
6246 return barycentric_interp_modes;
6247 }
6248
6249 static void
6250 brw_compute_flat_inputs(struct brw_wm_prog_data *prog_data,
6251 bool shade_model_flat, const nir_shader *shader)
6252 {
6253 prog_data->flat_inputs = 0;
6254
6255 nir_foreach_variable(var, &shader->inputs) {
6256 enum glsl_interp_qualifier interp_qualifier =
6257 (enum glsl_interp_qualifier)var->data.interpolation;
6258 bool is_gl_Color = (var->data.location == VARYING_SLOT_COL0) ||
6259 (var->data.location == VARYING_SLOT_COL1);
6260
6261 int input_index = prog_data->urb_setup[var->data.location];
6262
6263 if (input_index < 0)
6264 continue;
6265
6266 /* flat shading */
6267 if (interp_qualifier == INTERP_QUALIFIER_FLAT ||
6268 (shade_model_flat && is_gl_Color &&
6269 interp_qualifier == INTERP_QUALIFIER_NONE))
6270 prog_data->flat_inputs |= (1 << input_index);
6271 }
6272 }
6273
6274 static uint8_t
6275 computed_depth_mode(const nir_shader *shader)
6276 {
6277 if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
6278 switch (shader->info.fs.depth_layout) {
6279 case FRAG_DEPTH_LAYOUT_NONE:
6280 case FRAG_DEPTH_LAYOUT_ANY:
6281 return BRW_PSCDEPTH_ON;
6282 case FRAG_DEPTH_LAYOUT_GREATER:
6283 return BRW_PSCDEPTH_ON_GE;
6284 case FRAG_DEPTH_LAYOUT_LESS:
6285 return BRW_PSCDEPTH_ON_LE;
6286 case FRAG_DEPTH_LAYOUT_UNCHANGED:
6287 return BRW_PSCDEPTH_OFF;
6288 }
6289 }
6290 return BRW_PSCDEPTH_OFF;
6291 }
6292
6293 const unsigned *
6294 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
6295 void *mem_ctx,
6296 const struct brw_wm_prog_key *key,
6297 struct brw_wm_prog_data *prog_data,
6298 const nir_shader *src_shader,
6299 struct gl_program *prog,
6300 int shader_time_index8, int shader_time_index16,
6301 bool allow_spilling,
6302 bool use_rep_send,
6303 unsigned *final_assembly_size,
6304 char **error_str)
6305 {
6306 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6307 shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
6308 true);
6309 brw_nir_lower_fs_inputs(shader);
6310 brw_nir_lower_fs_outputs(shader);
6311 shader = brw_postprocess_nir(shader, compiler->devinfo, true);
6312
6313 /* key->alpha_test_func means simulating alpha testing via discards,
6314 * so the shader definitely kills pixels.
6315 */
6316 prog_data->uses_kill = shader->info.fs.uses_discard || key->alpha_test_func;
6317 prog_data->uses_omask = key->multisample_fbo &&
6318 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
6319 prog_data->computed_depth_mode = computed_depth_mode(shader);
6320 prog_data->computed_stencil =
6321 shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
6322
6323 prog_data->persample_dispatch =
6324 key->multisample_fbo &&
6325 (key->persample_interp ||
6326 (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
6327 SYSTEM_BIT_SAMPLE_POS)) ||
6328 shader->info.fs.uses_sample_qualifier);
6329
6330 prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
6331
6332 prog_data->barycentric_interp_modes =
6333 brw_compute_barycentric_interp_modes(compiler->devinfo,
6334 key->flat_shade,
6335 key->persample_interp,
6336 shader);
6337
6338 cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
6339 uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
6340 unsigned simd8_grf_used = 0, simd16_grf_used = 0;
6341
6342 fs_visitor v8(compiler, log_data, mem_ctx, key,
6343 &prog_data->base, prog, shader, 8,
6344 shader_time_index8);
6345 if (!v8.run_fs(allow_spilling, false /* do_rep_send */)) {
6346 if (error_str)
6347 *error_str = ralloc_strdup(mem_ctx, v8.fail_msg);
6348
6349 return NULL;
6350 } else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
6351 simd8_cfg = v8.cfg;
6352 simd8_grf_start = v8.payload.num_regs;
6353 simd8_grf_used = v8.grf_used;
6354 }
6355
6356 if (v8.max_dispatch_width >= 16 &&
6357 likely(!(INTEL_DEBUG & DEBUG_NO16) || use_rep_send)) {
6358 /* Try a SIMD16 compile */
6359 fs_visitor v16(compiler, log_data, mem_ctx, key,
6360 &prog_data->base, prog, shader, 16,
6361 shader_time_index16);
6362 v16.import_uniforms(&v8);
6363 if (!v16.run_fs(allow_spilling, use_rep_send)) {
6364 compiler->shader_perf_log(log_data,
6365 "SIMD16 shader failed to compile: %s",
6366 v16.fail_msg);
6367 } else {
6368 simd16_cfg = v16.cfg;
6369 simd16_grf_start = v16.payload.num_regs;
6370 simd16_grf_used = v16.grf_used;
6371 }
6372 }
6373
6374 /* When the caller requests a repclear shader, they want SIMD16-only */
6375 if (use_rep_send)
6376 simd8_cfg = NULL;
6377
6378 /* Prior to Iron Lake, the PS had a single shader offset with a jump table
6379 * at the top to select the shader. We've never implemented that.
6380 * Instead, we just give them exactly one shader and we pick the widest one
6381 * available.
6382 */
6383 if (compiler->devinfo->gen < 5 && simd16_cfg)
6384 simd8_cfg = NULL;
6385
6386 if (prog_data->persample_dispatch) {
6387 /* Starting with SandyBridge (where we first get MSAA), the different
6388 * pixel dispatch combinations are grouped into classifications A
6389 * through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
6390 * generations, the only configurations supporting persample dispatch
6391 * are are this in which only one dispatch width is enabled.
6392 *
6393 * If computed depth is enabled, SNB only allows SIMD8 while IVB+
6394 * allow SIMD8 or SIMD16 so we choose SIMD16 if available.
6395 */
6396 if (compiler->devinfo->gen == 6 &&
6397 prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) {
6398 simd16_cfg = NULL;
6399 } else if (simd16_cfg) {
6400 simd8_cfg = NULL;
6401 }
6402 }
6403
6404 /* We have to compute the flat inputs after the visitor is finished running
6405 * because it relies on prog_data->urb_setup which is computed in
6406 * fs_visitor::calculate_urb_setup().
6407 */
6408 brw_compute_flat_inputs(prog_data, key->flat_shade, shader);
6409
6410 fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
6411 v8.promoted_constants, v8.runtime_check_aads_emit,
6412 MESA_SHADER_FRAGMENT);
6413
6414 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
6415 g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
6416 shader->info.label ? shader->info.label :
6417 "unnamed",
6418 shader->info.name));
6419 }
6420
6421 if (simd8_cfg) {
6422 prog_data->dispatch_8 = true;
6423 g.generate_code(simd8_cfg, 8);
6424 prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
6425 prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
6426
6427 if (simd16_cfg) {
6428 prog_data->dispatch_16 = true;
6429 prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
6430 prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
6431 prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
6432 }
6433 } else if (simd16_cfg) {
6434 prog_data->dispatch_16 = true;
6435 g.generate_code(simd16_cfg, 16);
6436 prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
6437 prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
6438 }
6439
6440 return g.get_assembly(final_assembly_size);
6441 }
6442
6443 fs_reg *
6444 fs_visitor::emit_cs_local_invocation_id_setup()
6445 {
6446 assert(stage == MESA_SHADER_COMPUTE);
6447
6448 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6449
6450 struct brw_reg src =
6451 brw_vec8_grf(payload.local_invocation_id_reg, 0);
6452 src = retype(src, BRW_REGISTER_TYPE_UD);
6453 bld.MOV(*reg, src);
6454 src.nr += dispatch_width / 8;
6455 bld.MOV(offset(*reg, bld, 1), src);
6456 src.nr += dispatch_width / 8;
6457 bld.MOV(offset(*reg, bld, 2), src);
6458
6459 return reg;
6460 }
6461
6462 fs_reg *
6463 fs_visitor::emit_cs_work_group_id_setup()
6464 {
6465 assert(stage == MESA_SHADER_COMPUTE);
6466
6467 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
6468
6469 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
6470 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
6471 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
6472
6473 bld.MOV(*reg, r0_1);
6474 bld.MOV(offset(*reg, bld, 1), r0_6);
6475 bld.MOV(offset(*reg, bld, 2), r0_7);
6476
6477 return reg;
6478 }
6479
6480 const unsigned *
6481 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
6482 void *mem_ctx,
6483 const struct brw_cs_prog_key *key,
6484 struct brw_cs_prog_data *prog_data,
6485 const nir_shader *src_shader,
6486 int shader_time_index,
6487 unsigned *final_assembly_size,
6488 char **error_str)
6489 {
6490 nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
6491 shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
6492 true);
6493 brw_nir_lower_cs_shared(shader);
6494 prog_data->base.total_shared += shader->num_shared;
6495 shader = brw_postprocess_nir(shader, compiler->devinfo, true);
6496
6497 prog_data->local_size[0] = shader->info.cs.local_size[0];
6498 prog_data->local_size[1] = shader->info.cs.local_size[1];
6499 prog_data->local_size[2] = shader->info.cs.local_size[2];
6500 unsigned local_workgroup_size =
6501 shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
6502 shader->info.cs.local_size[2];
6503
6504 unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
6505 unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
6506
6507 cfg_t *cfg = NULL;
6508 const char *fail_msg = NULL;
6509
6510 /* Now the main event: Visit the shader IR and generate our CS IR for it.
6511 */
6512 fs_visitor v8(compiler, log_data, mem_ctx, key, &prog_data->base,
6513 NULL, /* Never used in core profile */
6514 shader, 8, shader_time_index);
6515 if (simd_required <= 8) {
6516 if (!v8.run_cs()) {
6517 fail_msg = v8.fail_msg;
6518 } else {
6519 cfg = v8.cfg;
6520 prog_data->simd_size = 8;
6521 prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
6522 }
6523 }
6524
6525 fs_visitor v16(compiler, log_data, mem_ctx, key, &prog_data->base,
6526 NULL, /* Never used in core profile */
6527 shader, 16, shader_time_index);
6528 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
6529 !fail_msg && v8.max_dispatch_width >= 16 &&
6530 simd_required <= 16) {
6531 /* Try a SIMD16 compile */
6532 if (simd_required <= 8)
6533 v16.import_uniforms(&v8);
6534 if (!v16.run_cs()) {
6535 compiler->shader_perf_log(log_data,
6536 "SIMD16 shader failed to compile: %s",
6537 v16.fail_msg);
6538 if (!cfg) {
6539 fail_msg =
6540 "Couldn't generate SIMD16 program and not "
6541 "enough threads for SIMD8";
6542 }
6543 } else {
6544 cfg = v16.cfg;
6545 prog_data->simd_size = 16;
6546 prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
6547 }
6548 }
6549
6550 fs_visitor v32(compiler, log_data, mem_ctx, key, &prog_data->base,
6551 NULL, /* Never used in core profile */
6552 shader, 32, shader_time_index);
6553 if (!fail_msg && v8.max_dispatch_width >= 32 &&
6554 (simd_required > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
6555 /* Try a SIMD32 compile */
6556 if (simd_required <= 8)
6557 v32.import_uniforms(&v8);
6558 else if (simd_required <= 16)
6559 v32.import_uniforms(&v16);
6560
6561 if (!v32.run_cs()) {
6562 compiler->shader_perf_log(log_data,
6563 "SIMD32 shader failed to compile: %s",
6564 v16.fail_msg);
6565 if (!cfg) {
6566 fail_msg =
6567 "Couldn't generate SIMD32 program and not "
6568 "enough threads for SIMD16";
6569 }
6570 } else {
6571 cfg = v32.cfg;
6572 prog_data->simd_size = 32;
6573 }
6574 }
6575
6576 if (unlikely(cfg == NULL)) {
6577 assert(fail_msg);
6578 if (error_str)
6579 *error_str = ralloc_strdup(mem_ctx, fail_msg);
6580
6581 return NULL;
6582 }
6583
6584 fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
6585 v8.promoted_constants, v8.runtime_check_aads_emit,
6586 MESA_SHADER_COMPUTE);
6587 if (INTEL_DEBUG & DEBUG_CS) {
6588 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
6589 shader->info.label ? shader->info.label :
6590 "unnamed",
6591 shader->info.name);
6592 g.enable_debug(name);
6593 }
6594
6595 g.generate_code(cfg, prog_data->simd_size);
6596
6597 return g.get_assembly(final_assembly_size);
6598 }
6599
6600 void
6601 brw_cs_fill_local_id_payload(const struct brw_cs_prog_data *prog_data,
6602 void *buffer, uint32_t threads, uint32_t stride)
6603 {
6604 if (prog_data->local_invocation_id_regs == 0)
6605 return;
6606
6607 /* 'stride' should be an integer number of registers, that is, a multiple
6608 * of 32 bytes.
6609 */
6610 assert(stride % 32 == 0);
6611
6612 unsigned x = 0, y = 0, z = 0;
6613 for (unsigned t = 0; t < threads; t++) {
6614 uint32_t *param = (uint32_t *) buffer + stride * t / 4;
6615
6616 for (unsigned i = 0; i < prog_data->simd_size; i++) {
6617 param[0 * prog_data->simd_size + i] = x;
6618 param[1 * prog_data->simd_size + i] = y;
6619 param[2 * prog_data->simd_size + i] = z;
6620
6621 x++;
6622 if (x == prog_data->local_size[0]) {
6623 x = 0;
6624 y++;
6625 if (y == prog_data->local_size[1]) {
6626 y = 0;
6627 z++;
6628 if (z == prog_data->local_size[2])
6629 z = 0;
6630 }
6631 }
6632 }
6633 }
6634 }