intel/fs: Remove nasty open-coded CHV/BXT 64-bit workarounds.
[mesa.git] / src / intel / compiler / brw_fs_builder.h
/* -*- c++ -*- */
/*
 * Copyright © 2010-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_FS_BUILDER_H
#define BRW_FS_BUILDER_H

#include "brw_ir_fs.h"
#include "brw_shader.h"

namespace brw {
   /**
    * Toolbox to assemble an FS IR program out of individual instructions.
    *
    * This object is meant to have an interface consistent with
    * brw::vec4_builder. They cannot be fully interchangeable because
    * brw::fs_builder generates scalar code while brw::vec4_builder generates
    * vector code.
    */
   class fs_builder {
   public:
      /** Type used in this IR to represent a source of an instruction. */
      typedef fs_reg src_reg;

      /** Type used in this IR to represent the destination of an instruction. */
      typedef fs_reg dst_reg;

      /** Type used in this IR to represent an instruction. */
      typedef fs_inst instruction;

      /**
       * Construct an fs_builder that inserts instructions into \p shader.
       * \p dispatch_width gives the native execution width of the program.
       */
      fs_builder(backend_shader *shader,
                 unsigned dispatch_width) :
         shader(shader), block(NULL), cursor(NULL),
         _dispatch_width(dispatch_width),
         _group(0),
         force_writemask_all(false),
         annotation()
      {
      }

      /**
       * Construct an fs_builder that inserts instructions into \p shader
       * before instruction \p inst in basic block \p block. The default
       * execution controls and debug annotation are initialized from the
       * instruction passed as argument.
       */
      fs_builder(backend_shader *shader, bblock_t *block, fs_inst *inst) :
         shader(shader), block(block), cursor(inst),
         _dispatch_width(inst->exec_size),
         _group(inst->group),
         force_writemask_all(inst->force_writemask_all)
      {
         annotation.str = inst->annotation;
         annotation.ir = inst->ir;
      }

      /**
       * Construct an fs_builder that inserts instructions before \p cursor in
       * basic block \p block, inheriting other code generation parameters
       * from this.
       */
      fs_builder
      at(bblock_t *block, exec_node *cursor) const
      {
         fs_builder bld = *this;
         bld.block = block;
         bld.cursor = cursor;
         return bld;
      }

      /**
       * Construct an fs_builder appending instructions at the end of the
       * instruction list of the shader, inheriting other code generation
       * parameters from this.
       */
      fs_builder
      at_end() const
      {
         return at(NULL, (exec_node *)&shader->instructions.tail_sentinel);
      }

      /**
       * Construct a builder specifying the default SIMD width and group of
       * channel enable signals, inheriting other code generation parameters
       * from this.
       *
       * \p n gives the default SIMD width, \p i gives the slot group used for
       * predication and control flow masking in multiples of \p n channels.
       */
      fs_builder
      group(unsigned n, unsigned i) const
      {
         fs_builder bld = *this;

         if (n <= dispatch_width() && i < dispatch_width() / n) {
            bld._group += i * n;
         } else {
            /* The requested channel group isn't a subset of the channel group
             * of this builder, which means that the resulting instructions
             * would use (potentially undefined) channel enable signals not
             * specified by the parent builder. That's only valid if the
             * instruction doesn't have per-channel semantics, in which case
             * we should clear off the default group index in order to prevent
             * emitting instructions with a channel group not aligned to their
             * own execution size.
             */
            assert(force_writemask_all);
            bld._group = 0;
         }

         bld._dispatch_width = n;
         return bld;
      }

      /**
       * Alias for group() with width equal to eight.
       */
      fs_builder
      half(unsigned i) const
      {
         return group(8, i);
      }

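      /* Illustrative usage sketch (not part of the original header): assuming
       * `bld` is a SIMD16 builder, the two equivalent builders below emit
       * SIMD8 instructions operating on the second half of the channels:
       *
       *    const fs_builder ubld = bld.group(8, 1);
       *    const fs_builder ubld2 = bld.half(1);
       */
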
      /**
       * Construct a builder with per-channel control flow execution masking
       * disabled if \p b is true. If control flow execution masking is
       * already disabled this has no effect.
       */
      fs_builder
      exec_all(bool b = true) const
      {
         fs_builder bld = *this;
         if (b)
            bld.force_writemask_all = true;
         return bld;
      }

      /**
       * Construct a builder with the given debug annotation info.
       */
      fs_builder
      annotate(const char *str, const void *ir = NULL) const
      {
         fs_builder bld = *this;
         bld.annotation.str = str;
         bld.annotation.ir = ir;
         return bld;
      }

      /**
       * Get the SIMD width in use.
       */
      unsigned
      dispatch_width() const
      {
         return _dispatch_width;
      }

      /**
       * Get the channel group in use.
       */
      unsigned
      group() const
      {
         return _group;
      }

      /**
       * Allocate a virtual register of natural vector size (one for this IR)
       * and SIMD width. \p n gives the amount of space to allocate in
       * dispatch_width units (which is just enough space for one logical
       * component in this IR).
       */
      dst_reg
      vgrf(enum brw_reg_type type, unsigned n = 1) const
      {
         assert(dispatch_width() <= 32);

         if (n > 0)
            return dst_reg(VGRF, shader->alloc.allocate(
                              DIV_ROUND_UP(n * type_sz(type) * dispatch_width(),
                                           REG_SIZE)),
                           type);
         else
            return retype(null_reg_ud(), type);
      }

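      /* Illustrative usage sketch (not part of the original header): with a
       * hypothetical SIMD16 builder `bld`, the call below allocates one
       * logical float component, i.e. 16 * 4 bytes rounded up to a whole
       * number of registers:
       *
       *    const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F);
       */
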
      /**
       * Create a null register of floating type.
       */
      dst_reg
      null_reg_f() const
      {
         return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_F));
      }

      /**
       * Create a null register of double floating type.
       */
      dst_reg
      null_reg_df() const
      {
         return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_DF));
      }

      /**
       * Create a null register of signed integer type.
       */
      dst_reg
      null_reg_d() const
      {
         return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      }

      /**
       * Create a null register of unsigned integer type.
       */
      dst_reg
      null_reg_ud() const
      {
         return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
      }

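      /* Illustrative sketch (not part of the original header): the null
       * registers above are typically used as the destination of an
       * instruction executed only for its side effects, e.g. a comparison
       * whose only purpose is to update the flag register (`value` is a
       * hypothetical float source):
       *
       *    bld.CMP(bld.null_reg_f(), value, brw_imm_f(0.0f),
       *            BRW_CONDITIONAL_NZ);
       */
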
      /**
       * Get the mask of SIMD channels enabled by dispatch and not yet
       * disabled by discard.
       */
      src_reg
      sample_mask_reg() const
      {
         if (shader->stage != MESA_SHADER_FRAGMENT) {
            return brw_imm_d(0xffffffff);
         } else if (brw_wm_prog_data(shader->stage_prog_data)->uses_kill) {
            return brw_flag_reg(0, 1);
         } else {
            assert(shader->devinfo->gen >= 6 && dispatch_width() <= 16);
            return retype(brw_vec1_grf((_group >= 16 ? 2 : 1), 7),
                          BRW_REGISTER_TYPE_UD);
         }
      }

      /**
       * Insert an instruction into the program.
       */
      instruction *
      emit(const instruction &inst) const
      {
         return emit(new(shader->mem_ctx) instruction(inst));
      }

      /**
       * Create and insert a nullary control instruction into the program.
       */
      instruction *
      emit(enum opcode opcode) const
      {
         return emit(instruction(opcode, dispatch_width()));
      }

      /**
       * Create and insert a nullary instruction into the program.
       */
      instruction *
      emit(enum opcode opcode, const dst_reg &dst) const
      {
         return emit(instruction(opcode, dispatch_width(), dst));
      }

      /**
       * Create and insert a unary instruction into the program.
       */
      instruction *
      emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0) const
      {
         switch (opcode) {
         case SHADER_OPCODE_RCP:
         case SHADER_OPCODE_RSQ:
         case SHADER_OPCODE_SQRT:
         case SHADER_OPCODE_EXP2:
         case SHADER_OPCODE_LOG2:
         case SHADER_OPCODE_SIN:
         case SHADER_OPCODE_COS:
            return emit(instruction(opcode, dispatch_width(), dst,
                                    fix_math_operand(src0)));

         default:
            return emit(instruction(opcode, dispatch_width(), dst, src0));
         }
      }

      /**
       * Create and insert a binary instruction into the program.
       */
      instruction *
      emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0,
           const src_reg &src1) const
      {
         switch (opcode) {
         case SHADER_OPCODE_POW:
         case SHADER_OPCODE_INT_QUOTIENT:
         case SHADER_OPCODE_INT_REMAINDER:
            return emit(instruction(opcode, dispatch_width(), dst,
                                    fix_math_operand(src0),
                                    fix_math_operand(src1)));

         default:
            return emit(instruction(opcode, dispatch_width(), dst, src0, src1));
         }
      }

      /**
       * Create and insert a ternary instruction into the program.
       */
      instruction *
      emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0,
           const src_reg &src1, const src_reg &src2) const
      {
         switch (opcode) {
         case BRW_OPCODE_BFE:
         case BRW_OPCODE_BFI2:
         case BRW_OPCODE_MAD:
         case BRW_OPCODE_LRP:
            return emit(instruction(opcode, dispatch_width(), dst,
                                    fix_3src_operand(src0),
                                    fix_3src_operand(src1),
                                    fix_3src_operand(src2)));

         default:
            return emit(instruction(opcode, dispatch_width(), dst,
                                    src0, src1, src2));
         }
      }

      /**
       * Create and insert an instruction with a variable number of sources
       * into the program.
       */
      instruction *
      emit(enum opcode opcode, const dst_reg &dst, const src_reg srcs[],
           unsigned n) const
      {
         return emit(instruction(opcode, dispatch_width(), dst, srcs, n));
      }

      /**
       * Insert a preallocated instruction into the program.
       */
      instruction *
      emit(instruction *inst) const
      {
         assert(inst->exec_size <= 32);
         assert(inst->exec_size == dispatch_width() ||
                force_writemask_all);

         inst->group = _group;
         inst->force_writemask_all = force_writemask_all;
         inst->annotation = annotation.str;
         inst->ir = annotation.ir;

         if (block)
            static_cast<instruction *>(cursor)->insert_before(block, inst);
         else
            cursor->insert_before(inst);

         return inst;
      }

      /**
       * Select \p src0 if the comparison of both sources with the given
       * conditional mod evaluates to true, otherwise select \p src1.
       *
       * Generally useful to get the minimum or maximum of two values.
       */
      instruction *
      emit_minmax(const dst_reg &dst, const src_reg &src0,
                  const src_reg &src1, brw_conditional_mod mod) const
      {
         assert(mod == BRW_CONDITIONAL_GE || mod == BRW_CONDITIONAL_L);

         return set_condmod(mod, SEL(dst, fix_unsigned_negate(src0),
                                     fix_unsigned_negate(src1)));
      }

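      /* Illustrative sketch (not part of the original header): assuming `a`
       * and `b` are float sources, the call below emits a SEL.GE computing
       * max(a, b); BRW_CONDITIONAL_L would give min(a, b) instead:
       *
       *    bld.emit_minmax(dst, a, b, BRW_CONDITIONAL_GE);
       */
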
      /**
       * Copy any live channel from \p src to the first channel of the result.
       */
      src_reg
      emit_uniformize(const src_reg &src) const
      {
         /* FIXME: We use a vector chan_index and dst to allow constant and
          * copy propagation to move the result all the way into the consuming
          * instruction (typically a surface index or sampler index for a
          * send). This uses 1 or 3 extra hw registers in 16 or 32 wide
          * dispatch. Once we teach const/copy propagation about scalars we
          * should go back to scalar destinations here.
          */
         const fs_builder ubld = exec_all();
         const dst_reg chan_index = vgrf(BRW_REGISTER_TYPE_UD);
         const dst_reg dst = vgrf(src.type);

         ubld.emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, chan_index)->flag_subreg = 2;
         ubld.emit(SHADER_OPCODE_BROADCAST, dst, src, component(chan_index, 0));

         return src_reg(component(dst, 0));
      }

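      /* Illustrative sketch (not part of the original header): a divergent
       * surface index can be made uniform before being consumed by a
       * send-like opcode, assuming `surface` is a per-channel UD value:
       *
       *    const fs_reg uniform_surface = bld.emit_uniformize(surface);
       */
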
      /**
       * Perform an in-place inclusive scan of \p tmp, combining channels
       * within clusters of \p cluster_size channels using the binary
       * operation \p opcode with conditional modifier \p mod.
       */
      void
      emit_scan(enum opcode opcode, const dst_reg &tmp,
                unsigned cluster_size, brw_conditional_mod mod) const
      {
         assert(dispatch_width() >= 8);

         /* The instruction splitting code isn't advanced enough to split
          * these so we need to handle that ourselves.
          */
         if (dispatch_width() * type_sz(tmp.type) > 2 * REG_SIZE) {
            const unsigned half_width = dispatch_width() / 2;
            const fs_builder ubld = exec_all().group(half_width, 0);
            dst_reg left = tmp;
            dst_reg right = horiz_offset(tmp, half_width);
            ubld.emit_scan(opcode, left, cluster_size, mod);
            ubld.emit_scan(opcode, right, cluster_size, mod);
            if (cluster_size > half_width) {
               src_reg left_comp = component(left, half_width - 1);
               set_condmod(mod, ubld.emit(opcode, right, left_comp, right));
            }
            return;
         }

         if (cluster_size > 1) {
            const fs_builder ubld = exec_all().group(dispatch_width() / 2, 0);
            const dst_reg left = horiz_stride(tmp, 2);
            const dst_reg right = horiz_stride(horiz_offset(tmp, 1), 2);
            set_condmod(mod, ubld.emit(opcode, right, left, right));
         }

         if (cluster_size > 2) {
            if (type_sz(tmp.type) <= 4) {
               const fs_builder ubld =
                  exec_all().group(dispatch_width() / 4, 0);
               src_reg left = horiz_stride(horiz_offset(tmp, 1), 4);
               dst_reg right = horiz_stride(horiz_offset(tmp, 2), 4);
               set_condmod(mod, ubld.emit(opcode, right, left, right));

               right = horiz_stride(horiz_offset(tmp, 3), 4);
               set_condmod(mod, ubld.emit(opcode, right, left, right));
            } else {
               /* For 64-bit types, we have to do things differently because
                * the code above would land us with destination strides that
                * the hardware can't handle. Fortunately, we'll only be
                * 8-wide in that case and it's the same number of
                * instructions.
                */
               const fs_builder ubld = exec_all().group(2, 0);

               for (unsigned i = 0; i < dispatch_width(); i += 4) {
                  src_reg left = component(tmp, i + 1);
                  dst_reg right = horiz_offset(tmp, i + 2);
                  set_condmod(mod, ubld.emit(opcode, right, left, right));
               }
            }
         }

         if (cluster_size > 4) {
            const fs_builder ubld = exec_all().group(4, 0);
            src_reg left = component(tmp, 3);
            dst_reg right = horiz_offset(tmp, 4);
            set_condmod(mod, ubld.emit(opcode, right, left, right));

            if (dispatch_width() > 8) {
               left = component(tmp, 8 + 3);
               right = horiz_offset(tmp, 8 + 4);
               set_condmod(mod, ubld.emit(opcode, right, left, right));
            }
         }

         if (cluster_size > 8 && dispatch_width() > 8) {
            const fs_builder ubld = exec_all().group(8, 0);
            src_reg left = component(tmp, 7);
            dst_reg right = horiz_offset(tmp, 8);
            set_condmod(mod, ubld.emit(opcode, right, left, right));
         }
      }

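      /* Illustrative sketch (not part of the original header): an inclusive
       * add-scan across a whole SIMD16 cluster, assuming `tmp` already holds
       * the per-channel values to be combined:
       *
       *    bld.MOV(tmp, value);
       *    bld.emit_scan(BRW_OPCODE_ADD, tmp, 16, BRW_CONDITIONAL_NONE);
       */
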
      /**
       * Assorted arithmetic ops.
       * @{
       */
#define ALU1(op)                                                  \
      instruction *                                               \
      op(const dst_reg &dst, const src_reg &src0) const           \
      {                                                           \
         return emit(BRW_OPCODE_##op, dst, src0);                 \
      }

#define ALU2(op)                                                              \
      instruction *                                                           \
      op(const dst_reg &dst, const src_reg &src0, const src_reg &src1) const  \
      {                                                                       \
         return emit(BRW_OPCODE_##op, dst, src0, src1);                       \
      }

#define ALU2_ACC(op)                                                          \
      instruction *                                                           \
      op(const dst_reg &dst, const src_reg &src0, const src_reg &src1) const  \
      {                                                                       \
         instruction *inst = emit(BRW_OPCODE_##op, dst, src0, src1);          \
         inst->writes_accumulator = true;                                     \
         return inst;                                                         \
      }

#define ALU3(op)                                                              \
      instruction *                                                           \
      op(const dst_reg &dst, const src_reg &src0, const src_reg &src1,        \
         const src_reg &src2) const                                           \
      {                                                                       \
         return emit(BRW_OPCODE_##op, dst, src0, src1, src2);                 \
      }

      ALU2(ADD)
      ALU2_ACC(ADDC)
      ALU2(AND)
      ALU2(ASR)
      ALU2(AVG)
      ALU3(BFE)
      ALU2(BFI1)
      ALU3(BFI2)
      ALU1(BFREV)
      ALU1(CBIT)
      ALU2(CMPN)
      ALU1(DIM)
      ALU2(DP2)
      ALU2(DP3)
      ALU2(DP4)
      ALU2(DPH)
      ALU1(F16TO32)
      ALU1(F32TO16)
      ALU1(FBH)
      ALU1(FBL)
      ALU1(FRC)
      ALU2(LINE)
      ALU1(LZD)
      ALU2(MAC)
      ALU2_ACC(MACH)
      ALU3(MAD)
      ALU1(MOV)
      ALU2(MUL)
      ALU1(NOT)
      ALU2(OR)
      ALU2(PLN)
      ALU1(RNDD)
      ALU1(RNDE)
      ALU1(RNDU)
      ALU1(RNDZ)
      ALU2(SAD2)
      ALU2_ACC(SADA2)
      ALU2(SEL)
      ALU2(SHL)
      ALU2(SHR)
      ALU2_ACC(SUBB)
      ALU2(XOR)

#undef ALU3
#undef ALU2_ACC
#undef ALU2
#undef ALU1
      /** @} */

      /**
       * CMP: Sets the low bit of the destination channels with the result
       * of the comparison, while the upper bits are undefined, and updates
       * the flag register with the packed 16 bits of the result.
       */
      instruction *
      CMP(const dst_reg &dst, const src_reg &src0, const src_reg &src1,
          brw_conditional_mod condition) const
      {
         /* Take the instruction:
          *
          * CMP null<d> src0<f> src1<f>
          *
          * Original gen4 does type conversion to the destination type
          * before comparison, producing garbage results for floating
          * point comparisons.
          *
          * The destination type doesn't matter on newer generations,
          * so we set the type to match src0 so we can compact the
          * instruction.
          */
         return set_condmod(condition,
                            emit(BRW_OPCODE_CMP, retype(dst, src0.type),
                                 fix_unsigned_negate(src0),
                                 fix_unsigned_negate(src1)));
      }

      /**
       * Gen4 predicated IF.
       */
      instruction *
      IF(brw_predicate predicate) const
      {
         return set_predicate(predicate, emit(BRW_OPCODE_IF));
      }

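      /* Illustrative sketch (not part of the original header): a typical
       * comparison feeding structured control flow, assuming `x` is a float
       * source. The CMP updates the flag register and the IF consumes it:
       *
       *    bld.CMP(bld.null_reg_f(), x, brw_imm_f(0.0f), BRW_CONDITIONAL_G);
       *    bld.IF(BRW_PREDICATE_NORMAL);
       *    ...
       *    bld.emit(BRW_OPCODE_ENDIF);
       */
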
      /**
       * CSEL: dst = src2 <op> 0.0f ? src0 : src1
       */
      instruction *
      CSEL(const dst_reg &dst, const src_reg &src0, const src_reg &src1,
           const src_reg &src2, brw_conditional_mod condition) const
      {
         /* CSEL only operates on floats, so we can't do integer </<=/>=/>
          * comparisons. Zero/non-zero (== and !=) comparisons almost work.
          * 0x80000000 fails because it is -0.0, and -0.0 == 0.0.
          */
         assert(src2.type == BRW_REGISTER_TYPE_F);

         return set_condmod(condition,
                            emit(BRW_OPCODE_CSEL,
                                 retype(dst, BRW_REGISTER_TYPE_F),
                                 retype(src0, BRW_REGISTER_TYPE_F),
                                 retype(src1, BRW_REGISTER_TYPE_F),
                                 src2));
      }

      /**
       * Emit a linear interpolation instruction.
       */
      instruction *
      LRP(const dst_reg &dst, const src_reg &x, const src_reg &y,
          const src_reg &a) const
      {
         if (shader->devinfo->gen >= 6 && shader->devinfo->gen <= 10) {
            /* The LRP instruction actually does op1 * op0 + op2 * (1 - op0), so
             * we need to reorder the operands.
             */
            return emit(BRW_OPCODE_LRP, dst, a, y, x);

         } else {
            /* We can't use the LRP instruction. Emit x*(1-a) + y*a. */
            const dst_reg y_times_a = vgrf(dst.type);
            const dst_reg one_minus_a = vgrf(dst.type);
            const dst_reg x_times_one_minus_a = vgrf(dst.type);

            MUL(y_times_a, y, a);
            ADD(one_minus_a, negate(a), brw_imm_f(1.0f));
            MUL(x_times_one_minus_a, x, src_reg(one_minus_a));
            return ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a));
         }
      }

      /**
       * Collect a number of registers in a contiguous range of registers.
       */
      instruction *
      LOAD_PAYLOAD(const dst_reg &dst, const src_reg *src,
                   unsigned sources, unsigned header_size) const
      {
         instruction *inst = emit(SHADER_OPCODE_LOAD_PAYLOAD, dst, src, sources);
         inst->header_size = header_size;
         inst->size_written = header_size * REG_SIZE;
         for (unsigned i = header_size; i < sources; i++) {
            inst->size_written +=
               ALIGN(dispatch_width() * type_sz(src[i].type) * dst.stride,
                     REG_SIZE);
         }

         return inst;
      }

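      /* Illustrative sketch (not part of the original header): gathering two
       * per-channel sources into a contiguous payload with no header before a
       * send, assuming `payload` was allocated with vgrf(..., 2) and `coord`
       * and `lod` are hypothetical per-channel sources:
       *
       *    const fs_reg srcs[] = { coord, lod };
       *    bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
       */
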
      backend_shader *shader;

   private:
      /**
       * Workaround for negation of UD registers. See comment in
       * fs_generator::generate_code() for more details.
       */
      src_reg
      fix_unsigned_negate(const src_reg &src) const
      {
         if (src.type == BRW_REGISTER_TYPE_UD &&
             src.negate) {
            dst_reg temp = vgrf(BRW_REGISTER_TYPE_UD);
            MOV(temp, src);
            return src_reg(temp);
         } else {
            return src;
         }
      }

      /**
       * Workaround for source register modes not supported by the ternary
       * instruction encoding.
       */
      src_reg
      fix_3src_operand(const src_reg &src) const
      {
         if (src.file == VGRF || src.file == UNIFORM || src.stride > 1) {
            return src;
         } else {
            dst_reg expanded = vgrf(src.type);
            MOV(expanded, src);
            return expanded;
         }
      }

      /**
       * Workaround for source register modes not supported by the math
       * instruction.
       */
      src_reg
      fix_math_operand(const src_reg &src) const
      {
         /* Can't do hstride == 0 args on gen6 math, so expand it out. We
          * might be able to do better by doing execsize = 1 math and then
          * expanding that result out, but we would need to be careful with
          * masking.
          *
          * Gen6 hardware ignores source modifiers (negate and abs) on math
          * instructions, so we also move to a temp to set those up.
          *
          * Gen7 relaxes most of the above restrictions, but still can't use
          * IMM operands to math instructions.
          */
         if ((shader->devinfo->gen == 6 &&
              (src.file == IMM || src.file == UNIFORM ||
               src.abs || src.negate)) ||
             (shader->devinfo->gen == 7 && src.file == IMM)) {
            const dst_reg tmp = vgrf(src.type);
            MOV(tmp, src);
            return tmp;
         } else {
            return src;
         }
      }

      bblock_t *block;
      exec_node *cursor;

      unsigned _dispatch_width;
      unsigned _group;
      bool force_writemask_all;

      /** Debug annotation info. */
      struct {
         const char *str;
         const void *ir;
      } annotation;
   };
}

#endif