glsl: Add GLSL_TYPE_FUNCTION to the base types enums
[mesa.git] src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_vec4.h"
25 #include "brw_cfg.h"
26 #include "glsl/ir_uniform.h"
27 #include "program/sampler.h"
28
29 namespace brw {
30
31 vec4_instruction::vec4_instruction(enum opcode opcode, const dst_reg &dst,
32 const src_reg &src0, const src_reg &src1,
33 const src_reg &src2)
34 {
35 this->opcode = opcode;
36 this->dst = dst;
37 this->src[0] = src0;
38 this->src[1] = src1;
39 this->src[2] = src2;
40 this->saturate = false;
41 this->force_writemask_all = false;
42 this->no_dd_clear = false;
43 this->no_dd_check = false;
44 this->writes_accumulator = false;
45 this->conditional_mod = BRW_CONDITIONAL_NONE;
46 this->predicate = BRW_PREDICATE_NONE;
47 this->predicate_inverse = false;
48 this->target = 0;
49 this->regs_written = (dst.file == BAD_FILE ? 0 : 1);
50 this->shadow_compare = false;
51 this->ir = NULL;
52 this->urb_write_flags = BRW_URB_WRITE_NO_FLAGS;
53 this->header_size = 0;
54 this->flag_subreg = 0;
55 this->mlen = 0;
56 this->base_mrf = 0;
57 this->offset = 0;
58 this->annotation = NULL;
59 }
60
61 vec4_instruction *
62 vec4_visitor::emit(vec4_instruction *inst)
63 {
64 inst->ir = this->base_ir;
65 inst->annotation = this->current_annotation;
66
67 this->instructions.push_tail(inst);
68
69 return inst;
70 }
71
72 vec4_instruction *
73 vec4_visitor::emit_before(bblock_t *block, vec4_instruction *inst,
74 vec4_instruction *new_inst)
75 {
76 new_inst->ir = inst->ir;
77 new_inst->annotation = inst->annotation;
78
79 inst->insert_before(block, new_inst);
80
81 return inst;
82 }
83
84 vec4_instruction *
85 vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0,
86 const src_reg &src1, const src_reg &src2)
87 {
88 return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0, src1, src2));
89 }
90
91
92 vec4_instruction *
93 vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0,
94 const src_reg &src1)
95 {
96 return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0, src1));
97 }
98
99 vec4_instruction *
100 vec4_visitor::emit(enum opcode opcode, const dst_reg &dst, const src_reg &src0)
101 {
102 return emit(new(mem_ctx) vec4_instruction(opcode, dst, src0));
103 }
104
105 vec4_instruction *
106 vec4_visitor::emit(enum opcode opcode, const dst_reg &dst)
107 {
108 return emit(new(mem_ctx) vec4_instruction(opcode, dst));
109 }
110
111 vec4_instruction *
112 vec4_visitor::emit(enum opcode opcode)
113 {
114 return emit(new(mem_ctx) vec4_instruction(opcode, dst_reg()));
115 }
116
117 #define ALU1(op) \
118 vec4_instruction * \
119 vec4_visitor::op(const dst_reg &dst, const src_reg &src0) \
120 { \
121 return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, src0); \
122 }
123
124 #define ALU2(op) \
125 vec4_instruction * \
126 vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \
127 const src_reg &src1) \
128 { \
129 return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, \
130 src0, src1); \
131 }
132
133 #define ALU2_ACC(op) \
134 vec4_instruction * \
135 vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \
136 const src_reg &src1) \
137 { \
138 vec4_instruction *inst = new(mem_ctx) vec4_instruction( \
139 BRW_OPCODE_##op, dst, src0, src1); \
140 inst->writes_accumulator = true; \
141 return inst; \
142 }
143
144 #define ALU3(op) \
145 vec4_instruction * \
146 vec4_visitor::op(const dst_reg &dst, const src_reg &src0, \
147 const src_reg &src1, const src_reg &src2) \
148 { \
149 assert(devinfo->gen >= 6); \
150 return new(mem_ctx) vec4_instruction(BRW_OPCODE_##op, dst, \
151 src0, src1, src2); \
152 }
153
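/* Each ALUn(op) invocation below expands to a factory method; for example
 * ALU2(ADD) defines
 *
 *    vec4_instruction *
 *    vec4_visitor::ADD(const dst_reg &dst, const src_reg &src0,
 *                      const src_reg &src1)
 *
 * which only allocates a BRW_OPCODE_ADD vec4_instruction -- callers still
 * pass the result to emit() to append it to the instruction list.
 */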
154 ALU1(NOT)
155 ALU1(MOV)
156 ALU1(FRC)
157 ALU1(RNDD)
158 ALU1(RNDE)
159 ALU1(RNDZ)
160 ALU1(F32TO16)
161 ALU1(F16TO32)
162 ALU2(ADD)
163 ALU2(MUL)
164 ALU2_ACC(MACH)
165 ALU2(AND)
166 ALU2(OR)
167 ALU2(XOR)
168 ALU2(DP3)
169 ALU2(DP4)
170 ALU2(DPH)
171 ALU2(SHL)
172 ALU2(SHR)
173 ALU2(ASR)
174 ALU3(LRP)
175 ALU1(BFREV)
176 ALU3(BFE)
177 ALU2(BFI1)
178 ALU3(BFI2)
179 ALU1(FBH)
180 ALU1(FBL)
181 ALU1(CBIT)
182 ALU3(MAD)
183 ALU2_ACC(ADDC)
184 ALU2_ACC(SUBB)
185 ALU2(MAC)
186
187 /** Gen4 predicated IF. */
188 vec4_instruction *
189 vec4_visitor::IF(enum brw_predicate predicate)
190 {
191 vec4_instruction *inst;
192
193 inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_IF);
194 inst->predicate = predicate;
195
196 return inst;
197 }
198
199 /** Gen6 IF with embedded comparison. */
200 vec4_instruction *
201 vec4_visitor::IF(src_reg src0, src_reg src1,
202 enum brw_conditional_mod condition)
203 {
204 assert(devinfo->gen == 6);
205
206 vec4_instruction *inst;
207
208 resolve_ud_negate(&src0);
209 resolve_ud_negate(&src1);
210
211 inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_IF, dst_null_d(),
212 src0, src1);
213 inst->conditional_mod = condition;
214
215 return inst;
216 }
217
218 /**
219 * CMP: Sets the low bit of the destination channels with the result
220 * of the comparison, while the upper bits are undefined, and updates
221 * the flag register with the packed 16 bits of the result.
222 */
223 vec4_instruction *
224 vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1,
225 enum brw_conditional_mod condition)
226 {
227 vec4_instruction *inst;
228
229 /* Take the instruction:
230 *
231 * CMP null<d> src0<f> src1<f>
232 *
233 * Original gen4 does type conversion to the destination type before
234 * comparison, producing garbage results for floating point comparisons.
235 *
236 * The destination type doesn't matter on newer generations, so we set the
237 * type to match src0 so we can compact the instruction.
238 */
239 dst.type = src0.type;
240 if (dst.file == HW_REG)
241 dst.fixed_hw_reg.type = dst.type;
242
243 resolve_ud_negate(&src0);
244 resolve_ud_negate(&src1);
245
246 inst = new(mem_ctx) vec4_instruction(BRW_OPCODE_CMP, dst, src0, src1);
247 inst->conditional_mod = condition;
248
249 return inst;
250 }
251
252 vec4_instruction *
253 vec4_visitor::SCRATCH_READ(const dst_reg &dst, const src_reg &index)
254 {
255 vec4_instruction *inst;
256
257 inst = new(mem_ctx) vec4_instruction(SHADER_OPCODE_GEN4_SCRATCH_READ,
258 dst, index);
259 inst->base_mrf = 14;
260 inst->mlen = 2;
261
262 return inst;
263 }
264
265 vec4_instruction *
266 vec4_visitor::SCRATCH_WRITE(const dst_reg &dst, const src_reg &src,
267 const src_reg &index)
268 {
269 vec4_instruction *inst;
270
271 inst = new(mem_ctx) vec4_instruction(SHADER_OPCODE_GEN4_SCRATCH_WRITE,
272 dst, src, index);
273 inst->base_mrf = 13;
274 inst->mlen = 3;
275
276 return inst;
277 }
278
279 void
280 vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
281 {
282 static enum opcode dot_opcodes[] = {
283 BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
284 };
285
286 emit(dot_opcodes[elements - 2], dst, src0, src1);
287 }
288
289 src_reg
290 vec4_visitor::fix_3src_operand(src_reg src)
291 {
292 /* Using vec4 uniforms in SIMD4x2 programs is difficult. You'd like to be
293 * able to use vertical stride of zero to replicate the vec4 uniform, like
294 *
295 * g3<0;4,1>:f - [0, 4][1, 5][2, 6][3, 7]
296 *
297 * But you can't, since vertical stride is always four in three-source
298 * instructions. Instead, insert a MOV instruction to do the replication so
299 * that the three-source instruction can consume it.
300 */
301
302 /* The MOV is only needed if the source is a uniform or immediate. */
303 if (src.file != UNIFORM && src.file != IMM)
304 return src;
305
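/* A uniform that is already swizzled down to a single component
 * (.xxxx, .yyyy, ...) reads the same value in every channel, so it can be
 * consumed directly; only "full" vec4 uniforms and immediates need the
 * copy below.
 */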
306 if (src.file == UNIFORM && brw_is_single_value_swizzle(src.swizzle))
307 return src;
308
309 dst_reg expanded = dst_reg(this, glsl_type::vec4_type);
310 expanded.type = src.type;
311 emit(VEC4_OPCODE_UNPACK_UNIFORM, expanded, src);
312 return src_reg(expanded);
313 }
314
315 src_reg
316 vec4_visitor::fix_math_operand(src_reg src)
317 {
318 if (devinfo->gen < 6 || devinfo->gen >= 8 || src.file == BAD_FILE)
319 return src;
320
321 /* The gen6 math instruction ignores the source modifiers --
322 * swizzle, abs, negate, and at least some parts of the register
323 * region description.
324 *
325 * Rather than trying to enumerate all these cases, *always* expand the
326 * operand to a temp GRF for gen6.
327 *
328 * For gen7, keep the operand as-is, except if immediate, which gen7 still
329 * can't use.
330 */
331
332 if (devinfo->gen == 7 && src.file != IMM)
333 return src;
334
335 dst_reg expanded = dst_reg(this, glsl_type::vec4_type);
336 expanded.type = src.type;
337 emit(MOV(expanded, src));
338 return src_reg(expanded);
339 }
340
341 void
342 vec4_visitor::emit_math(enum opcode opcode,
343 const dst_reg &dst,
344 const src_reg &src0, const src_reg &src1)
345 {
346 vec4_instruction *math =
347 emit(opcode, dst, fix_math_operand(src0), fix_math_operand(src1));
348
349 if (devinfo->gen == 6 && dst.writemask != WRITEMASK_XYZW) {
350 /* MATH on Gen6 must be align1, so we can't do writemasks. */
351 math->dst = dst_reg(this, glsl_type::vec4_type);
352 math->dst.type = dst.type;
353 emit(MOV(dst, src_reg(math->dst)));
354 } else if (devinfo->gen < 6) {
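/* Math on the original Gen4/5 hardware is a send-style operation whose
 * operands live in MRFs, so record where the generator should build the
 * message payload: one register for single-operand math, two when src1
 * is present.
 */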
355 math->base_mrf = 1;
356 math->mlen = src1.file == BAD_FILE ? 1 : 2;
357 }
358 }
359
360 void
361 vec4_visitor::emit_pack_half_2x16(dst_reg dst, src_reg src0)
362 {
363 if (devinfo->gen < 7) {
364 unreachable("ir_unop_pack_half_2x16 should be lowered");
365 }
366
367 assert(dst.type == BRW_REGISTER_TYPE_UD);
368 assert(src0.type == BRW_REGISTER_TYPE_F);
369
370 /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
371 *
372 * Because this instruction does not have a 16-bit floating-point type,
373 * the destination data type must be Word (W).
374 *
375 * The destination must be DWord-aligned and specify a horizontal stride
376 * (HorzStride) of 2. The 16-bit result is stored in the lower word of
377 * each destination channel and the upper word is not modified.
378 *
379 * The above restriction implies that the f32to16 instruction must use
380 * align1 mode, because only in align1 mode is it possible to specify
381 * horizontal stride. We choose here to defy the hardware docs and emit
382 * align16 instructions.
383 *
384 * (I [chadv] did attempt to emit align1 instructions for VS f32to16
385 * instructions. I was partially successful in that the code passed all
386 * tests. However, the code was dubiously correct and fragile, and the
387 * tests were not harsh enough to probe that frailty. Not trusting the
388 * code, I chose instead to remain in align16 mode in defiance of the hw
389 * docs).
390 *
391 * I've [chadv] experimentally confirmed that, on gen7 hardware and the
392 * simulator, emitting a f32to16 in align16 mode with UD as destination
393 * data type is safe. The behavior differs from that specified in the PRM
394 * in that the upper word of each destination channel is cleared to 0.
395 */
396
397 dst_reg tmp_dst(this, glsl_type::uvec2_type);
398 src_reg tmp_src(tmp_dst);
399
400 #if 0
401 /* Verify the undocumented behavior on which the following instructions
402 * rely. If f32to16 fails to clear the upper word of the X and Y channels,
403 * then the result of the bit-or instruction below will be incorrect.
404 *
405 * You should inspect the disasm output in order to verify that the MOV is
406 * not optimized away.
407 */
408 emit(MOV(tmp_dst, src_reg(0x12345678u)));
409 #endif
410
411 /* Give tmp the form below, where "." means untouched.
412 *
413 * w z y x w z y x
414 * |.|.|0x0000hhhh|0x0000llll|.|.|0x0000hhhh|0x0000llll|
415 *
416 * That the upper word of each write-channel be 0 is required for the
417 * following bit-shift and bit-or instructions to work. Note that this
418 * relies on the undocumented hardware behavior mentioned above.
419 */
420 tmp_dst.writemask = WRITEMASK_XY;
421 emit(F32TO16(tmp_dst, src0));
422
423 /* Give the write-channels of dst the form:
424 * 0xhhhh0000
425 */
426 tmp_src.swizzle = BRW_SWIZZLE_YYYY;
427 emit(SHL(dst, tmp_src, src_reg(16u)));
428
429 /* Finally, give the write-channels of dst the form of packHalf2x16's
430 * output:
431 * 0xhhhhllll
432 */
433 tmp_src.swizzle = BRW_SWIZZLE_XXXX;
434 emit(OR(dst, src_reg(dst), tmp_src));
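/* For example, packing vec2(1.0, 2.0): F32TO16 leaves 0x00003c00 in tmp.x
 * and 0x00004000 in tmp.y, the SHL of the .y value produces 0x40000000,
 * and the final OR yields 0x40003c00, which is packHalf2x16's result.
 */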
435 }
436
437 void
438 vec4_visitor::emit_unpack_half_2x16(dst_reg dst, src_reg src0)
439 {
440 if (devinfo->gen < 7) {
441 unreachable("ir_unop_unpack_half_2x16 should be lowered");
442 }
443
444 assert(dst.type == BRW_REGISTER_TYPE_F);
445 assert(src0.type == BRW_REGISTER_TYPE_UD);
446
447 /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
448 *
449 * Because this instruction does not have a 16-bit floating-point type,
450 * the source data type must be Word (W). The destination type must be
451 * F (Float).
452 *
453 * To use W as the source data type, we must adjust horizontal strides,
454 * which is only possible in align1 mode. All my [chadv] attempts at
455 * emitting align1 instructions for unpackHalf2x16 failed to pass the
456 * Piglit tests, so I gave up.
457 *
458 * I've verified that, on gen7 hardware and the simulator, it is safe to
459 * emit f16to32 in align16 mode with UD as source data type.
460 */
461
462 dst_reg tmp_dst(this, glsl_type::uvec2_type);
463 src_reg tmp_src(tmp_dst);
464
465 tmp_dst.writemask = WRITEMASK_X;
466 emit(AND(tmp_dst, src0, src_reg(0xffffu)));
467
468 tmp_dst.writemask = WRITEMASK_Y;
469 emit(SHR(tmp_dst, src0, src_reg(16u)));
470
471 dst.writemask = WRITEMASK_XY;
472 emit(F16TO32(dst, tmp_src));
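/* For example, src0 = 0x40003c00: the AND leaves 0x00003c00 in tmp.x, the
 * SHR leaves 0x00004000 in tmp.y, and F16TO32 expands them to (1.0, 2.0),
 * matching unpackHalf2x16().
 */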
473 }
474
475 void
476 vec4_visitor::emit_unpack_unorm_4x8(const dst_reg &dst, src_reg src0)
477 {
478 /* Instead of splitting the 32-bit integer, shifting, and ORing it back
479 * together, we can shift it by <0, 8, 16, 24>. The packed integer immediate
480 * is not suitable to generate the shift values, but we can use the packed
481 * vector float and a type-converting MOV.
482 */
483 dst_reg shift(this, glsl_type::uvec4_type);
484 emit(MOV(shift, src_reg(0x00, 0x60, 0x70, 0x78)));
485
486 dst_reg shifted(this, glsl_type::uvec4_type);
487 src0.swizzle = BRW_SWIZZLE_XXXX;
488 emit(SHR(shifted, src0, src_reg(shift)));
489
490 shifted.type = BRW_REGISTER_TYPE_UB;
491 dst_reg f(this, glsl_type::vec4_type);
492 emit(VEC4_OPCODE_MOV_BYTES, f, src_reg(shifted));
493
494 emit(MUL(dst, src_reg(f), src_reg(1.0f / 255.0f)));
495 }
496
497 void
498 vec4_visitor::emit_unpack_snorm_4x8(const dst_reg &dst, src_reg src0)
499 {
500 /* Instead of splitting the 32-bit integer, shifting, and ORing it back
501 * together, we can shift it by <0, 8, 16, 24>. The packed integer immediate
502 * is not suitable to generate the shift values, but we can use the packed
503 * vector float and a type-converting MOV.
504 */
505 dst_reg shift(this, glsl_type::uvec4_type);
506 emit(MOV(shift, src_reg(0x00, 0x60, 0x70, 0x78)));
507
508 dst_reg shifted(this, glsl_type::uvec4_type);
509 src0.swizzle = BRW_SWIZZLE_XXXX;
510 emit(SHR(shifted, src0, src_reg(shift)));
511
512 shifted.type = BRW_REGISTER_TYPE_B;
513 dst_reg f(this, glsl_type::vec4_type);
514 emit(VEC4_OPCODE_MOV_BYTES, f, src_reg(shifted));
515
516 dst_reg scaled(this, glsl_type::vec4_type);
517 emit(MUL(scaled, src_reg(f), src_reg(1.0f / 127.0f)));
518
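/* Clamp to [-1, 1]: the byte value -128 scales to roughly -1.008, and
 * unpackSnorm4x8 is specified as clamp(b / 127.0, -1.0, +1.0).
 */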
519 dst_reg max(this, glsl_type::vec4_type);
520 emit_minmax(BRW_CONDITIONAL_GE, max, src_reg(scaled), src_reg(-1.0f));
521 emit_minmax(BRW_CONDITIONAL_L, dst, src_reg(max), src_reg(1.0f));
522 }
523
524 void
525 vec4_visitor::emit_pack_unorm_4x8(const dst_reg &dst, const src_reg &src0)
526 {
527 dst_reg saturated(this, glsl_type::vec4_type);
528 vec4_instruction *inst = emit(MOV(saturated, src0));
529 inst->saturate = true;
530
531 dst_reg scaled(this, glsl_type::vec4_type);
532 emit(MUL(scaled, src_reg(saturated), src_reg(255.0f)));
533
534 dst_reg rounded(this, glsl_type::vec4_type);
535 emit(RNDE(rounded, src_reg(scaled)));
536
537 dst_reg u(this, glsl_type::uvec4_type);
538 emit(MOV(u, src_reg(rounded)));
539
540 src_reg bytes(u);
541 emit(VEC4_OPCODE_PACK_BYTES, dst, bytes);
542 }
543
544 void
545 vec4_visitor::emit_pack_snorm_4x8(const dst_reg &dst, const src_reg &src0)
546 {
547 dst_reg max(this, glsl_type::vec4_type);
548 emit_minmax(BRW_CONDITIONAL_GE, max, src0, src_reg(-1.0f));
549
550 dst_reg min(this, glsl_type::vec4_type);
551 emit_minmax(BRW_CONDITIONAL_L, min, src_reg(max), src_reg(1.0f));
552
553 dst_reg scaled(this, glsl_type::vec4_type);
554 emit(MUL(scaled, src_reg(min), src_reg(127.0f)));
555
556 dst_reg rounded(this, glsl_type::vec4_type);
557 emit(RNDE(rounded, src_reg(scaled)));
558
559 dst_reg i(this, glsl_type::ivec4_type);
560 emit(MOV(i, src_reg(rounded)));
561
562 src_reg bytes(i);
563 emit(VEC4_OPCODE_PACK_BYTES, dst, bytes);
564 }
565
566 void
567 vec4_visitor::visit_instructions(const exec_list *list)
568 {
569 foreach_in_list(ir_instruction, ir, list) {
570 base_ir = ir;
571 ir->accept(this);
572 }
573 }
574
575
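/* Size of a variable in vec4 (128-bit) register units: scalars and vectors
 * each take one vec4, matrices take one vec4 per column, arrays and structs
 * sum their members, and samplers and atomic counters take none. For
 * example, float, vec3 and vec4 all return 1, mat3 returns 3, and vec2[8]
 * returns 8.
 */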
576 static int
577 type_size(const struct glsl_type *type)
578 {
579 unsigned int i;
580 int size;
581
582 switch (type->base_type) {
583 case GLSL_TYPE_UINT:
584 case GLSL_TYPE_INT:
585 case GLSL_TYPE_FLOAT:
586 case GLSL_TYPE_BOOL:
587 if (type->is_matrix()) {
588 return type->matrix_columns;
589 } else {
590 /* Regardless of the size of the vector, it gets a vec4. This is bad
591 * packing for things like floats, but otherwise arrays become a
592 * mess. Hopefully a later pass over the code can pack scalars
593 * down if appropriate.
594 */
595 return 1;
596 }
597 case GLSL_TYPE_ARRAY:
598 assert(type->length > 0);
599 return type_size(type->fields.array) * type->length;
600 case GLSL_TYPE_STRUCT:
601 size = 0;
602 for (i = 0; i < type->length; i++) {
603 size += type_size(type->fields.structure[i].type);
604 }
605 return size;
606 case GLSL_TYPE_SAMPLER:
607 /* Samplers take up no register space, since they're baked in at
608 * link time.
609 */
610 return 0;
611 case GLSL_TYPE_ATOMIC_UINT:
612 return 0;
613 case GLSL_TYPE_IMAGE:
614 case GLSL_TYPE_VOID:
615 case GLSL_TYPE_DOUBLE:
616 case GLSL_TYPE_ERROR:
617 case GLSL_TYPE_INTERFACE:
618 case GLSL_TYPE_FUNCTION:
619 unreachable("not reached");
620 }
621
622 return 0;
623 }
624
625 src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
626 {
627 init();
628
629 this->file = GRF;
630 this->reg = v->alloc.allocate(type_size(type));
631
632 if (type->is_array() || type->is_record()) {
633 this->swizzle = BRW_SWIZZLE_NOOP;
634 } else {
635 this->swizzle = brw_swizzle_for_size(type->vector_elements);
636 }
637
638 this->type = brw_type_for_base_type(type);
639 }
640
641 src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type, int size)
642 {
643 assert(size > 0);
644
645 init();
646
647 this->file = GRF;
648 this->reg = v->alloc.allocate(type_size(type) * size);
649
650 this->swizzle = BRW_SWIZZLE_NOOP;
651
652 this->type = brw_type_for_base_type(type);
653 }
654
655 dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
656 {
657 init();
658
659 this->file = GRF;
660 this->reg = v->alloc.allocate(type_size(type));
661
662 if (type->is_array() || type->is_record()) {
663 this->writemask = WRITEMASK_XYZW;
664 } else {
665 this->writemask = (1 << type->vector_elements) - 1;
666 }
667
668 this->type = brw_type_for_base_type(type);
669 }
670
671 /* Our support for uniforms is piggy-backed on the struct
672 * gl_program, because that's where the values actually
673 * get stored, rather than in some global gl_shader_program uniform
674 * store.
675 */
676 void
677 vec4_visitor::setup_uniform_values(ir_variable *ir)
678 {
679 int namelen = strlen(ir->name);
680
681 /* The data for our (non-builtin) uniforms is stored in a series of
682 * gl_uniform_driver_storage structs for each subcomponent that
683 * glGetUniformLocation() could name. We know it's been set up in the same
684 * order we'd walk the type, so walk the list of storage and find anything
685 * with our name, or the prefix of a component that starts with our name.
686 */
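/* For example, a uniform declared "mat3 m[2]" yields vector_count
 * = 2 * 3 = 6 vec4 slots below, each with uniform_vector_size 3 and the
 * unused fourth channel pointing at the shared zero constant.
 */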
687 for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
688 struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
689
690 if (strncmp(ir->name, storage->name, namelen) != 0 ||
691 (storage->name[namelen] != 0 &&
692 storage->name[namelen] != '.' &&
693 storage->name[namelen] != '[')) {
694 continue;
695 }
696
697 gl_constant_value *components = storage->storage;
698 unsigned vector_count = (MAX2(storage->array_elements, 1) *
699 storage->type->matrix_columns);
700
701 for (unsigned s = 0; s < vector_count; s++) {
702 assert(uniforms < uniform_array_size);
703 uniform_vector_size[uniforms] = storage->type->vector_elements;
704
705 int i;
706 for (i = 0; i < uniform_vector_size[uniforms]; i++) {
707 stage_prog_data->param[uniforms * 4 + i] = components;
708 components++;
709 }
710 for (; i < 4; i++) {
711 static gl_constant_value zero = { 0.0 };
712 stage_prog_data->param[uniforms * 4 + i] = &zero;
713 }
714
715 uniforms++;
716 }
717 }
718 }
719
720 void
721 vec4_visitor::setup_uniform_clipplane_values()
722 {
723 gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
724
725 for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
726 assert(this->uniforms < uniform_array_size);
727 this->uniform_vector_size[this->uniforms] = 4;
728 this->userplane[i] = dst_reg(UNIFORM, this->uniforms);
729 this->userplane[i].type = BRW_REGISTER_TYPE_F;
730 for (int j = 0; j < 4; ++j) {
731 stage_prog_data->param[this->uniforms * 4 + j] =
732 (gl_constant_value *) &clip_planes[i][j];
733 }
734 ++this->uniforms;
735 }
736 }
737
738 /* Our support for builtin uniforms is even scarier than non-builtin.
739 * It sits on top of the PROG_STATE_VAR parameters that are
740 * automatically updated from GL context state.
741 */
742 void
743 vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
744 {
745 const ir_state_slot *const slots = ir->get_state_slots();
746 assert(slots != NULL);
747
748 for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
749 /* This state reference has already been setup by ir_to_mesa,
750 * but we'll get the same index back here. We can reference
751 * ParameterValues directly, since unlike brw_fs.cpp, we never
752 * add new state references during compile.
753 */
754 int index = _mesa_add_state_reference(this->prog->Parameters,
755 (gl_state_index *)slots[i].tokens);
756 gl_constant_value *values =
757 &this->prog->Parameters->ParameterValues[index][0];
758
759 assert(this->uniforms < uniform_array_size);
760
761 for (unsigned j = 0; j < 4; j++)
762 stage_prog_data->param[this->uniforms * 4 + j] =
763 &values[GET_SWZ(slots[i].swizzle, j)];
764
765 this->uniform_vector_size[this->uniforms] =
766 (ir->type->is_scalar() || ir->type->is_vector() ||
767 ir->type->is_matrix() ? ir->type->vector_elements : 4);
768
769 this->uniforms++;
770 }
771 }
772
773 dst_reg *
774 vec4_visitor::variable_storage(ir_variable *var)
775 {
776 return (dst_reg *)hash_table_find(this->variable_ht, var);
777 }
778
779 void
780 vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir,
781 enum brw_predicate *predicate)
782 {
783 ir_expression *expr = ir->as_expression();
784
785 *predicate = BRW_PREDICATE_NORMAL;
786
787 if (expr && expr->operation != ir_binop_ubo_load) {
788 src_reg op[3];
789 vec4_instruction *inst;
790
791 assert(expr->get_num_operands() <= 3);
792 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
793 expr->operands[i]->accept(this);
794 op[i] = this->result;
795
796 resolve_ud_negate(&op[i]);
797 }
798
799 switch (expr->operation) {
800 case ir_unop_logic_not:
801 inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
802 inst->conditional_mod = BRW_CONDITIONAL_Z;
803 break;
804
805 case ir_binop_logic_xor:
806 if (devinfo->gen <= 5) {
807 src_reg temp = src_reg(this, ir->type);
808 emit(XOR(dst_reg(temp), op[0], op[1]));
809 inst = emit(AND(dst_null_d(), temp, src_reg(1)));
810 } else {
811 inst = emit(XOR(dst_null_d(), op[0], op[1]));
812 }
813 inst->conditional_mod = BRW_CONDITIONAL_NZ;
814 break;
815
816 case ir_binop_logic_or:
817 if (devinfo->gen <= 5) {
818 src_reg temp = src_reg(this, ir->type);
819 emit(OR(dst_reg(temp), op[0], op[1]));
820 inst = emit(AND(dst_null_d(), temp, src_reg(1)));
821 } else {
822 inst = emit(OR(dst_null_d(), op[0], op[1]));
823 }
824 inst->conditional_mod = BRW_CONDITIONAL_NZ;
825 break;
826
827 case ir_binop_logic_and:
828 if (devinfo->gen <= 5) {
829 src_reg temp = src_reg(this, ir->type);
830 emit(AND(dst_reg(temp), op[0], op[1]));
831 inst = emit(AND(dst_null_d(), temp, src_reg(1)));
832 } else {
833 inst = emit(AND(dst_null_d(), op[0], op[1]));
834 }
835 inst->conditional_mod = BRW_CONDITIONAL_NZ;
836 break;
837
838 case ir_unop_f2b:
839 if (devinfo->gen >= 6) {
840 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
841 } else {
842 inst = emit(MOV(dst_null_f(), op[0]));
843 inst->conditional_mod = BRW_CONDITIONAL_NZ;
844 }
845 break;
846
847 case ir_unop_i2b:
848 if (devinfo->gen >= 6) {
849 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
850 } else {
851 inst = emit(MOV(dst_null_d(), op[0]));
852 inst->conditional_mod = BRW_CONDITIONAL_NZ;
853 }
854 break;
855
856 case ir_binop_all_equal:
857 if (devinfo->gen <= 5) {
858 resolve_bool_comparison(expr->operands[0], &op[0]);
859 resolve_bool_comparison(expr->operands[1], &op[1]);
860 }
861 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
862 *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
863 break;
864
865 case ir_binop_any_nequal:
866 if (devinfo->gen <= 5) {
867 resolve_bool_comparison(expr->operands[0], &op[0]);
868 resolve_bool_comparison(expr->operands[1], &op[1]);
869 }
870 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
871 *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
872 break;
873
874 case ir_unop_any:
875 if (devinfo->gen <= 5) {
876 resolve_bool_comparison(expr->operands[0], &op[0]);
877 }
878 inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
879 *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
880 break;
881
882 case ir_binop_greater:
883 case ir_binop_gequal:
884 case ir_binop_less:
885 case ir_binop_lequal:
886 case ir_binop_equal:
887 case ir_binop_nequal:
888 if (devinfo->gen <= 5) {
889 resolve_bool_comparison(expr->operands[0], &op[0]);
890 resolve_bool_comparison(expr->operands[1], &op[1]);
891 }
892 emit(CMP(dst_null_d(), op[0], op[1],
893 brw_conditional_for_comparison(expr->operation)));
894 break;
895
896 case ir_triop_csel: {
897 /* Expand the boolean condition into the flag register. */
898 inst = emit(MOV(dst_null_d(), op[0]));
899 inst->conditional_mod = BRW_CONDITIONAL_NZ;
900
901 /* Select which boolean to return. */
902 dst_reg temp(this, expr->operands[1]->type);
903 inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
904 inst->predicate = BRW_PREDICATE_NORMAL;
905
906 /* Expand the result to a condition code. */
907 inst = emit(MOV(dst_null_d(), src_reg(temp)));
908 inst->conditional_mod = BRW_CONDITIONAL_NZ;
909 break;
910 }
911
912 default:
913 unreachable("not reached");
914 }
915 return;
916 }
917
918 ir->accept(this);
919
920 resolve_ud_negate(&this->result);
921
922 vec4_instruction *inst = emit(AND(dst_null_d(), this->result, src_reg(1)));
923 inst->conditional_mod = BRW_CONDITIONAL_NZ;
924 }
925
926 /**
927 * Emit a gen6 IF statement with the comparison folded into the IF
928 * instruction.
929 */
930 void
931 vec4_visitor::emit_if_gen6(ir_if *ir)
932 {
933 ir_expression *expr = ir->condition->as_expression();
934
935 if (expr && expr->operation != ir_binop_ubo_load) {
936 src_reg op[3];
937 dst_reg temp;
938
939 assert(expr->get_num_operands() <= 3);
940 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
941 expr->operands[i]->accept(this);
942 op[i] = this->result;
943 }
944
945 switch (expr->operation) {
946 case ir_unop_logic_not:
947 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
948 return;
949
950 case ir_binop_logic_xor:
951 emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
952 return;
953
954 case ir_binop_logic_or:
955 temp = dst_reg(this, glsl_type::bool_type);
956 emit(OR(temp, op[0], op[1]));
957 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
958 return;
959
960 case ir_binop_logic_and:
961 temp = dst_reg(this, glsl_type::bool_type);
962 emit(AND(temp, op[0], op[1]));
963 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
964 return;
965
966 case ir_unop_f2b:
967 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
968 return;
969
970 case ir_unop_i2b:
971 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
972 return;
973
974 case ir_binop_greater:
975 case ir_binop_gequal:
976 case ir_binop_less:
977 case ir_binop_lequal:
978 case ir_binop_equal:
979 case ir_binop_nequal:
980 emit(IF(op[0], op[1],
981 brw_conditional_for_comparison(expr->operation)));
982 return;
983
984 case ir_binop_all_equal:
985 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
986 emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
987 return;
988
989 case ir_binop_any_nequal:
990 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
991 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
992 return;
993
994 case ir_unop_any:
995 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
996 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
997 return;
998
999 case ir_triop_csel: {
1000 /* Expand the boolean condition into the flag register. */
1001 vec4_instruction *inst = emit(MOV(dst_null_d(), op[0]));
1002 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1003
1004 /* Select which boolean to return. */
1005 dst_reg temp(this, expr->operands[1]->type);
1006 inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
1007 inst->predicate = BRW_PREDICATE_NORMAL;
1008
1009 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
1010 return;
1011 }
1012
1013 default:
1014 unreachable("not reached");
1015 }
1016 return;
1017 }
1018
1019 ir->condition->accept(this);
1020
1021 emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
1022 }
1023
1024 void
1025 vec4_visitor::visit(ir_variable *ir)
1026 {
1027 dst_reg *reg = NULL;
1028
1029 if (variable_storage(ir))
1030 return;
1031
1032 switch (ir->data.mode) {
1033 case ir_var_shader_in:
1034 assert(ir->data.location != -1);
1035 reg = new(mem_ctx) dst_reg(ATTR, ir->data.location);
1036 break;
1037
1038 case ir_var_shader_out:
1039 assert(ir->data.location != -1);
1040 reg = new(mem_ctx) dst_reg(this, ir->type);
1041
1042 for (int i = 0; i < type_size(ir->type); i++) {
1043 output_reg[ir->data.location + i] = *reg;
1044 output_reg[ir->data.location + i].reg_offset = i;
1045 output_reg[ir->data.location + i].type =
1046 brw_type_for_base_type(ir->type->get_scalar_type());
1047 output_reg_annotation[ir->data.location + i] = ir->name;
1048 }
1049 break;
1050
1051 case ir_var_auto:
1052 case ir_var_temporary:
1053 reg = new(mem_ctx) dst_reg(this, ir->type);
1054 break;
1055
1056 case ir_var_uniform:
1057 reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);
1058
1059 /* Thanks to the lower_ubo_reference pass, we will see only
1060 * ir_binop_ubo_load expressions and not ir_dereference_variable for UBO
1061 * variables, so no need for them to be in variable_ht.
1062 *
1063 * Some uniforms, such as samplers and atomic counters, have no actual
1064 * storage, so we should ignore them.
1065 */
1066 if (ir->is_in_uniform_block() || type_size(ir->type) == 0)
1067 return;
1068
1069 /* Track how big the whole uniform variable is, in case we need to put a
1070 * copy of its data into pull constants for array access.
1071 */
1072 assert(this->uniforms < uniform_array_size);
1073 this->uniform_size[this->uniforms] = type_size(ir->type);
1074
1075 if (!strncmp(ir->name, "gl_", 3)) {
1076 setup_builtin_uniform_values(ir);
1077 } else {
1078 setup_uniform_values(ir);
1079 }
1080 break;
1081
1082 case ir_var_system_value:
1083 reg = make_reg_for_system_value(ir);
1084 break;
1085
1086 default:
1087 unreachable("not reached");
1088 }
1089
1090 reg->type = brw_type_for_base_type(ir->type);
1091 hash_table_insert(this->variable_ht, reg, ir);
1092 }
1093
1094 void
1095 vec4_visitor::visit(ir_loop *ir)
1096 {
1097 /* We don't want debugging output to print the whole body of the
1098 * loop as the annotation.
1099 */
1100 this->base_ir = NULL;
1101
1102 emit(BRW_OPCODE_DO);
1103
1104 visit_instructions(&ir->body_instructions);
1105
1106 emit(BRW_OPCODE_WHILE);
1107 }
1108
1109 void
1110 vec4_visitor::visit(ir_loop_jump *ir)
1111 {
1112 switch (ir->mode) {
1113 case ir_loop_jump::jump_break:
1114 emit(BRW_OPCODE_BREAK);
1115 break;
1116 case ir_loop_jump::jump_continue:
1117 emit(BRW_OPCODE_CONTINUE);
1118 break;
1119 }
1120 }
1121
1122
1123 void
1124 vec4_visitor::visit(ir_function_signature *)
1125 {
1126 unreachable("not reached");
1127 }
1128
1129 void
1130 vec4_visitor::visit(ir_function *ir)
1131 {
1132 /* Ignore function bodies other than main() -- we shouldn't see calls to
1133 * them since they should all be inlined.
1134 */
1135 if (strcmp(ir->name, "main") == 0) {
1136 const ir_function_signature *sig;
1137 exec_list empty;
1138
1139 sig = ir->matching_signature(NULL, &empty, false);
1140
1141 assert(sig);
1142
1143 visit_instructions(&sig->body);
1144 }
1145 }
1146
1147 bool
1148 vec4_visitor::try_emit_mad(ir_expression *ir)
1149 {
1150 /* 3-src instructions were introduced in gen6. */
1151 if (devinfo->gen < 6)
1152 return false;
1153
1154 /* MAD can only handle floating-point data. */
1155 if (ir->type->base_type != GLSL_TYPE_FLOAT)
1156 return false;
1157
1158 ir_rvalue *nonmul;
1159 ir_expression *mul;
1160 bool mul_negate, mul_abs;
1161
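/* Look at both operands of the add for a multiply, possibly wrapped in a
 * single neg or abs, so that expressions like a + b * c, a - b * c and
 * a + abs(b * c) can all be fused into one MAD.
 */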
1162 for (int i = 0; i < 2; i++) {
1163 mul_negate = false;
1164 mul_abs = false;
1165
1166 mul = ir->operands[i]->as_expression();
1167 nonmul = ir->operands[1 - i];
1168
1169 if (mul && mul->operation == ir_unop_abs) {
1170 mul = mul->operands[0]->as_expression();
1171 mul_abs = true;
1172 } else if (mul && mul->operation == ir_unop_neg) {
1173 mul = mul->operands[0]->as_expression();
1174 mul_negate = true;
1175 }
1176
1177 if (mul && mul->operation == ir_binop_mul)
1178 break;
1179 }
1180
1181 if (!mul || mul->operation != ir_binop_mul)
1182 return false;
1183
1184 nonmul->accept(this);
1185 src_reg src0 = fix_3src_operand(this->result);
1186
1187 mul->operands[0]->accept(this);
1188 src_reg src1 = fix_3src_operand(this->result);
1189 src1.negate ^= mul_negate;
1190 src1.abs = mul_abs;
1191 if (mul_abs)
1192 src1.negate = false;
1193
1194 mul->operands[1]->accept(this);
1195 src_reg src2 = fix_3src_operand(this->result);
1196 src2.abs = mul_abs;
1197 if (mul_abs)
1198 src2.negate = false;
1199
1200 this->result = src_reg(this, ir->type);
1201 emit(BRW_OPCODE_MAD, dst_reg(this->result), src0, src1, src2);
1202
1203 return true;
1204 }
1205
1206 bool
1207 vec4_visitor::try_emit_b2f_of_compare(ir_expression *ir)
1208 {
1209 /* This optimization relies on CMP setting the destination to 0 when
1210 * false. Early hardware only sets the least significant bit, and
1211 * leaves the other bits undefined. So we can't use it.
1212 */
1213 if (devinfo->gen < 6)
1214 return false;
1215
1216 ir_expression *const cmp = ir->operands[0]->as_expression();
1217
1218 if (cmp == NULL)
1219 return false;
1220
1221 switch (cmp->operation) {
1222 case ir_binop_less:
1223 case ir_binop_greater:
1224 case ir_binop_lequal:
1225 case ir_binop_gequal:
1226 case ir_binop_equal:
1227 case ir_binop_nequal:
1228 break;
1229
1230 default:
1231 return false;
1232 }
1233
1234 cmp->operands[0]->accept(this);
1235 const src_reg cmp_src0 = this->result;
1236
1237 cmp->operands[1]->accept(this);
1238 const src_reg cmp_src1 = this->result;
1239
1240 this->result = src_reg(this, ir->type);
1241
1242 emit(CMP(dst_reg(this->result), cmp_src0, cmp_src1,
1243 brw_conditional_for_comparison(cmp->operation)));
1244
1245 /* If the comparison is false, this->result will just happen to be zero.
1246 */
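/* The SEL below uses an inverted predicate: channels where the comparison
 * failed keep the (zero) CMP result, and channels where it passed are
 * overwritten with 1.0f, which is exactly b2f of the comparison.
 */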
1247 vec4_instruction *const inst = emit(BRW_OPCODE_SEL, dst_reg(this->result),
1248 this->result, src_reg(1.0f));
1249 inst->predicate = BRW_PREDICATE_NORMAL;
1250 inst->predicate_inverse = true;
1251
1252 return true;
1253 }
1254
1255 void
1256 vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
1257 src_reg src0, src_reg src1)
1258 {
1259 vec4_instruction *inst;
1260
1261 if (devinfo->gen >= 6) {
1262 inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
1263 inst->conditional_mod = conditionalmod;
1264 } else {
1265 emit(CMP(dst, src0, src1, conditionalmod));
1266
1267 inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
1268 inst->predicate = BRW_PREDICATE_NORMAL;
1269 }
1270 }
1271
1272 void
1273 vec4_visitor::emit_lrp(const dst_reg &dst,
1274 const src_reg &x, const src_reg &y, const src_reg &a)
1275 {
1276 if (devinfo->gen >= 6) {
1277 /* Note that the instruction's argument order is reversed from GLSL
1278 * and the IR.
1279 */
1280 emit(LRP(dst,
1281 fix_3src_operand(a), fix_3src_operand(y), fix_3src_operand(x)));
1282 } else {
1283 /* Earlier generations don't support three source operations, so we
1284 * need to emit x*(1-a) + y*a.
1285 */
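/* The temporaries inherit dst's writemask so that only the channels
 * actually being written are computed; (1 - a) is formed directly as
 * ADD(-a, 1.0) rather than with a separate constant MOV.
 */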
1286 dst_reg y_times_a = dst_reg(this, glsl_type::vec4_type);
1287 dst_reg one_minus_a = dst_reg(this, glsl_type::vec4_type);
1288 dst_reg x_times_one_minus_a = dst_reg(this, glsl_type::vec4_type);
1289 y_times_a.writemask = dst.writemask;
1290 one_minus_a.writemask = dst.writemask;
1291 x_times_one_minus_a.writemask = dst.writemask;
1292
1293 emit(MUL(y_times_a, y, a));
1294 emit(ADD(one_minus_a, negate(a), src_reg(1.0f)));
1295 emit(MUL(x_times_one_minus_a, x, src_reg(one_minus_a)));
1296 emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)));
1297 }
1298 }
1299
1300 /**
1301 * Emits the instructions needed to perform a pull constant load. before_block
1302 * and before_inst can be NULL in which case the instruction will be appended
1303 * to the end of the instruction list.
1304 */
1305 void
1306 vec4_visitor::emit_pull_constant_load_reg(dst_reg dst,
1307 src_reg surf_index,
1308 src_reg offset_reg,
1309 bblock_t *before_block,
1310 vec4_instruction *before_inst)
1311 {
1312 assert((before_inst == NULL && before_block == NULL) ||
1313 (before_inst && before_block));
1314
1315 vec4_instruction *pull;
1316
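/* Three variants follow: Gen9+ builds a two-register header (SIMD4x2
 * setup plus the offset) and sends from GRFs, Gen7/8 send the offset from
 * a single GRF, and earlier parts fall back to the MRF-based message.
 */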
1317 if (devinfo->gen >= 9) {
1318 /* Gen9+ needs a message header in order to use SIMD4x2 mode */
1319 src_reg header(this, glsl_type::uvec4_type, 2);
1320
1321 pull = new(mem_ctx)
1322 vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
1323 dst_reg(header));
1324
1325 if (before_inst)
1326 emit_before(before_block, before_inst, pull);
1327 else
1328 emit(pull);
1329
1330 dst_reg index_reg = retype(offset(dst_reg(header), 1),
1331 offset_reg.type);
1332 pull = MOV(writemask(index_reg, WRITEMASK_X), offset_reg);
1333
1334 if (before_inst)
1335 emit_before(before_block, before_inst, pull);
1336 else
1337 emit(pull);
1338
1339 pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
1340 dst,
1341 surf_index,
1342 header);
1343 pull->mlen = 2;
1344 pull->header_size = 1;
1345 } else if (devinfo->gen >= 7) {
1346 dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
1347
1348 grf_offset.type = offset_reg.type;
1349
1350 pull = MOV(grf_offset, offset_reg);
1351
1352 if (before_inst)
1353 emit_before(before_block, before_inst, pull);
1354 else
1355 emit(pull);
1356
1357 pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD_GEN7,
1358 dst,
1359 surf_index,
1360 src_reg(grf_offset));
1361 pull->mlen = 1;
1362 } else {
1363 pull = new(mem_ctx) vec4_instruction(VS_OPCODE_PULL_CONSTANT_LOAD,
1364 dst,
1365 surf_index,
1366 offset_reg);
1367 pull->base_mrf = 14;
1368 pull->mlen = 1;
1369 }
1370
1371 if (before_inst)
1372 emit_before(before_block, before_inst, pull);
1373 else
1374 emit(pull);
1375 }
1376
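/* Copy the value of an arbitrary live channel of src into every channel of
 * dst: FIND_LIVE_CHANNEL picks a channel index and BROADCAST replicates that
 * channel's value. Used below to turn a per-channel surface index into one
 * that is uniform across the execution mask.
 */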
1377 void
1378 vec4_visitor::emit_uniformize(const dst_reg &dst, const src_reg &src)
1379 {
1380 const src_reg chan_index(this, glsl_type::uint_type);
1381
1382 emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index))
1383 ->force_writemask_all = true;
1384 emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index)
1385 ->force_writemask_all = true;
1386 }
1387
1388 void
1389 vec4_visitor::visit(ir_expression *ir)
1390 {
1391 unsigned int operand;
1392 src_reg op[ARRAY_SIZE(ir->operands)];
1393 vec4_instruction *inst;
1394
1395 if (ir->operation == ir_binop_add) {
1396 if (try_emit_mad(ir))
1397 return;
1398 }
1399
1400 if (ir->operation == ir_unop_b2f) {
1401 if (try_emit_b2f_of_compare(ir))
1402 return;
1403 }
1404
1405 /* Storage for our result. Ideally for an assignment we'd be using
1406 * the actual storage for the result here, instead.
1407 */
1408 dst_reg result_dst(this, ir->type);
1409 src_reg result_src(result_dst);
1410
1411 if (ir->operation == ir_triop_csel) {
1412 ir->operands[1]->accept(this);
1413 op[1] = this->result;
1414 ir->operands[2]->accept(this);
1415 op[2] = this->result;
1416
1417 enum brw_predicate predicate;
1418 emit_bool_to_cond_code(ir->operands[0], &predicate);
1419 inst = emit(BRW_OPCODE_SEL, result_dst, op[1], op[2]);
1420 inst->predicate = predicate;
1421 this->result = result_src;
1422 return;
1423 }
1424
1425 for (operand = 0; operand < ir->get_num_operands(); operand++) {
1426 this->result.file = BAD_FILE;
1427 ir->operands[operand]->accept(this);
1428 if (this->result.file == BAD_FILE) {
1429 fprintf(stderr, "Failed to get tree for expression operand:\n");
1430 ir->operands[operand]->fprint(stderr);
1431 exit(1);
1432 }
1433 op[operand] = this->result;
1434
1435 /* Matrix expression operands should have been broken down to vector
1436 * operations already.
1437 */
1438 assert(!ir->operands[operand]->type->is_matrix());
1439 }
1440
1441 /* If nothing special happens, this is the result. */
1442 this->result = result_src;
1443
1444 switch (ir->operation) {
1445 case ir_unop_logic_not:
1446 emit(NOT(result_dst, op[0]));
1447 break;
1448 case ir_unop_neg:
1449 op[0].negate = !op[0].negate;
1450 emit(MOV(result_dst, op[0]));
1451 break;
1452 case ir_unop_abs:
1453 op[0].abs = true;
1454 op[0].negate = false;
1455 emit(MOV(result_dst, op[0]));
1456 break;
1457
1458 case ir_unop_sign:
1459 if (ir->type->is_float()) {
1460 /* AND(val, 0x80000000) gives the sign bit.
1461 *
1462 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
1463 * zero.
1464 */
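/* For example, op[0] = -2.5f (0xc0200000): the AND leaves 0x80000000, the
 * CMP sets the flag, and the predicated OR with 0x3f800000 produces
 * 0xbf800000 = -1.0f. For op[0] == 0.0f the OR is skipped and the result
 * stays 0.0f.
 */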
1465 emit(CMP(dst_null_f(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
1466
1467 op[0].type = BRW_REGISTER_TYPE_UD;
1468 result_dst.type = BRW_REGISTER_TYPE_UD;
1469 emit(AND(result_dst, op[0], src_reg(0x80000000u)));
1470
1471 inst = emit(OR(result_dst, src_reg(result_dst), src_reg(0x3f800000u)));
1472 inst->predicate = BRW_PREDICATE_NORMAL;
1473
1474 this->result.type = BRW_REGISTER_TYPE_F;
1475 } else {
1476 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
1477 * -> non-negative val generates 0x00000000.
1478 * Predicated OR sets 1 if val is positive.
1479 */
1480 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_G));
1481
1482 emit(ASR(result_dst, op[0], src_reg(31)));
1483
1484 inst = emit(OR(result_dst, src_reg(result_dst), src_reg(1)));
1485 inst->predicate = BRW_PREDICATE_NORMAL;
1486 }
1487 break;
1488
1489 case ir_unop_rcp:
1490 emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
1491 break;
1492
1493 case ir_unop_exp2:
1494 emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
1495 break;
1496 case ir_unop_log2:
1497 emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
1498 break;
1499 case ir_unop_exp:
1500 case ir_unop_log:
1501 unreachable("not reached: should be handled by ir_explog_to_explog2");
1502 case ir_unop_sin:
1503 emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
1504 break;
1505 case ir_unop_cos:
1506 emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
1507 break;
1508
1509 case ir_unop_dFdx:
1510 case ir_unop_dFdx_coarse:
1511 case ir_unop_dFdx_fine:
1512 case ir_unop_dFdy:
1513 case ir_unop_dFdy_coarse:
1514 case ir_unop_dFdy_fine:
1515 unreachable("derivatives not valid in vertex shader");
1516
1517 case ir_unop_bitfield_reverse:
1518 emit(BFREV(result_dst, op[0]));
1519 break;
1520 case ir_unop_bit_count:
1521 emit(CBIT(result_dst, op[0]));
1522 break;
1523 case ir_unop_find_msb: {
1524 src_reg temp = src_reg(this, glsl_type::uint_type);
1525
1526 inst = emit(FBH(dst_reg(temp), op[0]));
1527 inst->dst.writemask = WRITEMASK_XYZW;
1528
1529 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
1530 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
1531 * subtract the result from 31 to convert the MSB count into an LSB count.
1532 */
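/* For example, findMSB(0x00010000): FBH returns 15 (bit 16 is 15 positions
 * down from bit 31), the CMP checks for the no-bits-set case, and the
 * predicated 31 - 15 gives the expected result 16. If no bit is set, the
 * ADD is skipped and the result stays at -1, as GLSL requires.
 */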
1533
1534 /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
1535 temp.swizzle = BRW_SWIZZLE_NOOP;
1536 emit(MOV(result_dst, temp));
1537
1538 src_reg src_tmp = src_reg(result_dst);
1539 emit(CMP(dst_null_d(), src_tmp, src_reg(-1), BRW_CONDITIONAL_NZ));
1540
1541 src_tmp.negate = true;
1542 inst = emit(ADD(result_dst, src_tmp, src_reg(31)));
1543 inst->predicate = BRW_PREDICATE_NORMAL;
1544 break;
1545 }
1546 case ir_unop_find_lsb:
1547 emit(FBL(result_dst, op[0]));
1548 break;
1549 case ir_unop_saturate:
1550 inst = emit(MOV(result_dst, op[0]));
1551 inst->saturate = true;
1552 break;
1553
1554 case ir_unop_noise:
1555 unreachable("not reached: should be handled by lower_noise");
1556
1557 case ir_binop_add:
1558 emit(ADD(result_dst, op[0], op[1]));
1559 break;
1560 case ir_binop_sub:
1561 unreachable("not reached: should be handled by ir_sub_to_add_neg");
1562
1563 case ir_binop_mul:
1564 if (devinfo->gen < 8 && ir->type->is_integer()) {
1565 /* For integer multiplication, the MUL uses the low 16 bits of one of
1566 * the operands (src0 through SNB, src1 on IVB and later). The MACH
1567 * accumulates the contribution of the upper 16 bits of that
1568 * operand. If we can determine that one of the args is in the low
1569 * 16 bits, though, we can just emit a single MUL.
1570 */
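/* The operand swaps below keep the 16-bit constant in the source whose low
 * 16 bits the MUL actually reads (src0 before Gen7, src1 on Gen7+), so the
 * full 32-bit product is correct without a MACH.
 */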
1571 if (ir->operands[0]->is_uint16_constant()) {
1572 if (devinfo->gen < 7)
1573 emit(MUL(result_dst, op[0], op[1]));
1574 else
1575 emit(MUL(result_dst, op[1], op[0]));
1576 } else if (ir->operands[1]->is_uint16_constant()) {
1577 if (devinfo->gen < 7)
1578 emit(MUL(result_dst, op[1], op[0]));
1579 else
1580 emit(MUL(result_dst, op[0], op[1]));
1581 } else {
1582 struct brw_reg acc = retype(brw_acc_reg(8), result_dst.type);
1583
1584 emit(MUL(acc, op[0], op[1]));
1585 emit(MACH(dst_null_d(), op[0], op[1]));
1586 emit(MOV(result_dst, src_reg(acc)));
1587 }
1588 } else {
1589 emit(MUL(result_dst, op[0], op[1]));
1590 }
1591 break;
1592 case ir_binop_imul_high: {
1593 struct brw_reg acc = retype(brw_acc_reg(8), result_dst.type);
1594
1595 emit(MUL(acc, op[0], op[1]));
1596 emit(MACH(result_dst, op[0], op[1]));
1597 break;
1598 }
1599 case ir_binop_div:
1600 /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
1601 assert(ir->type->is_integer());
1602 emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
1603 break;
1604 case ir_binop_carry: {
1605 struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
1606
1607 emit(ADDC(dst_null_ud(), op[0], op[1]));
1608 emit(MOV(result_dst, src_reg(acc)));
1609 break;
1610 }
1611 case ir_binop_borrow: {
1612 struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
1613
1614 emit(SUBB(dst_null_ud(), op[0], op[1]));
1615 emit(MOV(result_dst, src_reg(acc)));
1616 break;
1617 }
1618 case ir_binop_mod:
1619 /* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */
1620 assert(ir->type->is_integer());
1621 emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
1622 break;
1623
1624 case ir_binop_less:
1625 case ir_binop_greater:
1626 case ir_binop_lequal:
1627 case ir_binop_gequal:
1628 case ir_binop_equal:
1629 case ir_binop_nequal: {
1630 if (devinfo->gen <= 5) {
1631 resolve_bool_comparison(ir->operands[0], &op[0]);
1632 resolve_bool_comparison(ir->operands[1], &op[1]);
1633 }
1634 emit(CMP(result_dst, op[0], op[1],
1635 brw_conditional_for_comparison(ir->operation)));
1636 break;
1637 }
1638
1639 case ir_binop_all_equal:
1640 if (devinfo->gen <= 5) {
1641 resolve_bool_comparison(ir->operands[0], &op[0]);
1642 resolve_bool_comparison(ir->operands[1], &op[1]);
1643 }
1644
1645 /* "==" operator producing a scalar boolean. */
1646 if (ir->operands[0]->type->is_vector() ||
1647 ir->operands[1]->type->is_vector()) {
1648 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
1649 emit(MOV(result_dst, src_reg(0)));
1650 inst = emit(MOV(result_dst, src_reg(~0)));
1651 inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
1652 } else {
1653 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
1654 }
1655 break;
1656 case ir_binop_any_nequal:
1657 if (devinfo->gen <= 5) {
1658 resolve_bool_comparison(ir->operands[0], &op[0]);
1659 resolve_bool_comparison(ir->operands[1], &op[1]);
1660 }
1661
1662 /* "!=" operator producing a scalar boolean. */
1663 if (ir->operands[0]->type->is_vector() ||
1664 ir->operands[1]->type->is_vector()) {
1665 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
1666
1667 emit(MOV(result_dst, src_reg(0)));
1668 inst = emit(MOV(result_dst, src_reg(~0)));
1669 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
1670 } else {
1671 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
1672 }
1673 break;
1674
1675 case ir_unop_any:
1676 if (devinfo->gen <= 5) {
1677 resolve_bool_comparison(ir->operands[0], &op[0]);
1678 }
1679 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
1680 emit(MOV(result_dst, src_reg(0)));
1681
1682 inst = emit(MOV(result_dst, src_reg(~0)));
1683 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
1684 break;
1685
1686 case ir_binop_logic_xor:
1687 emit(XOR(result_dst, op[0], op[1]));
1688 break;
1689
1690 case ir_binop_logic_or:
1691 emit(OR(result_dst, op[0], op[1]));
1692 break;
1693
1694 case ir_binop_logic_and:
1695 emit(AND(result_dst, op[0], op[1]));
1696 break;
1697
1698 case ir_binop_dot:
1699 assert(ir->operands[0]->type->is_vector());
1700 assert(ir->operands[0]->type == ir->operands[1]->type);
1701 emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
1702 break;
1703
1704 case ir_unop_sqrt:
1705 emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
1706 break;
1707 case ir_unop_rsq:
1708 emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
1709 break;
1710
1711 case ir_unop_bitcast_i2f:
1712 case ir_unop_bitcast_u2f:
1713 this->result = op[0];
1714 this->result.type = BRW_REGISTER_TYPE_F;
1715 break;
1716
1717 case ir_unop_bitcast_f2i:
1718 this->result = op[0];
1719 this->result.type = BRW_REGISTER_TYPE_D;
1720 break;
1721
1722 case ir_unop_bitcast_f2u:
1723 this->result = op[0];
1724 this->result.type = BRW_REGISTER_TYPE_UD;
1725 break;
1726
1727 case ir_unop_i2f:
1728 case ir_unop_i2u:
1729 case ir_unop_u2i:
1730 case ir_unop_u2f:
1731 case ir_unop_f2i:
1732 case ir_unop_f2u:
1733 emit(MOV(result_dst, op[0]));
1734 break;
1735 case ir_unop_b2i:
1736 emit(AND(result_dst, op[0], src_reg(1)));
1737 break;
1738 case ir_unop_b2f:
1739 if (devinfo->gen <= 5) {
1740 resolve_bool_comparison(ir->operands[0], &op[0]);
1741 }
1742 op[0].type = BRW_REGISTER_TYPE_D;
1743 result_dst.type = BRW_REGISTER_TYPE_D;
1744 emit(AND(result_dst, op[0], src_reg(0x3f800000u)));
1745 result_dst.type = BRW_REGISTER_TYPE_F;
1746 break;
1747 case ir_unop_f2b:
1748 emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
1749 break;
1750 case ir_unop_i2b:
1751 emit(CMP(result_dst, op[0], src_reg(0), BRW_CONDITIONAL_NZ));
1752 break;
1753
1754 case ir_unop_trunc:
1755 emit(RNDZ(result_dst, op[0]));
1756 break;
1757 case ir_unop_ceil: {
1758 src_reg tmp = src_reg(this, ir->type);
1759 op[0].negate = !op[0].negate;
1760 emit(RNDD(dst_reg(tmp), op[0]));
1761 tmp.negate = true;
1762 emit(MOV(result_dst, tmp));
1763 }
1764 break;
1765 case ir_unop_floor:
1766 inst = emit(RNDD(result_dst, op[0]));
1767 break;
1768 case ir_unop_fract:
1769 inst = emit(FRC(result_dst, op[0]));
1770 break;
1771 case ir_unop_round_even:
1772 emit(RNDE(result_dst, op[0]));
1773 break;
1774
1775 case ir_binop_min:
1776 emit_minmax(BRW_CONDITIONAL_L, result_dst, op[0], op[1]);
1777 break;
1778 case ir_binop_max:
1779 emit_minmax(BRW_CONDITIONAL_GE, result_dst, op[0], op[1]);
1780 break;
1781
1782 case ir_binop_pow:
1783 emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
1784 break;
1785
1786 case ir_unop_bit_not:
1787 inst = emit(NOT(result_dst, op[0]));
1788 break;
1789 case ir_binop_bit_and:
1790 inst = emit(AND(result_dst, op[0], op[1]));
1791 break;
1792 case ir_binop_bit_xor:
1793 inst = emit(XOR(result_dst, op[0], op[1]));
1794 break;
1795 case ir_binop_bit_or:
1796 inst = emit(OR(result_dst, op[0], op[1]));
1797 break;
1798
1799 case ir_binop_lshift:
1800 inst = emit(SHL(result_dst, op[0], op[1]));
1801 break;
1802
1803 case ir_binop_rshift:
1804 if (ir->type->base_type == GLSL_TYPE_INT)
1805 inst = emit(ASR(result_dst, op[0], op[1]));
1806 else
1807 inst = emit(SHR(result_dst, op[0], op[1]));
1808 break;
1809
1810 case ir_binop_bfm:
1811 emit(BFI1(result_dst, op[0], op[1]));
1812 break;
1813
1814 case ir_binop_ubo_load: {
1815 ir_constant *const_uniform_block = ir->operands[0]->as_constant();
1816 ir_constant *const_offset_ir = ir->operands[1]->as_constant();
1817 unsigned const_offset = const_offset_ir ? const_offset_ir->value.u[0] : 0;
1818 src_reg offset;
1819
1820 /* Now, load the vector from that offset. */
1821 assert(ir->type->is_vector() || ir->type->is_scalar());
1822
1823 src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
1824 packed_consts.type = result.type;
1825 src_reg surf_index;
1826
1827 if (const_uniform_block) {
1828 /* The block index is a constant, so just emit the binding table entry
1829 * as an immediate.
1830 */
1831 surf_index = src_reg(prog_data->base.binding_table.ubo_start +
1832 const_uniform_block->value.u[0]);
1833 } else {
1834 /* The block index is not a constant. Evaluate the index expression
1835 * per-channel and add the base UBO index; we have to select a value
1836 * from any live channel.
1837 */
1838 surf_index = src_reg(this, glsl_type::uint_type);
1839 emit(ADD(dst_reg(surf_index), op[0],
1840 src_reg(prog_data->base.binding_table.ubo_start)));
1841 emit_uniformize(dst_reg(surf_index), surf_index);
1842
1843 /* Assume this may touch any UBO. It would be nice to provide
1844 * a tighter bound, but the array information is already lowered away.
1845 */
1846 brw_mark_surface_used(&prog_data->base,
1847 prog_data->base.binding_table.ubo_start +
1848 shader_prog->NumUniformBlocks - 1);
1849 }
1850
1851 if (const_offset_ir) {
1852 if (devinfo->gen >= 8) {
1853 /* Store the offset in a GRF so we can send-from-GRF. */
1854 offset = src_reg(this, glsl_type::int_type);
1855 emit(MOV(dst_reg(offset), src_reg(const_offset / 16)));
1856 } else {
1857 /* Immediates are fine on older generations since they'll be moved
1858 * to a (potentially fake) MRF at the generator level.
1859 */
1860 offset = src_reg(const_offset / 16);
1861 }
1862 } else {
1863 offset = src_reg(this, glsl_type::uint_type);
1864 emit(SHR(dst_reg(offset), op[1], src_reg(4u)));
1865 }
1866
1867 emit_pull_constant_load_reg(dst_reg(packed_consts),
1868 surf_index,
1869 offset,
1870 NULL, NULL /* before_block/inst */);
1871
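/* Select the right components out of the fetched vec4: the swizzle for the
 * result size is shifted by the dword offset within the 16-byte block, so
 * e.g. a scalar at byte offset 8 ends up reading .z.
 */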
1872 packed_consts.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
1873 packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4,
1874 const_offset % 16 / 4,
1875 const_offset % 16 / 4,
1876 const_offset % 16 / 4);
1877
1878 /* UBO bools are any nonzero int. We need to convert them to 0/~0. */
1879 if (ir->type->base_type == GLSL_TYPE_BOOL) {
1880 emit(CMP(result_dst, packed_consts, src_reg(0u),
1881 BRW_CONDITIONAL_NZ));
1882 } else {
1883 emit(MOV(result_dst, packed_consts));
1884 }
1885 break;
1886 }
1887
1888 case ir_binop_vector_extract:
1889 unreachable("should have been lowered by vec_index_to_cond_assign");
1890
1891 case ir_triop_fma:
1892 op[0] = fix_3src_operand(op[0]);
1893 op[1] = fix_3src_operand(op[1]);
1894 op[2] = fix_3src_operand(op[2]);
1895 /* Note that the instruction's argument order is reversed from GLSL
1896 * and the IR.
1897 */
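      /* For reference: the hardware MAD computes src0 + src1 * src2, so
       * GLSL fma(a, b, c) = a * b + c becomes MAD(dst, c, b, a) below.
       */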
1898 emit(MAD(result_dst, op[2], op[1], op[0]));
1899 break;
1900
1901 case ir_triop_lrp:
1902 emit_lrp(result_dst, op[0], op[1], op[2]);
1903 break;
1904
1905 case ir_triop_csel:
1906 unreachable("already handled above");
1907 break;
1908
1909 case ir_triop_bfi:
1910 op[0] = fix_3src_operand(op[0]);
1911 op[1] = fix_3src_operand(op[1]);
1912 op[2] = fix_3src_operand(op[2]);
1913 emit(BFI2(result_dst, op[0], op[1], op[2]));
1914 break;
1915
1916 case ir_triop_bitfield_extract:
1917 op[0] = fix_3src_operand(op[0]);
1918 op[1] = fix_3src_operand(op[1]);
1919 op[2] = fix_3src_operand(op[2]);
1920 /* Note that the instruction's argument order is reversed from GLSL
1921 * and the IR.
1922 */
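      /* For reference (our reading of the reversal): GLSL
       * bitfieldExtract(value, offset, bits) becomes
       * BFE(dst, bits, offset, value) below.
       */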
1923 emit(BFE(result_dst, op[2], op[1], op[0]));
1924 break;
1925
1926 case ir_triop_vector_insert:
1927 unreachable("should have been lowered by lower_vector_insert");
1928
1929 case ir_quadop_bitfield_insert:
1930 unreachable("not reached: should be handled by "
1931 "bitfield_insert_to_bfm_bfi\n");
1932
1933 case ir_quadop_vector:
1934 unreachable("not reached: should be handled by lower_quadop_vector");
1935
1936 case ir_unop_pack_half_2x16:
1937 emit_pack_half_2x16(result_dst, op[0]);
1938 break;
1939 case ir_unop_unpack_half_2x16:
1940 emit_unpack_half_2x16(result_dst, op[0]);
1941 break;
1942 case ir_unop_unpack_unorm_4x8:
1943 emit_unpack_unorm_4x8(result_dst, op[0]);
1944 break;
1945 case ir_unop_unpack_snorm_4x8:
1946 emit_unpack_snorm_4x8(result_dst, op[0]);
1947 break;
1948 case ir_unop_pack_unorm_4x8:
1949 emit_pack_unorm_4x8(result_dst, op[0]);
1950 break;
1951 case ir_unop_pack_snorm_4x8:
1952 emit_pack_snorm_4x8(result_dst, op[0]);
1953 break;
1954 case ir_unop_pack_snorm_2x16:
1955 case ir_unop_pack_unorm_2x16:
1956 case ir_unop_unpack_snorm_2x16:
1957 case ir_unop_unpack_unorm_2x16:
1958 unreachable("not reached: should be handled by lower_packing_builtins");
1959 case ir_unop_unpack_half_2x16_split_x:
1960 case ir_unop_unpack_half_2x16_split_y:
1961 case ir_binop_pack_half_2x16_split:
1962 case ir_unop_interpolate_at_centroid:
1963 case ir_binop_interpolate_at_sample:
1964 case ir_binop_interpolate_at_offset:
1965 unreachable("not reached: should not occur in vertex shader");
1966 case ir_binop_ldexp:
1967 unreachable("not reached: should be handled by ldexp_to_arith()");
1968 case ir_unop_d2f:
1969 case ir_unop_f2d:
1970 case ir_unop_d2i:
1971 case ir_unop_i2d:
1972 case ir_unop_d2u:
1973 case ir_unop_u2d:
1974 case ir_unop_d2b:
1975 case ir_unop_pack_double_2x32:
1976 case ir_unop_unpack_double_2x32:
1977 case ir_unop_frexp_sig:
1978 case ir_unop_frexp_exp:
1979 unreachable("fp64 todo");
1980 }
1981 }
1982
1983
1984 void
1985 vec4_visitor::visit(ir_swizzle *ir)
1986 {
1987 /* Note that this is only swizzles in expressions, not those on the left
1988 * hand side of an assignment, which do write masking. See ir_assignment
1989 * for that.
1990 */
1991 const unsigned swz = brw_compose_swizzle(
1992 brw_swizzle_for_size(ir->type->vector_elements),
1993 BRW_SWIZZLE4(ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w));
1994
1995 ir->val->accept(this);
1996 this->result = swizzle(this->result, swz);
1997 }
1998
1999 void
2000 vec4_visitor::visit(ir_dereference_variable *ir)
2001 {
2002 const struct glsl_type *type = ir->type;
2003 dst_reg *reg = variable_storage(ir->var);
2004
2005 if (!reg) {
2006 fail("Failed to find variable storage for %s\n", ir->var->name);
2007 this->result = src_reg(brw_null_reg());
2008 return;
2009 }
2010
2011 this->result = src_reg(*reg);
2012
2013 /* System values get their swizzle from the dst_reg writemask */
2014 if (ir->var->data.mode == ir_var_system_value)
2015 return;
2016
2017 if (type->is_scalar() || type->is_vector() || type->is_matrix())
2018 this->result.swizzle = brw_swizzle_for_size(type->vector_elements);
2019 }
2020
2021
2022 int
2023 vec4_visitor::compute_array_stride(ir_dereference_array *ir)
2024 {
2025 /* Under normal circumstances array elements are stored consecutively, so
2026 * the stride is equal to the size of the array element.
2027 */
2028 return type_size(ir->type);
2029 }
2030
2031
2032 void
2033 vec4_visitor::visit(ir_dereference_array *ir)
2034 {
2035 ir_constant *constant_index;
2036 src_reg src;
2037 int array_stride = compute_array_stride(ir);
2038
2039 constant_index = ir->array_index->constant_expression_value();
2040
2041 ir->array->accept(this);
2042 src = this->result;
2043
2044 if (constant_index) {
2045 src.reg_offset += constant_index->value.i[0] * array_stride;
2046 } else {
2047 /* Variable index array dereference. It takes the vec4 at the
2048 * base of the array and an index that offsets the Mesa register
2049 * index.
2050 */
2051 ir->array_index->accept(this);
2052
2053 src_reg index_reg;
2054
2055 if (array_stride == 1) {
2056 index_reg = this->result;
2057 } else {
2058 index_reg = src_reg(this, glsl_type::int_type);
2059
2060 emit(MUL(dst_reg(index_reg), this->result, src_reg(array_stride)));
2061 }
2062
2063 if (src.reladdr) {
2064 src_reg temp = src_reg(this, glsl_type::int_type);
2065
2066 emit(ADD(dst_reg(temp), *src.reladdr, index_reg));
2067
2068 index_reg = temp;
2069 }
2070
2071 src.reladdr = ralloc(mem_ctx, src_reg);
2072 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
2073 }
2074
2075 /* If the type is smaller than a vec4, replicate the last channel out. */
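   /* (e.g. a vec2 element ends up with an .xyyy swizzle, assuming
    * brw_swizzle_for_size replicates the final component for sizes below
    * four.)
    */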
2076 if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
2077 src.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
2078 else
2079 src.swizzle = BRW_SWIZZLE_NOOP;
2080 src.type = brw_type_for_base_type(ir->type);
2081
2082 this->result = src;
2083 }
2084
2085 void
2086 vec4_visitor::visit(ir_dereference_record *ir)
2087 {
2088 unsigned int i;
2089 const glsl_type *struct_type = ir->record->type;
2090 int offset = 0;
2091
2092 ir->record->accept(this);
2093
2094 for (i = 0; i < struct_type->length; i++) {
2095 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
2096 break;
2097 offset += type_size(struct_type->fields.structure[i].type);
2098 }
2099
2100 /* If the type is smaller than a vec4, replicate the last channel out. */
2101 if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
2102 this->result.swizzle = brw_swizzle_for_size(ir->type->vector_elements);
2103 else
2104 this->result.swizzle = BRW_SWIZZLE_NOOP;
2105 this->result.type = brw_type_for_base_type(ir->type);
2106
2107 this->result.reg_offset += offset;
2108 }
2109
2110 /**
2111 * We want to be careful in assignment setup to hit the actual storage
2112 * instead of potentially using a temporary like we might with the
2113 * ir_dereference handler.
2114 */
2115 static dst_reg
2116 get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
2117 {
2118 /* The LHS must be a dereference. If the LHS is a variable indexed array
2119 * access of a vector, it must be separated into a series of conditional moves
2120 * before reaching this point (see ir_vec_index_to_cond_assign).
2121 */
2122 assert(ir->as_dereference());
2123 ir_dereference_array *deref_array = ir->as_dereference_array();
2124 if (deref_array) {
2125 assert(!deref_array->array->type->is_vector());
2126 }
2127
2128 /* Use the rvalue deref handler for the most part. We'll ignore
2129 * swizzles in it and write using the writemask instead.
2130 */
2131 ir->accept(v);
2132 return dst_reg(v->result);
2133 }
2134
2135 void
2136 vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
2137 const struct glsl_type *type,
2138 enum brw_predicate predicate)
2139 {
2140 if (type->base_type == GLSL_TYPE_STRUCT) {
2141 for (unsigned int i = 0; i < type->length; i++) {
2142 emit_block_move(dst, src, type->fields.structure[i].type, predicate);
2143 }
2144 return;
2145 }
2146
2147 if (type->is_array()) {
2148 for (unsigned int i = 0; i < type->length; i++) {
2149 emit_block_move(dst, src, type->fields.array, predicate);
2150 }
2151 return;
2152 }
2153
2154 if (type->is_matrix()) {
2155 const struct glsl_type *vec_type;
2156
2157 vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
2158 type->vector_elements, 1);
2159
2160 for (int i = 0; i < type->matrix_columns; i++) {
2161 emit_block_move(dst, src, vec_type, predicate);
2162 }
2163 return;
2164 }
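   /* (For example, a mat2 is copied as two vec2 moves through the
    * scalar/vector path below, each advancing dst and src by one register.)
    */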
2165
2166 assert(type->is_scalar() || type->is_vector());
2167
2168 dst->type = brw_type_for_base_type(type);
2169 src->type = dst->type;
2170
2171 dst->writemask = (1 << type->vector_elements) - 1;
2172
2173 src->swizzle = brw_swizzle_for_size(type->vector_elements);
2174
2175 vec4_instruction *inst = emit(MOV(*dst, *src));
2176 inst->predicate = predicate;
2177
2178 dst->reg_offset++;
2179 src->reg_offset++;
2180 }
2181
2182
2183 /* If the RHS processing resulted in an instruction generating a
2184 * temporary value, and it would be easy to rewrite the instruction to
2185 * generate its result right into the LHS instead, do so. This ends
2186 * up reliably removing instructions where it can be tricky to do so
2187 * later without real UD chain information.
2188 */
2189 bool
2190 vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
2191 dst_reg dst,
2192 src_reg src,
2193 vec4_instruction *pre_rhs_inst,
2194 vec4_instruction *last_rhs_inst)
2195 {
2196 /* This could be supported, but it would take more smarts. */
2197 if (ir->condition)
2198 return false;
2199
2200 if (pre_rhs_inst == last_rhs_inst)
2201 return false; /* No instructions generated to work with. */
2202
2203 /* Make sure the last instruction generated our source reg. */
2204 if (src.file != GRF ||
2205 src.file != last_rhs_inst->dst.file ||
2206 src.reg != last_rhs_inst->dst.reg ||
2207 src.reg_offset != last_rhs_inst->dst.reg_offset ||
2208 src.reladdr ||
2209 src.abs ||
2210 src.negate ||
2211 last_rhs_inst->predicate != BRW_PREDICATE_NONE)
2212 return false;
2213
2214 /* Check that the last instruction fully initialized the channels
2215 * we want to use, in the order we want to use them. We could
2216 * potentially reswizzle the operands of many instructions so that
2217 * we could handle out of order channels, but don't yet.
2218 */
2219
2220 for (unsigned i = 0; i < 4; i++) {
2221 if (dst.writemask & (1 << i)) {
2222 if (!(last_rhs_inst->dst.writemask & (1 << i)))
2223 return false;
2224
2225 if (BRW_GET_SWZ(src.swizzle, i) != i)
2226 return false;
2227 }
2228 }
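   /* Illustration: with dst.writemask = .xy, the rewrite is abandoned unless
    * the last RHS instruction wrote both .x and .y and the source swizzle
    * reads .x from x and .y from y.
    */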
2229
2230 /* Success! Rewrite the instruction. */
2231 last_rhs_inst->dst.file = dst.file;
2232 last_rhs_inst->dst.reg = dst.reg;
2233 last_rhs_inst->dst.reg_offset = dst.reg_offset;
2234 last_rhs_inst->dst.reladdr = dst.reladdr;
2235 last_rhs_inst->dst.writemask &= dst.writemask;
2236
2237 return true;
2238 }
2239
2240 void
2241 vec4_visitor::visit(ir_assignment *ir)
2242 {
2243 dst_reg dst = get_assignment_lhs(ir->lhs, this);
2244 enum brw_predicate predicate = BRW_PREDICATE_NONE;
2245
2246 if (!ir->lhs->type->is_scalar() &&
2247 !ir->lhs->type->is_vector()) {
2248 ir->rhs->accept(this);
2249 src_reg src = this->result;
2250
2251 if (ir->condition) {
2252 emit_bool_to_cond_code(ir->condition, &predicate);
2253 }
2254
2255 /* emit_block_move doesn't account for swizzles in the source register.
2256 * This should be ok, since the source register is a structure or an
2257 * array, and those can't be swizzled. But double-check to be sure.
2258 */
2259 assert(src.swizzle ==
2260 (ir->rhs->type->is_matrix()
2261 ? brw_swizzle_for_size(ir->rhs->type->vector_elements)
2262 : BRW_SWIZZLE_NOOP));
2263
2264 emit_block_move(&dst, &src, ir->rhs->type, predicate);
2265 return;
2266 }
2267
2268 /* Now we're down to just a scalar/vector with writemasks. */
2269 int i;
2270
2271 vec4_instruction *pre_rhs_inst, *last_rhs_inst;
2272 pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
2273
2274 ir->rhs->accept(this);
2275
2276 last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
2277
2278 int swizzles[4];
2279 int src_chan = 0;
2280
2281 assert(ir->lhs->type->is_vector() ||
2282 ir->lhs->type->is_scalar());
2283 dst.writemask = ir->write_mask;
2284
2285 /* Swizzle a small RHS vector into the channels being written.
2286 *
2287 * GLSL IR treats write_mask as dictating how many channels are
2288 * present on the RHS, while in our instructions we need to make
2289 * those channels appear in the slots of the vec4 they're written to.
2290 */
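   /* Worked example (illustrative): for write_mask = .yw the loop below
    * produces swizzles = {0, 0, 0, 1}, so RHS channel .x lands in dst.y and
    * RHS channel .y lands in dst.w.
    */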
2291 for (int i = 0; i < 4; i++)
2292 swizzles[i] = (ir->write_mask & (1 << i) ? src_chan++ : 0);
2293
2294 src_reg src = swizzle(this->result,
2295 BRW_SWIZZLE4(swizzles[0], swizzles[1],
2296 swizzles[2], swizzles[3]));
2297
2298 if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
2299 return;
2300 }
2301
2302 if (ir->condition) {
2303 emit_bool_to_cond_code(ir->condition, &predicate);
2304 }
2305
2306 for (i = 0; i < type_size(ir->lhs->type); i++) {
2307 vec4_instruction *inst = emit(MOV(dst, src));
2308 inst->predicate = predicate;
2309
2310 dst.reg_offset++;
2311 src.reg_offset++;
2312 }
2313 }
2314
2315 void
2316 vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
2317 {
2318 if (ir->type->base_type == GLSL_TYPE_STRUCT) {
2319 foreach_in_list(ir_constant, field_value, &ir->components) {
2320 emit_constant_values(dst, field_value);
2321 }
2322 return;
2323 }
2324
2325 if (ir->type->is_array()) {
2326 for (unsigned int i = 0; i < ir->type->length; i++) {
2327 emit_constant_values(dst, ir->array_elements[i]);
2328 }
2329 return;
2330 }
2331
2332 if (ir->type->is_matrix()) {
2333 for (int i = 0; i < ir->type->matrix_columns; i++) {
2334 float *vec = &ir->value.f[i * ir->type->vector_elements];
2335
2336 for (int j = 0; j < ir->type->vector_elements; j++) {
2337 dst->writemask = 1 << j;
2338 dst->type = BRW_REGISTER_TYPE_F;
2339
2340 emit(MOV(*dst, src_reg(vec[j])));
2341 }
2342 dst->reg_offset++;
2343 }
2344 return;
2345 }
2346
2347 int remaining_writemask = (1 << ir->type->vector_elements) - 1;
2348
2349 for (int i = 0; i < ir->type->vector_elements; i++) {
2350 if (!(remaining_writemask & (1 << i)))
2351 continue;
2352
2353 dst->writemask = 1 << i;
2354 dst->type = brw_type_for_base_type(ir->type);
2355
2356 /* Find other components that match the one we're about to
2357 * write. Emits fewer instructions for things like vec4(0.5,
2358 * 1.5, 1.5, 1.5).
2359 */
2360 for (int j = i + 1; j < ir->type->vector_elements; j++) {
2361 if (ir->type->base_type == GLSL_TYPE_BOOL) {
2362 if (ir->value.b[i] == ir->value.b[j])
2363 dst->writemask |= (1 << j);
2364 } else {
2365 /* u, i, and f storage all line up, so no need for a
2366 * switch case for comparing each type.
2367 */
2368 if (ir->value.u[i] == ir->value.u[j])
2369 dst->writemask |= (1 << j);
2370 }
2371 }
2372
2373 switch (ir->type->base_type) {
2374 case GLSL_TYPE_FLOAT:
2375 emit(MOV(*dst, src_reg(ir->value.f[i])));
2376 break;
2377 case GLSL_TYPE_INT:
2378 emit(MOV(*dst, src_reg(ir->value.i[i])));
2379 break;
2380 case GLSL_TYPE_UINT:
2381 emit(MOV(*dst, src_reg(ir->value.u[i])));
2382 break;
2383 case GLSL_TYPE_BOOL:
2384 emit(MOV(*dst, src_reg(ir->value.b[i] != 0 ? ~0 : 0)));
2385 break;
2386 default:
2387 unreachable("Non-float/uint/int/bool constant");
2388 }
2389
2390 remaining_writemask &= ~dst->writemask;
2391 }
2392 dst->reg_offset++;
2393 }
2394
2395 void
2396 vec4_visitor::visit(ir_constant *ir)
2397 {
2398 dst_reg dst = dst_reg(this, ir->type);
2399 this->result = src_reg(dst);
2400
2401 emit_constant_values(&dst, ir);
2402 }
2403
2404 void
2405 vec4_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
2406 {
2407 ir_dereference *deref = static_cast<ir_dereference *>(
2408 ir->actual_parameters.get_head());
2409 ir_variable *location = deref->variable_referenced();
2410 unsigned surf_index = (prog_data->base.binding_table.abo_start +
2411 location->data.binding);
2412
2413 /* Calculate the surface offset */
2414 src_reg offset(this, glsl_type::uint_type);
2415 ir_dereference_array *deref_array = deref->as_dereference_array();
2416 if (deref_array) {
2417 deref_array->array_index->accept(this);
2418
2419 src_reg tmp(this, glsl_type::uint_type);
2420 emit(MUL(dst_reg(tmp), this->result, ATOMIC_COUNTER_SIZE));
2421 emit(ADD(dst_reg(offset), tmp, location->data.atomic.offset));
2422 } else {
2423 offset = location->data.atomic.offset;
2424 }
2425
2426 /* Emit the appropriate machine instruction */
2427 const char *callee = ir->callee->function_name();
2428 dst_reg dst = get_assignment_lhs(ir->return_deref, this);
2429
2430 if (!strcmp("__intrinsic_atomic_read", callee)) {
2431 emit_untyped_surface_read(surf_index, dst, offset);
2432
2433 } else if (!strcmp("__intrinsic_atomic_increment", callee)) {
2434 emit_untyped_atomic(BRW_AOP_INC, surf_index, dst, offset,
2435 src_reg(), src_reg());
2436
2437 } else if (!strcmp("__intrinsic_atomic_predecrement", callee)) {
2438 emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset,
2439 src_reg(), src_reg());
2440 }
2441 }
2442
2443 void
2444 vec4_visitor::visit(ir_call *ir)
2445 {
2446 const char *callee = ir->callee->function_name();
2447
2448 if (!strcmp("__intrinsic_atomic_read", callee) ||
2449 !strcmp("__intrinsic_atomic_increment", callee) ||
2450 !strcmp("__intrinsic_atomic_predecrement", callee)) {
2451 visit_atomic_counter_intrinsic(ir);
2452 } else {
2453 unreachable("Unsupported intrinsic.");
2454 }
2455 }
2456
2457 src_reg
2458 vec4_visitor::emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler)
2459 {
2460 vec4_instruction *inst =
2461 new(mem_ctx) vec4_instruction(SHADER_OPCODE_TXF_MCS,
2462 dst_reg(this, glsl_type::uvec4_type));
2463 inst->base_mrf = 2;
2464 inst->mlen = 1;
2465 inst->src[1] = sampler;
2466
2467 /* Parameters are: u, v, r, lod; lod will always be zero due to API restrictions. */
2468 int param_base = inst->base_mrf;
2469 int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
2470 int zero_mask = 0xf & ~coord_mask;
2471
2472 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
2473 coordinate));
2474
2475 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
2476 src_reg(0)));
2477
2478 emit(inst);
2479 return src_reg(inst->dst);
2480 }
2481
2482 static bool
2483 is_high_sampler(const struct brw_device_info *devinfo, src_reg sampler)
2484 {
2485 if (devinfo->gen < 8 && !devinfo->is_haswell)
2486 return false;
2487
2488 return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16;
2489 }
2490
2491 void
2492 vec4_visitor::visit(ir_texture *ir)
2493 {
2494 uint32_t sampler =
2495 _mesa_get_sampler_uniform_value(ir->sampler, shader_prog, prog);
2496
2497 ir_rvalue *nonconst_sampler_index =
2498 _mesa_get_sampler_array_nonconst_index(ir->sampler);
2499
2500 /* Handle non-constant sampler array indexing */
2501 src_reg sampler_reg;
2502 if (nonconst_sampler_index) {
2503 /* The highest sampler which may be used by this operation is
2504 * the last element of the array. Mark it here, because the generator
2505 * doesn't have enough information to determine the bound.
2506 */
2507 uint32_t array_size = ir->sampler->as_dereference_array()
2508 ->array->type->array_size();
2509
2510 uint32_t max_used = sampler + array_size - 1;
2511 if (ir->op == ir_tg4 && devinfo->gen < 8) {
2512 max_used += prog_data->base.binding_table.gather_texture_start;
2513 } else {
2514 max_used += prog_data->base.binding_table.texture_start;
2515 }
2516
2517 brw_mark_surface_used(&prog_data->base, max_used);
2518
2519 /* Emit code to evaluate the actual indexing expression */
2520 nonconst_sampler_index->accept(this);
2521 dst_reg temp(this, glsl_type::uint_type);
2522 emit(ADD(temp, this->result, src_reg(sampler)));
2523 emit_uniformize(temp, src_reg(temp));
2524
2525 sampler_reg = src_reg(temp);
2526 } else {
2527 /* Single sampler, or constant array index; the indexing expression
2528 * is just an immediate.
2529 */
2530 sampler_reg = src_reg(sampler);
2531 }
2532
2533 /* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
2534 * emitting anything other than setting up the constant result.
2535 */
2536 if (ir->op == ir_tg4) {
2537 ir_constant *chan = ir->lod_info.component->as_constant();
2538 int swiz = GET_SWZ(key->tex.swizzles[sampler], chan->value.i[0]);
2539 if (swiz == SWIZZLE_ZERO || swiz == SWIZZLE_ONE) {
2540 dst_reg result(this, ir->type);
2541 this->result = src_reg(result);
2542 emit(MOV(result, src_reg(swiz == SWIZZLE_ONE ? 1.0f : 0.0f)));
2543 return;
2544 }
2545 }
2546
2547 /* Should be lowered by do_lower_texture_projection */
2548 assert(!ir->projector);
2549
2550 /* Should be lowered */
2551 assert(!ir->offset || !ir->offset->type->is_array());
2552
2553 /* Generate code to compute all the subexpression trees. This has to be
2554 * done before loading any values into MRFs for the sampler message since
2555 * generating these values may involve SEND messages that need the MRFs.
2556 */
2557 src_reg coordinate;
2558 if (ir->coordinate) {
2559 ir->coordinate->accept(this);
2560 coordinate = this->result;
2561 }
2562
2563 src_reg shadow_comparitor;
2564 if (ir->shadow_comparitor) {
2565 ir->shadow_comparitor->accept(this);
2566 shadow_comparitor = this->result;
2567 }
2568
2569 bool has_nonconstant_offset = ir->offset && !ir->offset->as_constant();
2570 src_reg offset_value;
2571 if (has_nonconstant_offset) {
2572 ir->offset->accept(this);
2573 offset_value = src_reg(this->result);
2574 }
2575
2576 const glsl_type *lod_type = NULL, *sample_index_type = NULL;
2577 src_reg lod, dPdx, dPdy, sample_index, mcs;
2578 switch (ir->op) {
2579 case ir_tex:
2580 lod = src_reg(0.0f);
2581 lod_type = glsl_type::float_type;
2582 break;
2583 case ir_txf:
2584 case ir_txl:
2585 case ir_txs:
2586 ir->lod_info.lod->accept(this);
2587 lod = this->result;
2588 lod_type = ir->lod_info.lod->type;
2589 break;
2590 case ir_query_levels:
2591 lod = src_reg(0);
2592 lod_type = glsl_type::int_type;
2593 break;
2594 case ir_txf_ms:
2595 ir->lod_info.sample_index->accept(this);
2596 sample_index = this->result;
2597 sample_index_type = ir->lod_info.sample_index->type;
2598
2599 if (devinfo->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
2600 mcs = emit_mcs_fetch(ir, coordinate, sampler_reg);
2601 else
2602 mcs = src_reg(0u);
2603 break;
2604 case ir_txd:
2605 ir->lod_info.grad.dPdx->accept(this);
2606 dPdx = this->result;
2607
2608 ir->lod_info.grad.dPdy->accept(this);
2609 dPdy = this->result;
2610
2611 lod_type = ir->lod_info.grad.dPdx->type;
2612 break;
2613 case ir_txb:
2614 case ir_lod:
2615 case ir_tg4:
2616 break;
2617 }
2618
2619 enum opcode opcode;
2620 switch (ir->op) {
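   /* Plain ir_tex has no implicit derivatives in a vertex shader, so it was
    * given an explicit LOD of 0.0 above and is emitted as TXL here.
    */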
2621 case ir_tex: opcode = SHADER_OPCODE_TXL; break;
2622 case ir_txl: opcode = SHADER_OPCODE_TXL; break;
2623 case ir_txd: opcode = SHADER_OPCODE_TXD; break;
2624 case ir_txf: opcode = SHADER_OPCODE_TXF; break;
2625 case ir_txf_ms: opcode = SHADER_OPCODE_TXF_CMS; break;
2626 case ir_txs: opcode = SHADER_OPCODE_TXS; break;
2627 case ir_tg4: opcode = has_nonconstant_offset
2628 ? SHADER_OPCODE_TG4_OFFSET : SHADER_OPCODE_TG4; break;
2629 case ir_query_levels: opcode = SHADER_OPCODE_TXS; break;
2630 case ir_txb:
2631 unreachable("TXB is not valid for vertex shaders.");
2632 case ir_lod:
2633 unreachable("LOD is not valid for vertex shaders.");
2634 default:
2635 unreachable("Unrecognized tex op");
2636 }
2637
2638 vec4_instruction *inst = new(mem_ctx) vec4_instruction(
2639 opcode, dst_reg(this, ir->type));
2640
2641 if (ir->offset != NULL && !has_nonconstant_offset) {
2642 inst->offset =
2643 brw_texture_offset(ir->offset->as_constant()->value.i,
2644 ir->offset->type->vector_elements);
2645 }
2646
2647 /* Stuff the channel select bits in the top of the texture offset */
2648 if (ir->op == ir_tg4)
2649 inst->offset |= gather_channel(ir, sampler) << 16;
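   /* (e.g. gathering the .z channel ORs 2 << 16 into the offset, keeping the
    * packed texel offset itself in the low 16 bits.)
    */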
2650
2651 /* The message header is necessary for:
2652 * - Gen4 (always)
2653 * - Gen9+ for selecting SIMD4x2
2654 * - Texel offsets
2655 * - Gather channel selection
2656 * - Sampler indices too large to fit in a 4-bit value.
2657 */
2658 inst->header_size =
2659 (devinfo->gen < 5 || devinfo->gen >= 9 ||
2660 inst->offset != 0 || ir->op == ir_tg4 ||
2661 is_high_sampler(devinfo, sampler_reg)) ? 1 : 0;
2662 inst->base_mrf = 2;
2663 inst->mlen = inst->header_size + 1; /* always at least one */
2664 inst->dst.writemask = WRITEMASK_XYZW;
2665 inst->shadow_compare = ir->shadow_comparitor != NULL;
2666
2667 inst->src[1] = sampler_reg;
2668
2669 /* MRF for the first parameter */
2670 int param_base = inst->base_mrf + inst->header_size;
2671
2672 if (ir->op == ir_txs || ir->op == ir_query_levels) {
2673 int writemask = devinfo->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
2674 emit(MOV(dst_reg(MRF, param_base, lod_type, writemask), lod));
2675 } else {
2676 /* Load the coordinate */
2677 /* FINISHME: gl_clamp_mask and saturate */
2678 int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
2679 int zero_mask = 0xf & ~coord_mask;
2680
2681 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
2682 coordinate));
2683
2684 if (zero_mask != 0) {
2685 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
2686 src_reg(0)));
2687 }
2688 /* Load the shadow comparitor */
2689 if (ir->shadow_comparitor && ir->op != ir_txd && (ir->op != ir_tg4 || !has_nonconstant_offset)) {
2690 emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
2691 WRITEMASK_X),
2692 shadow_comparitor));
2693 inst->mlen++;
2694 }
2695
2696 /* Load the LOD info */
2697 if (ir->op == ir_tex || ir->op == ir_txl) {
2698 int mrf, writemask;
2699 if (devinfo->gen >= 5) {
2700 mrf = param_base + 1;
2701 if (ir->shadow_comparitor) {
2702 writemask = WRITEMASK_Y;
2703 /* mlen already incremented */
2704 } else {
2705 writemask = WRITEMASK_X;
2706 inst->mlen++;
2707 }
2708 } else /* devinfo->gen == 4 */ {
2709 mrf = param_base;
2710 writemask = WRITEMASK_W;
2711 }
2712 emit(MOV(dst_reg(MRF, mrf, lod_type, writemask), lod));
2713 } else if (ir->op == ir_txf) {
2714 emit(MOV(dst_reg(MRF, param_base, lod_type, WRITEMASK_W), lod));
2715 } else if (ir->op == ir_txf_ms) {
2716 emit(MOV(dst_reg(MRF, param_base + 1, sample_index_type, WRITEMASK_X),
2717 sample_index));
2718 if (devinfo->gen >= 7) {
2719 /* MCS data is in the first channel of `mcs`, but we need to get it into
2720 * the .y channel of the second vec4 of params, so replicate .x across
2721 * the whole vec4 and then mask off everything except .y
2722 */
2723 mcs.swizzle = BRW_SWIZZLE_XXXX;
2724 emit(MOV(dst_reg(MRF, param_base + 1, glsl_type::uint_type, WRITEMASK_Y),
2725 mcs));
2726 }
2727 inst->mlen++;
2728 } else if (ir->op == ir_txd) {
2729 const glsl_type *type = lod_type;
2730
2731 if (devinfo->gen >= 5) {
2732 dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
2733 dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
2734 emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
2735 emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
2736 inst->mlen++;
2737
2738 if (ir->type->vector_elements == 3 || ir->shadow_comparitor) {
2739 dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
2740 dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
2741 emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
2742 emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
2743 inst->mlen++;
2744
2745 if (ir->shadow_comparitor) {
2746 emit(MOV(dst_reg(MRF, param_base + 2,
2747 ir->shadow_comparitor->type, WRITEMASK_Z),
2748 shadow_comparitor));
2749 }
2750 }
2751 } else /* devinfo->gen == 4 */ {
2752 emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
2753 emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
2754 inst->mlen += 2;
2755 }
2756 } else if (ir->op == ir_tg4 && has_nonconstant_offset) {
2757 if (ir->shadow_comparitor) {
2758 emit(MOV(dst_reg(MRF, param_base, ir->shadow_comparitor->type, WRITEMASK_W),
2759 shadow_comparitor));
2760 }
2761
2762 emit(MOV(dst_reg(MRF, param_base + 1, glsl_type::ivec2_type, WRITEMASK_XY),
2763 offset_value));
2764 inst->mlen++;
2765 }
2766 }
2767
2768 emit(inst);
2769
2770 /* Fix up the number of layers (.z) for cube arrays: the hardware returns
2771 * faces * layers; the spec requires layers.
2772 */
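   /* (e.g. a cube array with 3 layers reports 18 in .z; the INT_QUOTIENT by
    * 6 below turns that back into 3.)
    */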
2773 if (ir->op == ir_txs) {
2774 glsl_type const *type = ir->sampler->type;
2775 if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2776 type->sampler_array) {
2777 emit_math(SHADER_OPCODE_INT_QUOTIENT,
2778 writemask(inst->dst, WRITEMASK_Z),
2779 src_reg(inst->dst), src_reg(6));
2780 }
2781 }
2782
2783 if (devinfo->gen == 6 && ir->op == ir_tg4) {
2784 emit_gen6_gather_wa(key->tex.gen6_gather_wa[sampler], inst->dst);
2785 }
2786
2787 swizzle_result(ir, src_reg(inst->dst), sampler);
2788 }
2789
2790 /**
2791 * Apply workarounds for Gen6 gather with UINT/SINT
2792 */
2793 void
2794 vec4_visitor::emit_gen6_gather_wa(uint8_t wa, dst_reg dst)
2795 {
2796 if (!wa)
2797 return;
2798
2799 int width = (wa & WA_8BIT) ? 8 : 16;
2800 dst_reg dst_f = dst;
2801 dst_f.type = BRW_REGISTER_TYPE_F;
2802
2803 /* Convert from UNORM to UINT */
2804 emit(MUL(dst_f, src_reg(dst_f), src_reg((float)((1 << width) - 1))));
2805 emit(MOV(dst, src_reg(dst_f)));
2806
2807 if (wa & WA_SIGN) {
2808 /* Reinterpret the UINT value as a signed INT value by
2809 * shifting the sign bit into place, then shifting back
2810 * preserving sign.
2811 */
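      /* Worked example (illustrative): with WA_8BIT | WA_SIGN, a raw UNORM
       * value of 128/255 becomes 128 after the MUL above, and the SHL/ASR
       * pair by 24 sign-extends it to -128.
       */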
2812 emit(SHL(dst, src_reg(dst), src_reg(32 - width)));
2813 emit(ASR(dst, src_reg(dst), src_reg(32 - width)));
2814 }
2815 }
2816
2817 /**
2818 * Set up the gather channel based on the swizzle, for gather4.
2819 */
2820 uint32_t
2821 vec4_visitor::gather_channel(ir_texture *ir, uint32_t sampler)
2822 {
2823 ir_constant *chan = ir->lod_info.component->as_constant();
2824 int swiz = GET_SWZ(key->tex.swizzles[sampler], chan->value.i[0]);
2825 switch (swiz) {
2826 case SWIZZLE_X: return 0;
2827 case SWIZZLE_Y:
2828 /* gather4 sampler is broken for green channel on RG32F --
2829 * we must ask for blue instead.
2830 */
2831 if (key->tex.gather_channel_quirk_mask & (1<<sampler))
2832 return 2;
2833 return 1;
2834 case SWIZZLE_Z: return 2;
2835 case SWIZZLE_W: return 3;
2836 default:
2837 unreachable("Not reached"); /* zero, one swizzles handled already */
2838 }
2839 }
2840
2841 void
2842 vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler)
2843 {
2844 int s = key->tex.swizzles[sampler];
2845
2846 this->result = src_reg(this, ir->type);
2847 dst_reg swizzled_result(this->result);
2848
2849 if (ir->op == ir_query_levels) {
2850 /* # levels is in .w */
2851 orig_val.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
2852 emit(MOV(swizzled_result, orig_val));
2853 return;
2854 }
2855
2856 if (ir->op == ir_txs || ir->type == glsl_type::float_type
2857 || s == SWIZZLE_NOOP || ir->op == ir_tg4) {
2858 emit(MOV(swizzled_result, orig_val));
2859 return;
2860 }
2861
2862
2863 int zero_mask = 0, one_mask = 0, copy_mask = 0;
2864 int swizzle[4] = {0};
2865
2866 for (int i = 0; i < 4; i++) {
2867 switch (GET_SWZ(s, i)) {
2868 case SWIZZLE_ZERO:
2869 zero_mask |= (1 << i);
2870 break;
2871 case SWIZZLE_ONE:
2872 one_mask |= (1 << i);
2873 break;
2874 default:
2875 copy_mask |= (1 << i);
2876 swizzle[i] = GET_SWZ(s, i);
2877 break;
2878 }
2879 }
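   /* Worked example (illustrative): a GL texture swizzle of (R, G, ZERO, ONE)
    * yields copy_mask = .xy, zero_mask = .z and one_mask = .w, so at most
    * three MOVs are emitted below.
    */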
2880
2881 if (copy_mask) {
2882 orig_val.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
2883 swizzled_result.writemask = copy_mask;
2884 emit(MOV(swizzled_result, orig_val));
2885 }
2886
2887 if (zero_mask) {
2888 swizzled_result.writemask = zero_mask;
2889 emit(MOV(swizzled_result, src_reg(0.0f)));
2890 }
2891
2892 if (one_mask) {
2893 swizzled_result.writemask = one_mask;
2894 emit(MOV(swizzled_result, src_reg(1.0f)));
2895 }
2896 }
2897
2898 void
2899 vec4_visitor::visit(ir_return *)
2900 {
2901 unreachable("not reached");
2902 }
2903
2904 void
2905 vec4_visitor::visit(ir_discard *)
2906 {
2907 unreachable("not reached");
2908 }
2909
2910 void
2911 vec4_visitor::visit(ir_if *ir)
2912 {
2913 /* Don't point the annotation at the if statement, because then it plus
2914 * the then and else blocks get printed.
2915 */
2916 this->base_ir = ir->condition;
2917
2918 if (devinfo->gen == 6) {
2919 emit_if_gen6(ir);
2920 } else {
2921 enum brw_predicate predicate;
2922 emit_bool_to_cond_code(ir->condition, &predicate);
2923 emit(IF(predicate));
2924 }
2925
2926 visit_instructions(&ir->then_instructions);
2927
2928 if (!ir->else_instructions.is_empty()) {
2929 this->base_ir = ir->condition;
2930 emit(BRW_OPCODE_ELSE);
2931
2932 visit_instructions(&ir->else_instructions);
2933 }
2934
2935 this->base_ir = ir->condition;
2936 emit(BRW_OPCODE_ENDIF);
2937 }
2938
2939 void
2940 vec4_visitor::visit(ir_emit_vertex *)
2941 {
2942 unreachable("not reached");
2943 }
2944
2945 void
2946 vec4_visitor::visit(ir_end_primitive *)
2947 {
2948 unreachable("not reached");
2949 }
2950
2951 void
2952 vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
2953 dst_reg dst, src_reg offset,
2954 src_reg src0, src_reg src1)
2955 {
2956 unsigned mlen = 0;
2957
2958 /* Set the atomic operation offset. */
2959 emit(MOV(brw_writemask(brw_uvec_mrf(8, mlen, 0), WRITEMASK_X), offset));
2960 mlen++;
2961
2962 /* Set the atomic operation arguments. */
2963 if (src0.file != BAD_FILE) {
2964 emit(MOV(brw_writemask(brw_uvec_mrf(8, mlen, 0), WRITEMASK_X), src0));
2965 mlen++;
2966 }
2967
2968 if (src1.file != BAD_FILE) {
2969 emit(MOV(brw_writemask(brw_uvec_mrf(8, mlen, 0), WRITEMASK_X), src1));
2970 mlen++;
2971 }
2972
2973 /* Emit the instruction. Note that this maps to the normal SIMD8
2974 * untyped atomic message on Ivy Bridge, but that's OK because
2975 * unused channels will be masked out.
2976 */
2977 vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_ATOMIC, dst,
2978 brw_message_reg(0),
2979 src_reg(surf_index), src_reg(atomic_op));
2980 inst->mlen = mlen;
2981 }
2982
2983 void
2984 vec4_visitor::emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
2985 src_reg offset)
2986 {
2987 /* Set the surface read offset. */
2988 emit(MOV(brw_writemask(brw_uvec_mrf(8, 0, 0), WRITEMASK_X), offset));
2989
2990 /* Emit the instruction. Note that this maps to the normal SIMD8
2991 * untyped surface read message, but that's OK because unused
2992 * channels will be masked out.
2993 */
2994 vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_SURFACE_READ, dst,
2995 brw_message_reg(0),
2996 src_reg(surf_index), src_reg(1));
2997 inst->mlen = 1;
2998 }
2999
3000 void
3001 vec4_visitor::emit_ndc_computation()
3002 {
3003 /* Get the position */
3004 src_reg pos = src_reg(output_reg[VARYING_SLOT_POS]);
3005
3006 /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
3007 dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
3008 output_reg[BRW_VARYING_SLOT_NDC] = ndc;
3009
3010 current_annotation = "NDC";
3011 dst_reg ndc_w = ndc;
3012 ndc_w.writemask = WRITEMASK_W;
3013 src_reg pos_w = pos;
3014 pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
3015 emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);
3016
3017 dst_reg ndc_xyz = ndc;
3018 ndc_xyz.writemask = WRITEMASK_XYZ;
3019
3020 emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
3021 }
3022
3023 void
3024 vec4_visitor::emit_psiz_and_flags(dst_reg reg)
3025 {
3026 if (devinfo->gen < 6 &&
3027 ((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) ||
3028 key->userclip_active || devinfo->has_negative_rhw_bug)) {
3029 dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
3030 dst_reg header1_w = header1;
3031 header1_w.writemask = WRITEMASK_W;
3032
3033 emit(MOV(header1, 0u));
3034
3035 if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
3036 src_reg psiz = src_reg(output_reg[VARYING_SLOT_PSIZ]);
3037
3038 current_annotation = "Point size";
3039 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11))));
3040 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8));
3041 }
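      /* (Illustrative reading of the two instructions above: a point size of
       * 2.0 becomes 2.0 * 2048 = 4096, and the AND keeps bits 18:8, i.e. the
       * width expressed in eighths in the header word.)
       */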
3042
3043 if (key->userclip_active) {
3044 current_annotation = "Clipping flags";
3045 dst_reg flags0 = dst_reg(this, glsl_type::uint_type);
3046 dst_reg flags1 = dst_reg(this, glsl_type::uint_type);
3047
3048 emit(CMP(dst_null_f(), src_reg(output_reg[VARYING_SLOT_CLIP_DIST0]), src_reg(0.0f), BRW_CONDITIONAL_L));
3049 emit(VS_OPCODE_UNPACK_FLAGS_SIMD4X2, flags0, src_reg(0));
3050 emit(OR(header1_w, src_reg(header1_w), src_reg(flags0)));
3051
3052 emit(CMP(dst_null_f(), src_reg(output_reg[VARYING_SLOT_CLIP_DIST1]), src_reg(0.0f), BRW_CONDITIONAL_L));
3053 emit(VS_OPCODE_UNPACK_FLAGS_SIMD4X2, flags1, src_reg(0));
3054 emit(SHL(flags1, src_reg(flags1), src_reg(4)));
3055 emit(OR(header1_w, src_reg(header1_w), src_reg(flags1)));
3056 }
3057
3058 /* i965 clipping workaround:
3059 * 1) Test for -ve rhw
3060 * 2) If set,
3061 * set ndc = (0,0,0,0)
3062 * set ucp[6] = 1
3063 *
3064 * Later, clipping will detect ucp[6] and ensure the primitive is
3065 * clipped against all fixed planes.
3066 */
3067 if (devinfo->has_negative_rhw_bug) {
3068 src_reg ndc_w = src_reg(output_reg[BRW_VARYING_SLOT_NDC]);
3069 ndc_w.swizzle = BRW_SWIZZLE_WWWW;
3070 emit(CMP(dst_null_f(), ndc_w, src_reg(0.0f), BRW_CONDITIONAL_L));
3071 vec4_instruction *inst;
3072 inst = emit(OR(header1_w, src_reg(header1_w), src_reg(1u << 6)));
3073 inst->predicate = BRW_PREDICATE_NORMAL;
3074 inst = emit(MOV(output_reg[BRW_VARYING_SLOT_NDC], src_reg(0.0f)));
3075 inst->predicate = BRW_PREDICATE_NORMAL;
3076 }
3077
3078 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
3079 } else if (devinfo->gen < 6) {
3080 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
3081 } else {
3082 emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
3083 if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
3084 dst_reg reg_w = reg;
3085 reg_w.writemask = WRITEMASK_W;
3086 emit(MOV(reg_w, src_reg(output_reg[VARYING_SLOT_PSIZ])));
3087 }
3088 if (prog_data->vue_map.slots_valid & VARYING_BIT_LAYER) {
3089 dst_reg reg_y = reg;
3090 reg_y.writemask = WRITEMASK_Y;
3091 reg_y.type = BRW_REGISTER_TYPE_D;
3092 emit(MOV(reg_y, src_reg(output_reg[VARYING_SLOT_LAYER])));
3093 }
3094 if (prog_data->vue_map.slots_valid & VARYING_BIT_VIEWPORT) {
3095 dst_reg reg_z = reg;
3096 reg_z.writemask = WRITEMASK_Z;
3097 reg_z.type = BRW_REGISTER_TYPE_D;
3098 emit(MOV(reg_z, src_reg(output_reg[VARYING_SLOT_VIEWPORT])));
3099 }
3100 }
3101 }
3102
3103 void
3104 vec4_visitor::emit_clip_distances(dst_reg reg, int offset)
3105 {
3106 /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
3107 *
3108 * "If a linked set of shaders forming the vertex stage contains no
3109 * static write to gl_ClipVertex or gl_ClipDistance, but the
3110 * application has requested clipping against user clip planes through
3111 * the API, then the coordinate written to gl_Position is used for
3112 * comparison against the user clip planes."
3113 *
3114 * This function is only called if the shader didn't write to
3115 * gl_ClipDistance. Accordingly, we use gl_ClipVertex to perform clipping
3116 * if the user wrote to it; otherwise we use gl_Position.
3117 */
3118 gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
3119 if (!(prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX)) {
3120 clip_vertex = VARYING_SLOT_POS;
3121 }
3122
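   /* A minimal sketch of what the loop below computes, assuming a user clip
    * plane of (0, 0, 1, -1): a vertex with clip_vertex = (x, y, 3, 1) gets
    * DP4 = 3 - 1 = 2 written to its clip-distance channel.
    */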
3123 for (int i = 0; i + offset < key->nr_userclip_plane_consts && i < 4;
3124 ++i) {
3125 reg.writemask = 1 << i;
3126 emit(DP4(reg,
3127 src_reg(output_reg[clip_vertex]),
3128 src_reg(this->userplane[i + offset])));
3129 }
3130 }
3131
3132 vec4_instruction *
3133 vec4_visitor::emit_generic_urb_slot(dst_reg reg, int varying)
3134 {
3135 assert(varying < VARYING_SLOT_MAX);
3136 reg.type = output_reg[varying].type;
3137 current_annotation = output_reg_annotation[varying];
3138 /* Copy the register, saturating if necessary */
3139 return emit(MOV(reg, src_reg(output_reg[varying])));
3140 }
3141
3142 void
3143 vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
3144 {
3145 reg.type = BRW_REGISTER_TYPE_F;
3146
3147 switch (varying) {
3148 case VARYING_SLOT_PSIZ:
3149 {
3150 /* PSIZ is always in slot 0, and is coupled with other flags. */
3151 current_annotation = "indices, point width, clip flags";
3152 emit_psiz_and_flags(reg);
3153 break;
3154 }
3155 case BRW_VARYING_SLOT_NDC:
3156 current_annotation = "NDC";
3157 emit(MOV(reg, src_reg(output_reg[BRW_VARYING_SLOT_NDC])));
3158 break;
3159 case VARYING_SLOT_POS:
3160 current_annotation = "gl_Position";
3161 emit(MOV(reg, src_reg(output_reg[VARYING_SLOT_POS])));
3162 break;
3163 case VARYING_SLOT_EDGE:
3164 /* This is present when doing unfilled polygons. We're supposed to copy
3165 * the edge flag from the user-provided vertex array
3166 * (glEdgeFlagPointer), or otherwise we'll copy from the current value
3167 * of that attribute (starts as 1.0f). This is then used in clipping to
3168 * determine which edges should be drawn as wireframe.
3169 */
3170 current_annotation = "edge flag";
3171 emit(MOV(reg, src_reg(dst_reg(ATTR, VERT_ATTRIB_EDGEFLAG,
3172 glsl_type::float_type, WRITEMASK_XYZW))));
3173 break;
3174 case BRW_VARYING_SLOT_PAD:
3175 /* No need to write to this slot */
3176 break;
3177 case VARYING_SLOT_COL0:
3178 case VARYING_SLOT_COL1:
3179 case VARYING_SLOT_BFC0:
3180 case VARYING_SLOT_BFC1: {
3181 /* These built-in varyings are only supported in compatibility mode,
3182 * and we only support GS in core profile. So, this must be a vertex
3183 * shader.
3184 */
3185 assert(stage == MESA_SHADER_VERTEX);
3186 vec4_instruction *inst = emit_generic_urb_slot(reg, varying);
3187 if (((struct brw_vs_prog_key *) key)->clamp_vertex_color)
3188 inst->saturate = true;
3189 break;
3190 }
3191
3192 default:
3193 emit_generic_urb_slot(reg, varying);
3194 break;
3195 }
3196 }
3197
3198 static int
3199 align_interleaved_urb_mlen(const struct brw_device_info *devinfo, int mlen)
3200 {
3201 if (devinfo->gen >= 6) {
3202 /* URB data written (does not include the message header reg) must
3203 * be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
3204 * section 5.4.3.2.2: URB_INTERLEAVED.
3205 *
3206 * URB entries are allocated on a multiple of 1024 bits, so an
3207 * extra 128 bits written here to make the end align to 256 is
3208 * no problem.
3209 */
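      /* Worked example (illustrative): a header plus 3 data MRFs gives
       * mlen = 4, which is bumped to 5 so the data portion stays a multiple
       * of two registers (256 bits).
       */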
3210 if ((mlen % 2) != 1)
3211 mlen++;
3212 }
3213
3214 return mlen;
3215 }
3216
3217
3218 /**
3219 * Generates the VUE payload plus the necessary URB write instructions to
3220 * output it.
3221 *
3222 * The VUE layout is documented in Volume 2a.
3223 */
3224 void
3225 vec4_visitor::emit_vertex()
3226 {
3227 /* MRF 0 is reserved for the debugger, so start with the message header
3228 * in MRF 1.
3229 */
3230 int base_mrf = 1;
3231 int mrf = base_mrf;
3232 /* In the process of generating our URB write message contents, we
3233 * may need to unspill a register or load from an array. Those
3234 * reads would use MRFs 14-15.
3235 */
3236 int max_usable_mrf = 13;
3237
3238 /* The following assertion verifies that max_usable_mrf causes an
3239 * even-numbered amount of URB write data, which will meet gen6's
3240 * requirements for length alignment.
3241 */
3242 assert((max_usable_mrf - base_mrf) % 2 == 0);
3243
3244 /* First mrf is the g0-based message header containing URB handles and
3245 * such.
3246 */
3247 emit_urb_write_header(mrf++);
3248
3249 if (devinfo->gen < 6) {
3250 emit_ndc_computation();
3251 }
3252
3253 /* Lower legacy fixed-function and ClipVertex clipping to clip distances */
3254 if (key->userclip_active && !prog->UsesClipDistanceOut) {
3255 current_annotation = "user clip distances";
3256
3257 output_reg[VARYING_SLOT_CLIP_DIST0] = dst_reg(this, glsl_type::vec4_type);
3258 output_reg[VARYING_SLOT_CLIP_DIST1] = dst_reg(this, glsl_type::vec4_type);
3259
3260 emit_clip_distances(output_reg[VARYING_SLOT_CLIP_DIST0], 0);
3261 emit_clip_distances(output_reg[VARYING_SLOT_CLIP_DIST1], 4);
3262 }
3263
3264 /* We may need to split this up into several URB writes, so do them in a
3265 * loop.
3266 */
3267 int slot = 0;
3268 bool complete = false;
3269 do {
3270 /* URB offset is in URB row increments, and each of our MRFs is half of
3271 * one of those, since we're doing interleaved writes.
3272 */
3273 int offset = slot / 2;
3274
3275 mrf = base_mrf + 1;
3276 for (; slot < prog_data->vue_map.num_slots; ++slot) {
3277 emit_urb_slot(dst_reg(MRF, mrf++),
3278 prog_data->vue_map.slot_to_varying[slot]);
3279
3280 /* If this was max_usable_mrf, we can't fit anything more into this
3281 * URB WRITE.
3282 */
3283 if (mrf > max_usable_mrf) {
3284 slot++;
3285 break;
3286 }
3287 }
3288
3289 complete = slot >= prog_data->vue_map.num_slots;
3290 current_annotation = "URB write";
3291 vec4_instruction *inst = emit_urb_write_opcode(complete);
3292 inst->base_mrf = base_mrf;
3293 inst->mlen = align_interleaved_urb_mlen(devinfo, mrf - base_mrf);
3294 inst->offset += offset;
3295 } while (!complete);
3296 }
3297
3298
3299 src_reg
3300 vec4_visitor::get_scratch_offset(bblock_t *block, vec4_instruction *inst,
3301 src_reg *reladdr, int reg_offset)
3302 {
3303 /* Because we store the values to scratch interleaved like our
3304 * vertex data, we need to scale the vec4 index by 2.
3305 */
3306 int message_header_scale = 2;
3307
3308 /* Pre-gen6, the message header uses byte offsets instead of vec4
3309 * (16-byte) offset units.
3310 */
3311 if (devinfo->gen < 6)
3312 message_header_scale *= 16;
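   /* Worked example (illustrative): reg_offset = 3 becomes a constant offset
    * of 6 on Gen6+ (interleaved vec4 units) or 96 on Gen4-5 (bytes).
    */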
3313
3314 if (reladdr) {
3315 src_reg index = src_reg(this, glsl_type::int_type);
3316
3317 emit_before(block, inst, ADD(dst_reg(index), *reladdr,
3318 src_reg(reg_offset)));
3319 emit_before(block, inst, MUL(dst_reg(index), index,
3320 src_reg(message_header_scale)));
3321
3322 return index;
3323 } else {
3324 return src_reg(reg_offset * message_header_scale);
3325 }
3326 }
3327
3328 src_reg
3329 vec4_visitor::get_pull_constant_offset(bblock_t * block, vec4_instruction *inst,
3330 src_reg *reladdr, int reg_offset)
3331 {
3332 if (reladdr) {
3333 src_reg index = src_reg(this, glsl_type::int_type);
3334
3335 emit_before(block, inst, ADD(dst_reg(index), *reladdr,
3336 src_reg(reg_offset)));
3337
3338 /* Pre-gen6, the message header uses byte offsets instead of vec4
3339 * (16-byte) offset units.
3340 */
3341 if (devinfo->gen < 6) {
3342 emit_before(block, inst, MUL(dst_reg(index), index, src_reg(16)));
3343 }
3344
3345 return index;
3346 } else if (devinfo->gen >= 8) {
3347 /* Store the offset in a GRF so we can send-from-GRF. */
3348 src_reg offset = src_reg(this, glsl_type::int_type);
3349 emit_before(block, inst, MOV(dst_reg(offset), src_reg(reg_offset)));
3350 return offset;
3351 } else {
3352 int message_header_scale = devinfo->gen < 6 ? 16 : 1;
3353 return src_reg(reg_offset * message_header_scale);
3354 }
3355 }
3356
3357 /**
3358 * Emits an instruction before @inst to load the value named by @orig_src
3359 * from scratch space at @base_offset to @temp.
3360 *
3361 * @base_offset is measured in 32-byte units (the size of a register).
3362 */
3363 void
3364 vec4_visitor::emit_scratch_read(bblock_t *block, vec4_instruction *inst,
3365 dst_reg temp, src_reg orig_src,
3366 int base_offset)
3367 {
3368 int reg_offset = base_offset + orig_src.reg_offset;
3369 src_reg index = get_scratch_offset(block, inst, orig_src.reladdr,
3370 reg_offset);
3371
3372 emit_before(block, inst, SCRATCH_READ(temp, index));
3373 }
3374
3375 /**
3376 * Emits an instruction after @inst to store the value to be written
3377 * to @orig_dst to scratch space at @base_offset, from @temp.
3378 *
3379 * @base_offset is measured in 32-byte units (the size of a register).
3380 */
3381 void
3382 vec4_visitor::emit_scratch_write(bblock_t *block, vec4_instruction *inst,
3383 int base_offset)
3384 {
3385 int reg_offset = base_offset + inst->dst.reg_offset;
3386 src_reg index = get_scratch_offset(block, inst, inst->dst.reladdr,
3387 reg_offset);
3388
3389 /* Create a temporary register to store *inst's result in.
3390 *
3391 * We have to be careful in MOVing from our temporary result register in
3392 * the scratch write. If we swizzle from channels of the temporary that
3393 * weren't initialized, it will confuse live interval analysis, which will
3394 * make spilling fail to make progress.
3395 */
3396 const src_reg temp = swizzle(retype(src_reg(this, glsl_type::vec4_type),
3397 inst->dst.type),
3398 brw_swizzle_for_mask(inst->dst.writemask));
3399 dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
3400 inst->dst.writemask));
3401 vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
3402 write->predicate = inst->predicate;
3403 write->ir = inst->ir;
3404 write->annotation = inst->annotation;
3405 inst->insert_after(block, write);
3406
3407 inst->dst.file = temp.file;
3408 inst->dst.reg = temp.reg;
3409 inst->dst.reg_offset = temp.reg_offset;
3410 inst->dst.reladdr = NULL;
3411 }
3412
3413 /**
3414 * Checks if \p src and/or \p src.reladdr require a scratch read, and if so,
3415 * adds the scratch read(s) before \p inst. The function also checks for
3416 * recursive reladdr scratch accesses, issuing the corresponding scratch
3417 * loads and rewriting reladdr references accordingly.
3418 *
3419 * \return \p src if it did not require a scratch load, otherwise, the
3420 * register holding the result of the scratch load that the caller should
3421 * use to rewrite src.
3422 */
3423 src_reg
3424 vec4_visitor::emit_resolve_reladdr(int scratch_loc[], bblock_t *block,
3425 vec4_instruction *inst, src_reg src)
3426 {
3427 /* Resolve recursive reladdr scratch access by calling ourselves
3428 * with src.reladdr
3429 */
3430 if (src.reladdr)
3431 *src.reladdr = emit_resolve_reladdr(scratch_loc, block, inst,
3432 *src.reladdr);
3433
3434 /* Now handle scratch access on src */
3435 if (src.file == GRF && scratch_loc[src.reg] != -1) {
3436 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
3437 emit_scratch_read(block, inst, temp, src, scratch_loc[src.reg]);
3438 src.reg = temp.reg;
3439 src.reg_offset = temp.reg_offset;
3440 src.reladdr = NULL;
3441 }
3442
3443 return src;
3444 }
3445
3446 /**
3447 * We can't generally support array access in GRF space, because a
3448 * single instruction's destination can only span 2 contiguous
3449 * registers. So, we send all GRF arrays that get variable index
3450 * access to scratch space.
3451 */
3452 void
3453 vec4_visitor::move_grf_array_access_to_scratch()
3454 {
3455 int scratch_loc[this->alloc.count];
3456 memset(scratch_loc, -1, sizeof(scratch_loc));
3457
3458 /* First, calculate the set of virtual GRFs that need to be punted
3459 * to scratch due to having any array access on them, and where in
3460 * scratch.
3461 */
3462 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
3463 if (inst->dst.file == GRF && inst->dst.reladdr) {
3464 if (scratch_loc[inst->dst.reg] == -1) {
3465 scratch_loc[inst->dst.reg] = c->last_scratch;
3466 c->last_scratch += this->alloc.sizes[inst->dst.reg];
3467 }
3468
3469 for (src_reg *iter = inst->dst.reladdr;
3470 iter->reladdr;
3471 iter = iter->reladdr) {
3472 if (iter->file == GRF && scratch_loc[iter->reg] == -1) {
3473 scratch_loc[iter->reg] = c->last_scratch;
3474 c->last_scratch += this->alloc.sizes[iter->reg];
3475 }
3476 }
3477 }
3478
3479 for (int i = 0; i < 3; i++) {
3480 for (src_reg *iter = &inst->src[i];
3481 iter->reladdr;
3482 iter = iter->reladdr) {
3483 if (iter->file == GRF && scratch_loc[iter->reg] == -1) {
3484 scratch_loc[iter->reg] = c->last_scratch;
3485 c->last_scratch += this->alloc.sizes[iter->reg];
3486 }
3487 }
3488 }
3489 }
3490
3491 /* Now, for anything that will be accessed through scratch, rewrite
3492 * it to load/store. Note that this is a _safe list walk, because
3493 * we may generate a new scratch_write instruction after the one
3494 * we're processing.
3495 */
3496 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
3497 /* Set up the annotation tracking for new generated instructions. */
3498 base_ir = inst->ir;
3499 current_annotation = inst->annotation;
3500
3501 /* First handle scratch access on the dst. Notice we have to handle
3502 * the case where the dst's reladdr also points to scratch space.
3503 */
3504 if (inst->dst.reladdr)
3505 *inst->dst.reladdr = emit_resolve_reladdr(scratch_loc, block, inst,
3506 *inst->dst.reladdr);
3507
3508 /* Now that we have handled any (possibly recursive) reladdr scratch
3509 * accesses for dst we can safely do the scratch write for dst itself
3510 */
3511 if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1)
3512 emit_scratch_write(block, inst, scratch_loc[inst->dst.reg]);
3513
3514 /* Now handle scratch access on any src. In this case, since inst->src[i]
3515 * already is a src_reg, we can just call emit_resolve_reladdr with
3516 * inst->src[i] and it will take care of handling scratch loads for
3517 * both src and src.reladdr (recursively).
3518 */
3519 for (int i = 0; i < 3; i++) {
3520 inst->src[i] = emit_resolve_reladdr(scratch_loc, block, inst,
3521 inst->src[i]);
3522 }
3523 }
3524 }
3525
3526 /**
3527 * Emits an instruction before @inst to load the value named by @orig_src
3528 * from the pull constant buffer (surface) at @base_offset to @temp.
3529 */
3530 void
3531 vec4_visitor::emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
3532 dst_reg temp, src_reg orig_src,
3533 int base_offset)
3534 {
3535 int reg_offset = base_offset + orig_src.reg_offset;
3536 src_reg index = src_reg(prog_data->base.binding_table.pull_constants_start);
3537 src_reg offset = get_pull_constant_offset(block, inst, orig_src.reladdr,
3538 reg_offset);
3539
3540 emit_pull_constant_load_reg(temp,
3541 index,
3542 offset,
3543 block, inst);
3544 }
3545
3546 /**
3547 * Implements array access of uniforms by inserting a
3548 * PULL_CONSTANT_LOAD instruction.
3549 *
3550 * Unlike temporary GRF array access (where we don't support it due to
3551 * the difficulty of doing relative addressing on instruction
3552 * destinations), we could potentially do array access of uniforms
3553 * that were loaded in GRF space as push constants. In real-world
3554 * usage we've seen, though, the arrays being used are always larger
3555 * than we could load as push constants, so just always move all
3556 * uniform array access out to a pull constant buffer.
3557 */
3558 void
3559 vec4_visitor::move_uniform_array_access_to_pull_constants()
3560 {
3561 int pull_constant_loc[this->uniforms];
3562 memset(pull_constant_loc, -1, sizeof(pull_constant_loc));
3563 bool nested_reladdr;
3564
3565 /* Walk through and find array access of uniforms. Put a copy of that
3566 * uniform in the pull constant buffer.
3567 *
3568 * Note that we don't move constant-indexed accesses to arrays. No
3569 * testing has been done of the performance impact of this choice.
3570 */
3571 do {
3572 nested_reladdr = false;
3573
3574 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
3575 for (int i = 0; i < 3; i++) {
3576 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
3577 continue;
3578
3579 int uniform = inst->src[i].reg;
3580
3581 if (inst->src[i].reladdr->reladdr)
3582 nested_reladdr = true; /* will need another pass */
3583
3584 /* If this array isn't already present in the pull constant buffer,
3585 * add it.
3586 */
3587 if (pull_constant_loc[uniform] == -1) {
3588 const gl_constant_value **values =
3589 &stage_prog_data->param[uniform * 4];
3590
3591 pull_constant_loc[uniform] = stage_prog_data->nr_pull_params / 4;
3592
3593 assert(uniform < uniform_array_size);
3594 for (int j = 0; j < uniform_size[uniform] * 4; j++) {
3595 stage_prog_data->pull_param[stage_prog_data->nr_pull_params++]
3596 = values[j];
3597 }
3598 }
3599
3600 /* Set up the annotation tracking for new generated instructions. */
3601 base_ir = inst->ir;
3602 current_annotation = inst->annotation;
3603
3604 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
3605
3606 emit_pull_constant_load(block, inst, temp, inst->src[i],
3607 pull_constant_loc[uniform]);
3608
3609 inst->src[i].file = temp.file;
3610 inst->src[i].reg = temp.reg;
3611 inst->src[i].reg_offset = temp.reg_offset;
3612 inst->src[i].reladdr = NULL;
3613 }
3614 }
3615 } while (nested_reladdr);
3616
3617 /* Now there are no accesses of the UNIFORM file with a reladdr, so
3618 * no need to track them as larger-than-vec4 objects. This will be
3619 * relied on in cutting out unused uniform vectors from push
3620 * constants.
3621 */
3622 split_uniform_registers();
3623 }
3624
3625 void
3626 vec4_visitor::resolve_ud_negate(src_reg *reg)
3627 {
3628 if (reg->type != BRW_REGISTER_TYPE_UD ||
3629 !reg->negate)
3630 return;
3631
3632 src_reg temp = src_reg(this, glsl_type::uvec4_type);
3633 emit(BRW_OPCODE_MOV, dst_reg(temp), *reg);
3634 *reg = temp;
3635 }
3636
3637 /**
3638 * Resolve the result of a Gen4-5 CMP instruction to a proper boolean.
3639 *
3640 * CMP on Gen4-5 only sets the LSB of the result; the rest are undefined.
3641 * If we need a proper boolean value, we have to fix it up to be 0 or ~0.
3642 */
3643 void
3644 vec4_visitor::resolve_bool_comparison(ir_rvalue *rvalue, src_reg *reg)
3645 {
3646 assert(devinfo->gen <= 5);
3647
3648 if (!rvalue->type->is_boolean())
3649 return;
3650
3651 src_reg and_result = src_reg(this, rvalue->type);
3652 src_reg neg_result = src_reg(this, rvalue->type);
3653 emit(AND(dst_reg(and_result), *reg, src_reg(1)));
3654 emit(MOV(dst_reg(neg_result), negate(and_result)));
3655 *reg = neg_result;
3656 }
3657
3658 vec4_visitor::vec4_visitor(struct brw_context *brw,
3659 struct brw_vec4_compile *c,
3660 struct gl_program *prog,
3661 const struct brw_vue_prog_key *key,
3662 struct brw_vue_prog_data *prog_data,
3663 struct gl_shader_program *shader_prog,
3664 gl_shader_stage stage,
3665 void *mem_ctx,
3666 bool no_spills,
3667 shader_time_shader_type st_base,
3668 shader_time_shader_type st_written,
3669 shader_time_shader_type st_reset)
3670 : backend_visitor(brw, shader_prog, prog, &prog_data->base, stage),
3671 c(c),
3672 key(key),
3673 prog_data(prog_data),
3674 sanity_param_count(0),
3675 fail_msg(NULL),
3676 first_non_payload_grf(0),
3677 need_all_constants_in_pull_buffer(false),
3678 no_spills(no_spills),
3679 st_base(st_base),
3680 st_written(st_written),
3681 st_reset(st_reset)
3682 {
3683 this->mem_ctx = mem_ctx;
3684 this->failed = false;
3685
3686 this->base_ir = NULL;
3687 this->current_annotation = NULL;
3688 memset(this->output_reg_annotation, 0, sizeof(this->output_reg_annotation));
3689
3690 this->variable_ht = hash_table_ctor(0,
3691 hash_table_pointer_hash,
3692 hash_table_pointer_compare);
3693
3694 this->virtual_grf_start = NULL;
3695 this->virtual_grf_end = NULL;
3696 this->live_intervals = NULL;
3697
3698 this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
3699
3700 this->uniforms = 0;
3701
3702 /* Initialize uniform_array_size to at least 1 because pre-gen6 VS requires
3703 * at least one. See setup_uniforms() in brw_vec4.cpp.
3704 */
3705 this->uniform_array_size = 1;
3706 if (prog_data) {
3707 this->uniform_array_size =
3708 MAX2(DIV_ROUND_UP(stage_prog_data->nr_params, 4), 1);
3709 }
3710
3711 this->uniform_size = rzalloc_array(mem_ctx, int, this->uniform_array_size);
3712 this->uniform_vector_size = rzalloc_array(mem_ctx, int, this->uniform_array_size);
3713 }
3714
3715 vec4_visitor::~vec4_visitor()
3716 {
3717 hash_table_dtor(this->variable_ht);
3718 }
3719
3720
3721 void
3722 vec4_visitor::fail(const char *format, ...)
3723 {
3724 va_list va;
3725 char *msg;
3726
3727 if (failed)
3728 return;
3729
3730 failed = true;
3731
3732 va_start(va, format);
3733 msg = ralloc_vasprintf(mem_ctx, format, va);
3734 va_end(va);
3735 msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);
3736
3737 this->fail_msg = msg;
3738
3739 if (debug_enabled) {
3740 fprintf(stderr, "%s", msg);
3741 }
3742 }
3743
3744 } /* namespace brw */