/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "util/register_allocate.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "brw_vec4.h"
#include "brw_fs.h"
#include "main/uniforms.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "program/sampler.h"

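/* The reg_offsets used below appear to select components within the single
 * vec4 attribute slot (VERT_ATTRIB_MAX) that the vertex fetcher fills with
 * system values: base vertex in component 0, vertex ID in component 2 and
 * instance ID in component 3.
 */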
fs_reg *
fs_visitor::emit_vs_system_value(int location)
{
   fs_reg *reg = new(this->mem_ctx)
      fs_reg(ATTR, VERT_ATTRIB_MAX, BRW_REGISTER_TYPE_D);
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;

   switch (location) {
   case SYSTEM_VALUE_BASE_VERTEX:
      reg->reg_offset = 0;
      vs_prog_data->uses_vertexid = true;
      break;
   case SYSTEM_VALUE_VERTEX_ID:
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      reg->reg_offset = 2;
      vs_prog_data->uses_vertexid = true;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      reg->reg_offset = 3;
      vs_prog_data->uses_instanceid = true;
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->data.mode == ir_var_shader_in) {
      assert(ir->data.location != -1);
      if (stage == MESA_SHADER_VERTEX) {
         reg = new(this->mem_ctx)
            fs_reg(ATTR, ir->data.location,
                   brw_type_for_base_type(ir->type->get_scalar_type()));
      } else if (ir->data.location == VARYING_SLOT_POS) {
         reg = emit_fragcoord_interpolation(ir->data.pixel_center_integer,
                                            ir->data.origin_upper_left);
      } else if (ir->data.location == VARYING_SLOT_FACE) {
         reg = emit_frontfacing_interpolation();
      } else {
         reg = new(this->mem_ctx) fs_reg(vgrf(ir->type));
         emit_general_interpolation(*reg, ir->name, ir->type,
                                    (glsl_interp_qualifier) ir->data.interpolation,
                                    ir->data.location, ir->data.centroid,
                                    ir->data.sample);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->data.mode == ir_var_shader_out) {
      reg = new(this->mem_ctx) fs_reg(vgrf(ir->type));

      if (stage == MESA_SHADER_VERTEX) {
         int vector_elements =
            ir->type->is_array() ? ir->type->fields.array->vector_elements
                                 : ir->type->vector_elements;

         for (int i = 0; i < (type_size(ir->type) + 3) / 4; i++) {
            int output = ir->data.location + i;
            this->outputs[output] = *reg;
            this->outputs[output].reg_offset = i * 4;
            this->output_components[output] = vector_elements;
         }

      } else if (ir->data.index > 0) {
         assert(ir->data.location == FRAG_RESULT_DATA0);
         assert(ir->data.index == 1);
         this->dual_src_output = *reg;
         this->do_dual_src = true;
      } else if (ir->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         assert(stage == MESA_SHADER_FRAGMENT);
         brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = *reg;
            this->output_components[i] = 4;
         }
      } else if (ir->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = *reg;
      } else if (ir->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = *reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(ir->data.location >= FRAG_RESULT_DATA0 &&
                ir->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         int vector_elements =
            ir->type->is_array() ? ir->type->fields.array->vector_elements
                                 : ir->type->vector_elements;

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
            int output = ir->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = offset(*reg, vector_elements * i);
            this->output_components[output] = vector_elements;
         }
      }
   } else if (ir->data.mode == ir_var_uniform) {
      int param_index = uniforms;

      /* Thanks to the lower_ubo_reference pass, we will see only
       * ir_binop_ubo_load expressions and not ir_dereference_variable for UBO
       * variables, so no need for them to be in variable_ht.
       *
       * Some uniforms, such as samplers and atomic counters, have no actual
       * storage, so we should ignore them.
       */
      if (ir->is_in_uniform_block() || type_size(ir->type) == 0)
         return;

      if (dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in SIMD16\n", ir->name);
         }
         return;
      }

      param_size[param_index] = type_size(ir->type);
      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);

   } else if (ir->data.mode == ir_var_system_value) {
      switch (ir->data.location) {
      case SYSTEM_VALUE_BASE_VERTEX:
      case SYSTEM_VALUE_VERTEX_ID:
      case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      case SYSTEM_VALUE_INSTANCE_ID:
         reg = emit_vs_system_value(ir->data.location);
         break;
      case SYSTEM_VALUE_SAMPLE_POS:
         reg = emit_samplepos_setup();
         break;
      case SYSTEM_VALUE_SAMPLE_ID:
         reg = emit_sampleid_setup();
         break;
      case SYSTEM_VALUE_SAMPLE_MASK_IN:
         assert(devinfo->gen >= 7);
         reg = new(mem_ctx)
            fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                          BRW_REGISTER_TYPE_D));
         break;
      }
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(vgrf(ir->type));

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);

   if (!reg) {
      fail("Failed to find variable storage for %s\n", ir->var->name);
      this->result = fs_reg(reg_null_d);
      return;
   }
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int off = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      off += type_size(struct_type->fields.structure[i].type);
   }
   this->result = offset(this->result, off);
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *constant_index;
   fs_reg src;
   int element_size = type_size(ir->type);

   constant_index = ir->array_index->as_constant();

   ir->array->accept(this);
   src = this->result;
   src.type = brw_type_for_base_type(ir->type);

   if (constant_index) {
      if (src.file == ATTR) {
         /* Attribute arrays get loaded as one vec4 per element.  In that case
          * offset the source register.
          */
         src.reg += constant_index->value.i[0];
      } else {
         assert(src.file == UNIFORM || src.file == GRF || src.file == HW_REG);
         src = offset(src, constant_index->value.i[0] * element_size);
      }
   } else {
      /* Variable index array dereference.  We attach the variable index
       * component to the reg as a pointer to a register containing the
       * offset.  Currently only uniform arrays are supported in this patch,
       * and that reladdr pointer is resolved by
       * move_uniform_array_access_to_pull_constants().  All other array types
       * are lowered by lower_variable_index_to_cond_assign().
       */
      ir->array_index->accept(this);

      fs_reg index_reg;
      index_reg = vgrf(glsl_type::int_type);
      emit(BRW_OPCODE_MUL, index_reg, this->result, fs_reg(element_size));

      if (src.reladdr) {
         emit(BRW_OPCODE_ADD, index_reg, *src.reladdr, index_reg);
      }

      src.reladdr = ralloc(mem_ctx, fs_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }
   this->result = src;
}

fs_inst *
fs_visitor::emit_lrp(const fs_reg &dst, const fs_reg &x, const fs_reg &y,
                     const fs_reg &a)
{
   if (devinfo->gen < 6) {
      /* We can't use the LRP instruction.  Emit x*(1-a) + y*a. */
      fs_reg y_times_a = vgrf(glsl_type::float_type);
      fs_reg one_minus_a = vgrf(glsl_type::float_type);
      fs_reg x_times_one_minus_a = vgrf(glsl_type::float_type);

      emit(MUL(y_times_a, y, a));

      fs_reg negative_a = a;
      negative_a.negate = !a.negate;
      emit(ADD(one_minus_a, negative_a, fs_reg(1.0f)));
      emit(MUL(x_times_one_minus_a, x, one_minus_a));

      return emit(ADD(dst, x_times_one_minus_a, y_times_a));
   } else {
      /* The LRP instruction actually does op1 * op0 + op2 * (1 - op0), so
       * we need to reorder the operands.
       */
      return emit(LRP(dst, a, y, x));
   }
}

void
fs_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, const fs_reg &dst,
                        const fs_reg &src0, const fs_reg &src1)
{
   assert(conditionalmod == BRW_CONDITIONAL_GE ||
          conditionalmod == BRW_CONDITIONAL_L);

   fs_inst *inst;

   if (devinfo->gen >= 6) {
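      /* SEL with a conditional modifier implements min/max in a single
       * instruction: .l keeps the smaller source, .ge the larger.
       */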
      inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
      inst->conditional_mod = conditionalmod;
   } else {
      emit(CMP(reg_null_d, src0, src1, conditionalmod));

      inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
      inst->predicate = BRW_PREDICATE_NORMAL;
   }
}

void
fs_visitor::emit_uniformize(const fs_reg &dst, const fs_reg &src)
{
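   /* Find one enabled channel and broadcast its value of src to every
    * channel of dst, yielding a value that is uniform across the SIMD
    * group (used, e.g., for dynamically indexed surface numbers).
    */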
   const fs_reg chan_index = vgrf(glsl_type::uint_type);

   emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, component(chan_index, 0))
      ->force_writemask_all = true;
   emit(SHADER_OPCODE_BROADCAST, component(dst, 0),
        src, component(chan_index, 0))
      ->force_writemask_all = true;
}

bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   if (ir->operation != ir_unop_saturate)
      return false;

   ir_rvalue *sat_val = ir->operands[0];

   fs_inst *pre_inst = (fs_inst *) this->instructions.get_tail();

   sat_val->accept(this);
   fs_reg src = this->result;

   fs_inst *last_inst = (fs_inst *) this->instructions.get_tail();

   /* If the last instruction from our accept() generated our
    * src, just set the saturate flag instead of emitting a separate mov.
    */
   fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src);
   if (modify && modify->regs_written == modify->dst.width / 8 &&
       modify->can_do_saturate()) {
      modify->saturate = true;
      this->result = src;
      return true;
   }

   return false;
}

bool
fs_visitor::try_emit_line(ir_expression *ir)
{
   /* LINE's src0 must be of type float. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1];
   ir_expression *mul = ir->operands[0]->as_expression();

   if (!mul || mul->operation != ir_binop_mul) {
      nonmul = ir->operands[0];
      mul = ir->operands[1]->as_expression();

      if (!mul || mul->operation != ir_binop_mul)
         return false;
   }

   ir_constant *const_add = nonmul->as_constant();
   if (!const_add)
      return false;

   int add_operand_vf = brw_float_to_vf(const_add->value.f[0]);
   if (add_operand_vf == -1)
      return false;

   ir_rvalue *non_const_mul = mul->operands[1];
   ir_constant *const_mul = mul->operands[0]->as_constant();
   if (!const_mul) {
      const_mul = mul->operands[1]->as_constant();

      if (!const_mul)
         return false;

      non_const_mul = mul->operands[0];
   }

   int mul_operand_vf = brw_float_to_vf(const_mul->value.f[0]);
   if (mul_operand_vf == -1)
      return false;

   non_const_mul->accept(this);
   fs_reg src1 = this->result;

   fs_reg src0 = vgrf(ir->type);
   emit(BRW_OPCODE_MOV, src0,
        fs_reg((uint8_t)mul_operand_vf, 0, 0, (uint8_t)add_operand_vf));

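   /* As used here, LINE computes dst = src0.x * src1 + src0.w, so the
    * multiplier and addend travel in the .x and .w channels of a single
    * VF immediate (VF is the 8-bit restricted-float vector format, hence
    * the brw_float_to_vf() representability checks above).
    */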
428 emit(BRW_OPCODE_LINE, this->result, src0, src1);
429 return true;
430 }
431
432 bool
433 fs_visitor::try_emit_mad(ir_expression *ir)
434 {
435 /* 3-src instructions were introduced in gen6. */
436 if (devinfo->gen < 6)
437 return false;
438
439 /* MAD can only handle floating-point data. */
440 if (ir->type != glsl_type::float_type)
441 return false;
442
443 ir_rvalue *nonmul;
444 ir_expression *mul;
445 bool mul_negate, mul_abs;
446
447 for (int i = 0; i < 2; i++) {
448 mul_negate = false;
449 mul_abs = false;
450
451 mul = ir->operands[i]->as_expression();
452 nonmul = ir->operands[1 - i];
453
454 if (mul && mul->operation == ir_unop_abs) {
455 mul = mul->operands[0]->as_expression();
456 mul_abs = true;
457 } else if (mul && mul->operation == ir_unop_neg) {
458 mul = mul->operands[0]->as_expression();
459 mul_negate = true;
460 }
461
462 if (mul && mul->operation == ir_binop_mul)
463 break;
464 }
465
466 if (!mul || mul->operation != ir_binop_mul)
467 return false;
468
469 nonmul->accept(this);
470 fs_reg src0 = this->result;
471
472 mul->operands[0]->accept(this);
473 fs_reg src1 = this->result;
474 src1.negate ^= mul_negate;
475 src1.abs = mul_abs;
476 if (mul_abs)
477 src1.negate = false;
478
479 mul->operands[1]->accept(this);
480 fs_reg src2 = this->result;
481 src2.abs = mul_abs;
482 if (mul_abs)
483 src2.negate = false;
484
485 this->result = vgrf(ir->type);
486 emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);
487
488 return true;
489 }
490
491 bool
492 fs_visitor::try_emit_b2f_of_comparison(ir_expression *ir)
493 {
494 /* On platforms that do not natively generate 0u and ~0u for Boolean
495 * results, b2f expressions that look like
496 *
497 * f = b2f(expr cmp 0)
498 *
499 * will generate better code by pretending the expression is
500 *
501 * f = ir_triop_csel(0.0, 1.0, expr cmp 0)
502 *
503 * This is because the last instruction of "expr" can generate the
504 * condition code for the "cmp 0". This avoids having to do the "-(b & 1)"
505 * trick to generate 0u or ~0u for the Boolean result. This means code like
506 *
507 * mov(16) g16<1>F 1F
508 * mul.ge.f0(16) null g6<8,8,1>F g14<8,8,1>F
509 * (+f0) sel(16) m6<1>F g16<8,8,1>F 0F
510 *
511 * will be generated instead of
512 *
513 * mul(16) g2<1>F g12<8,8,1>F g4<8,8,1>F
514 * cmp.ge.f0(16) g2<1>D g4<8,8,1>F 0F
515 * and(16) g4<1>D g2<8,8,1>D 1D
516 * and(16) m6<1>D -g4<8,8,1>D 0x3f800000UD
517 *
518 * When the comparison is != 0.0 using the knowledge that the false case
519 * already results in zero would allow better code generation by possibly
520 * avoiding a load-immediate instruction.
521 */
522 ir_expression *cmp = ir->operands[0]->as_expression();
523 if (cmp == NULL)
524 return false;
525
526 if (cmp->operation == ir_binop_nequal) {
527 for (unsigned i = 0; i < 2; i++) {
528 ir_constant *c = cmp->operands[i]->as_constant();
529 if (c == NULL || !c->is_zero())
530 continue;
531
532 ir_expression *expr = cmp->operands[i ^ 1]->as_expression();
533 if (expr != NULL) {
534 fs_reg op[2];
535
536 for (unsigned j = 0; j < 2; j++) {
537 cmp->operands[j]->accept(this);
538 op[j] = this->result;
539
540 resolve_ud_negate(&op[j]);
541 }
542
543 emit_bool_to_cond_code_of_reg(cmp, op);
544
545 /* In this case we know when the condition is true, op[i ^ 1]
546 * contains zero. Invert the predicate, use op[i ^ 1] as src0,
547 * and immediate 1.0f as src1.
548 */
549 this->result = vgrf(ir->type);
550 op[i ^ 1].type = BRW_REGISTER_TYPE_F;
551
552 fs_inst *inst = emit(SEL(this->result, op[i ^ 1], fs_reg(1.0f)));
553 inst->predicate = BRW_PREDICATE_NORMAL;
554 inst->predicate_inverse = true;
555 return true;
556 }
557 }
558 }
559
560 emit_bool_to_cond_code(cmp);
561
562 fs_reg temp = vgrf(ir->type);
563 emit(MOV(temp, fs_reg(1.0f)));
564
565 this->result = vgrf(ir->type);
566 fs_inst *inst = emit(SEL(this->result, temp, fs_reg(0.0f)));
567 inst->predicate = BRW_PREDICATE_NORMAL;
568
569 return true;
570 }
571
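/* Worked examples for pack_pixel_offset() below (illustrative, not from
 * the hardware docs): 0.25f packs to 4 (4/16), 0.5f clamps to 7 (+7/16),
 * and -0.5f packs to 0x8, the S0.4 encoding of -8/16.
 */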
static int
pack_pixel_offset(float x)
{
   /* Clamp upper end of the range to +7/16.  See explanation in the
    * non-constant offset case below.
    */
   int n = MIN2((int)(x * 16), 7);
   return n & 0xf;
}

void
fs_visitor::emit_interpolate_expression(ir_expression *ir)
{
   /* in SIMD16 mode, the pixel interpolator returns coords interleaved
    * 8 channels at a time, same as the barycentric coords presented in
    * the FS payload.  this requires a bit of extra work to support.
    */
   no16("interpolate_at_* not yet supported in SIMD16 mode.");

   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   ir_dereference *deref = ir->operands[0]->as_dereference();
   ir_swizzle *swiz = NULL;
   if (!deref) {
      /* the api does not allow a swizzle here, but the varying packing code
       * may have pushed one into here.
       */
      swiz = ir->operands[0]->as_swizzle();
      assert(swiz);
      deref = swiz->val->as_dereference();
   }
   assert(deref);
   ir_variable *var = deref->variable_referenced();
   assert(var);

   /* 1. collect interpolation factors */

   fs_reg dst_xy = vgrf(glsl_type::get_instance(ir->type->base_type, 2, 1));

   /* for most messages, we need one reg of ignored data; the hardware
    * requires mlen==1 even when there is no payload.  in the per-slot
    * offset case, we'll replace this with the proper source data.
    */
   fs_reg src = vgrf(glsl_type::float_type);
   int mlen = 1; /* one reg unless overridden */
   int reg_width = dispatch_width / 8;
   fs_inst *inst;

   switch (ir->operation) {
   case ir_unop_interpolate_at_centroid:
      inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_xy, src, fs_reg(0u));
      break;

   case ir_binop_interpolate_at_sample: {
      ir_constant *sample_num = ir->operands[1]->as_constant();
      assert(sample_num || !"nonconstant sample number should have been lowered.");

      unsigned msg_data = sample_num->value.i[0] << 4;
      inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src, fs_reg(msg_data));
      break;
   }

   case ir_binop_interpolate_at_offset: {
      ir_constant *const_offset = ir->operands[1]->as_constant();
      if (const_offset) {
         unsigned msg_data = pack_pixel_offset(const_offset->value.f[0]) |
                             (pack_pixel_offset(const_offset->value.f[1]) << 4);
         inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
                     fs_reg(msg_data));
      } else {
         /* pack the operands: hw wants offsets as 4 bit signed ints */
         ir->operands[1]->accept(this);
         src = vgrf(glsl_type::ivec2_type);
         fs_reg src2 = src;
         for (int i = 0; i < 2; i++) {
            fs_reg temp = vgrf(glsl_type::float_type);
            emit(MUL(temp, this->result, fs_reg(16.0f)));
            emit(MOV(src2, temp)); /* float to int */

            /* Clamp the upper end of the range to +7/16.  ARB_gpu_shader5
             * requires that we support a maximum offset of +0.5, which
             * isn't representable in a S0.4 value -- if we didn't clamp it,
             * we'd end up with -8/16, which is the opposite of what the
             * shader author wanted.
             *
             * This is legal due to ARB_gpu_shader5's quantization rules:
             *
             * "Not all values of <offset> may be supported; x and y offsets
             * may be rounded to fixed-point values with the number of
             * fraction bits given by the implementation-dependent constant
             * FRAGMENT_INTERPOLATION_OFFSET_BITS"
             */

            fs_inst *inst = emit(BRW_OPCODE_SEL, src2, src2, fs_reg(7));
            inst->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */

            src2 = offset(src2, 1);
            this->result = offset(this->result, 1);
         }

         mlen = 2 * reg_width;
         inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
                     fs_reg(0u));
      }
      break;
   }

   default:
      unreachable("not reached");
   }

   inst->mlen = mlen;
   inst->regs_written = 2 * reg_width; /* 2 floats per slot returned */
   inst->pi_noperspective = var->determine_interpolation_mode(key->flat_shade) ==
      INTERP_QUALIFIER_NOPERSPECTIVE;

   /* 2. emit linterp */

   fs_reg res = vgrf(ir->type);
   this->result = res;

   for (int i = 0; i < ir->type->vector_elements; i++) {
      int ch = swiz ? ((*(int *)&swiz->mask) >> 2*i) & 3 : i;
      emit(FS_OPCODE_LINTERP, res, dst_xy,
           fs_reg(interp_reg(var->data.location, ch)));
      res = offset(res, 1);
   }
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[3], temp;
   fs_inst *inst;
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;

   assert(ir->get_num_operands() <= 3);

   if (try_emit_saturate(ir))
      return;

   /* Deal with the real oddball stuff first */
   switch (ir->operation) {
   case ir_binop_add:
      if (devinfo->gen <= 5 && try_emit_line(ir))
         return;
      if (try_emit_mad(ir))
         return;
      break;

   case ir_triop_csel:
      ir->operands[1]->accept(this);
      op[1] = this->result;
      ir->operands[2]->accept(this);
      op[2] = this->result;

      emit_bool_to_cond_code(ir->operands[0]);

      this->result = vgrf(ir->type);
      inst = emit(SEL(this->result, op[1], op[2]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      return;

   case ir_unop_b2f:
      if (devinfo->gen <= 5 && try_emit_b2f_of_comparison(ir))
         return;
      break;

   case ir_unop_interpolate_at_centroid:
   case ir_binop_interpolate_at_offset:
   case ir_binop_interpolate_at_sample:
      emit_interpolate_expression(ir);
      return;

   default:
      break;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->fprint(stderr);
         fprintf(stderr, "\n");
      }
      assert(this->result.file == GRF ||
             this->result.file == UNIFORM || this->result.file == ATTR);
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = vgrf(ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      emit(NOT(this->result, op[0]));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      emit(MOV(this->result, op[0]));
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      emit(MOV(this->result, op[0]));
      break;
   case ir_unop_sign:
      if (ir->type->is_float()) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
          * zero.
          */
         emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));

         op[0].type = BRW_REGISTER_TYPE_UD;
         this->result.type = BRW_REGISTER_TYPE_UD;
         emit(AND(this->result, op[0], fs_reg(0x80000000u)));

         inst = emit(OR(this->result, this->result, fs_reg(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;

         this->result.type = BRW_REGISTER_TYPE_F;
      } else {
         /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
          *               -> non-negative val generates 0x00000000.
          * Predicated OR sets 1 if val is positive.
          */
         emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G));

         emit(ASR(this->result, op[0], fs_reg(31)));

         inst = emit(OR(this->result, this->result, fs_reg(1)));
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      unreachable("not reached: should be handled by ir_explog_to_explog2");
   case ir_unop_sin:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      /* Select one of the two opcodes based on the glHint value. */
      if (fs_key->high_quality_derivatives)
         emit(FS_OPCODE_DDX_FINE, this->result, op[0]);
      else
         emit(FS_OPCODE_DDX_COARSE, this->result, op[0]);
      break;

   case ir_unop_dFdx_coarse:
      emit(FS_OPCODE_DDX_COARSE, this->result, op[0]);
      break;

   case ir_unop_dFdx_fine:
      emit(FS_OPCODE_DDX_FINE, this->result, op[0]);
      break;

   case ir_unop_dFdy:
      /* Select one of the two opcodes based on the glHint value. */
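      /* render_to_fbo reflects the Y-axis flip between window-system and
       * FBO rendering; the generator uses it to negate dFdy accordingly.
       */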
      if (fs_key->high_quality_derivatives)
         emit(FS_OPCODE_DDY_FINE, result, op[0], fs_reg(fs_key->render_to_fbo));
      else
         emit(FS_OPCODE_DDY_COARSE, result, op[0], fs_reg(fs_key->render_to_fbo));
      break;

   case ir_unop_dFdy_coarse:
      emit(FS_OPCODE_DDY_COARSE, result, op[0], fs_reg(fs_key->render_to_fbo));
      break;

   case ir_unop_dFdy_fine:
      emit(FS_OPCODE_DDY_FINE, result, op[0], fs_reg(fs_key->render_to_fbo));
      break;

   case ir_binop_add:
      emit(ADD(this->result, op[0], op[1]));
      break;
   case ir_binop_sub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   case ir_binop_mul:
      if (devinfo->gen < 8 && ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits of one
          * of the operands (src0 on gen6, src1 on gen7).  The MACH then
          * accumulates the contribution of the upper 16 bits of that
          * operand.
          */
         if (ir->operands[0]->is_uint16_constant()) {
            if (devinfo->gen < 7)
               emit(MUL(this->result, op[0], op[1]));
            else
               emit(MUL(this->result, op[1], op[0]));
         } else if (ir->operands[1]->is_uint16_constant()) {
            if (devinfo->gen < 7)
               emit(MUL(this->result, op[1], op[0]));
            else
               emit(MUL(this->result, op[0], op[1]));
         } else {
            if (devinfo->gen >= 7)
               no16("SIMD16 explicit accumulator operands unsupported\n");

            struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                        this->result.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(reg_null_d, op[0], op[1]));
            emit(MOV(this->result, fs_reg(acc)));
         }
      } else {
         emit(MUL(this->result, op[0], op[1]));
      }
      break;
   case ir_binop_imul_high: {
      if (devinfo->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  this->result.type);

      fs_inst *mul = emit(MUL(acc, op[0], op[1]));
      emit(MACH(this->result, op[0], op[1]));

      /* Until Gen8, integer multiplies read 32-bits from one source,
       * 16-bits from the other, and relied on the MACH instruction to
       * generate the high bits of the result.
       *
       * On Gen8, the multiply instruction does a full 32x32-bit multiply,
       * but the MUL/MACH sequence used here still expects the old partial
       * product, so we have to simulate the previous behavior and then use
       * a MACH instruction.
       *
       * FINISHME: Don't use source modifiers on src1.
       */
      if (devinfo->gen >= 8) {
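         /* Retyping src1 to W/UW with a stride of 2 makes the MUL read only
          * the low 16 bits of each 32-bit element, recreating the pre-Gen8
          * partial multiply so that MUL+MACH again produce the high 32 bits.
          */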
         assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
                mul->src[1].type == BRW_REGISTER_TYPE_UD);
         if (mul->src[1].type == BRW_REGISTER_TYPE_D) {
            mul->src[1].type = BRW_REGISTER_TYPE_W;
            mul->src[1].stride = 2;
         } else {
            mul->src[1].type = BRW_REGISTER_TYPE_UW;
            mul->src[1].stride = 2;
         }
      }

      break;
   }
   case ir_binop_div:
      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_carry: {
      if (devinfo->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(ADDC(reg_null_ud, op[0], op[1]));
      emit(MOV(this->result, fs_reg(acc)));
      break;
   }
   case ir_binop_borrow: {
      if (devinfo->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(SUBB(reg_null_ud, op[0], op[1]));
      emit(MOV(this->result, fs_reg(acc)));
      break;
   }
   case ir_binop_mod:
      /* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      if (devinfo->gen <= 5) {
         resolve_bool_comparison(ir->operands[0], &op[0]);
         resolve_bool_comparison(ir->operands[1], &op[1]);
      }

      emit(CMP(this->result, op[0], op[1],
               brw_conditional_for_comparison(ir->operation)));
      break;

   case ir_binop_logic_xor:
      emit(XOR(this->result, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(OR(this->result, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(AND(this->result, op[0], op[1]));
      break;

   case ir_binop_dot:
   case ir_unop_any:
      unreachable("not reached: should be handled by brw_fs_channel_expressions");

   case ir_unop_noise:
      unreachable("not reached: should be handled by lower_noise");

   case ir_quadop_vector:
      unreachable("not reached: should be handled by lower_quadop_vector");

   case ir_binop_vector_extract:
      unreachable("not reached: should be handled by lower_vec_index_to_cond_assign()");

   case ir_triop_vector_insert:
      unreachable("not reached: should be handled by lower_vector_insert()");

   case ir_binop_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_u2f:
      op[0].type = BRW_REGISTER_TYPE_F;
      this->result = op[0];
      break;
   case ir_unop_i2u:
   case ir_unop_bitcast_f2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
   case ir_unop_bitcast_f2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_f2i:
   case ir_unop_f2u:
      emit(MOV(this->result, op[0]));
      break;

   case ir_unop_b2i:
      emit(AND(this->result, op[0], fs_reg(1)));
      break;
   case ir_unop_b2f:
      if (devinfo->gen <= 5) {
         resolve_bool_comparison(ir->operands[0], &op[0]);
      }
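      /* A true Boolean is ~0 (all ones) here, so ANDing with the bit
       * pattern of 1.0f (0x3f800000) yields exactly 0.0f or 1.0f.
       */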
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result.type = BRW_REGISTER_TYPE_D;
      emit(AND(this->result, op[0], fs_reg(0x3f800000u)));
      this->result.type = BRW_REGISTER_TYPE_F;
      break;

   case ir_unop_f2b:
      emit(CMP(this->result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
      break;
   case ir_unop_i2b:
      emit(CMP(this->result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      break;

   case ir_unop_trunc:
      emit(RNDZ(this->result, op[0]));
      break;
   case ir_unop_ceil: {
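      /* ceil(x) = -floor(-x): negate the source, round down with RNDD,
       * then negate the result on the way out.
       */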
      fs_reg tmp = vgrf(ir->type);
      op[0].negate = !op[0].negate;
      emit(RNDD(tmp, op[0]));
      tmp.negate = true;
      emit(MOV(this->result, tmp));
   }
      break;
   case ir_unop_floor:
      emit(RNDD(this->result, op[0]));
      break;
   case ir_unop_fract:
      emit(FRC(this->result, op[0]));
      break;
   case ir_unop_round_even:
      emit(RNDE(this->result, op[0]));
      break;

   case ir_binop_min:
   case ir_binop_max:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);
      emit_minmax(ir->operation == ir_binop_min ?
                  BRW_CONDITIONAL_L : BRW_CONDITIONAL_GE,
                  this->result, op[0], op[1]);
      break;
   case ir_unop_pack_snorm_2x16:
   case ir_unop_pack_snorm_4x8:
   case ir_unop_pack_unorm_2x16:
   case ir_unop_pack_unorm_4x8:
   case ir_unop_unpack_snorm_2x16:
   case ir_unop_unpack_snorm_4x8:
   case ir_unop_unpack_unorm_2x16:
   case ir_unop_unpack_unorm_4x8:
   case ir_unop_unpack_half_2x16:
   case ir_unop_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");
   case ir_unop_unpack_half_2x16_split_x:
      emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, this->result, op[0]);
      break;
   case ir_unop_unpack_half_2x16_split_y:
      emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, this->result, op[0]);
      break;
   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bitfield_reverse:
      emit(BFREV(this->result, op[0]));
      break;
   case ir_unop_bit_count:
      emit(CBIT(this->result, op[0]));
      break;
   case ir_unop_find_msb:
      temp = vgrf(glsl_type::uint_type);
      emit(FBH(temp, op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
      emit(MOV(this->result, temp));
      emit(CMP(reg_null_d, this->result, fs_reg(-1), BRW_CONDITIONAL_NZ));

      temp.negate = true;
      inst = emit(ADD(this->result, temp, fs_reg(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   case ir_unop_find_lsb:
      emit(FBL(this->result, op[0]));
      break;
   case ir_unop_saturate:
      inst = emit(MOV(this->result, op[0]));
      inst->saturate = true;
      break;
   case ir_triop_bitfield_extract:
      /* Note that the instruction's argument order is reversed from GLSL
       * and the IR.
       */
      emit(BFE(this->result, op[2], op[1], op[0]));
      break;
   case ir_binop_bfm:
      emit(BFI1(this->result, op[0], op[1]));
      break;
   case ir_triop_bfi:
      emit(BFI2(this->result, op[0], op[1], op[2]));
      break;
   case ir_quadop_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case ir_unop_bit_not:
      emit(NOT(this->result, op[0]));
      break;
   case ir_binop_bit_and:
      emit(AND(this->result, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      emit(XOR(this->result, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      emit(OR(this->result, op[0], op[1]));
      break;

   case ir_binop_lshift:
      emit(SHL(this->result, op[0], op[1]));
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         emit(ASR(this->result, op[0], op[1]));
      else
         emit(SHR(this->result, op[0], op[1]));
      break;
   case ir_binop_pack_half_2x16_split:
      emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, this->result, op[0], op[1]);
      break;
   case ir_binop_ubo_load: {
      /* This IR node takes a constant uniform block and a constant or
       * variable byte offset within the block and loads a vector from that.
       */
      ir_constant *const_uniform_block = ir->operands[0]->as_constant();
      ir_constant *const_offset = ir->operands[1]->as_constant();
      fs_reg surf_index;
      uint32_t binding, set, index, set_index;

      if (const_uniform_block) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         index = const_uniform_block->value.u[0];
         set = shader->base.UniformBlocks[index].Set;
         set_index = shader->base.UniformBlocks[index].Binding;
         binding = stage_prog_data->bind_map[set][set_index];
         surf_index = fs_reg(binding);
      } else {
         assert(0 && "need more info from the ir for this.");

         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         emit(ADD(surf_index, op[0],
                  fs_reg(stage_prog_data->binding_table.ubo_start)));
         emit_uniformize(surf_index, surf_index);

         /* Assume this may touch any UBO.  It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (const_offset) {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = result.type;

         fs_reg const_offset_reg = fs_reg(const_offset->value.u[0] & ~15);
         emit(new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, 8,
                                   packed_consts, surf_index, const_offset_reg));

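         /* The pull constant load fetched a 16-byte-aligned block;
          * set_smear() then picks the dword within the returned register
          * for each vector component.
          */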
         for (int i = 0; i < ir->type->vector_elements; i++) {
            packed_consts.set_smear(const_offset->value.u[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            /* UBO bools are any nonzero value.  We consider bools to be
             * values with the low bit set to 1.  Convert them using CMP.
             */
            if (ir->type->base_type == GLSL_TYPE_BOOL) {
               emit(CMP(result, packed_consts, fs_reg(0u), BRW_CONDITIONAL_NZ));
            } else {
               emit(MOV(result, packed_consts));
            }

            result = offset(result, 1);
         }
      } else {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         emit(SHR(base_offset, op[1], fs_reg(2)));

         for (int i = 0; i < ir->type->vector_elements; i++) {
            emit(VARYING_PULL_CONSTANT_LOAD(result, surf_index,
                                            base_offset, i));

            if (ir->type->base_type == GLSL_TYPE_BOOL)
               emit(CMP(result, result, fs_reg(0), BRW_CONDITIONAL_NZ));

            result = offset(result, 1);
         }
      }

      result.reg_offset = 0;
      break;
   }

   case ir_triop_fma:
      /* Note that the instruction's argument order is reversed from GLSL
       * and the IR.
       */
      emit(MAD(this->result, op[2], op[1], op[0]));
      break;

   case ir_triop_lrp:
      emit_lrp(this->result, op[0], op[1], op[2]);
      break;

   case ir_triop_csel:
   case ir_unop_interpolate_at_centroid:
   case ir_binop_interpolate_at_offset:
   case ir_binop_interpolate_at_sample:
      unreachable("already handled above");
      break;

   case ir_unop_d2f:
   case ir_unop_f2d:
   case ir_unop_d2i:
   case ir_unop_i2d:
   case ir_unop_d2u:
   case ir_unop_u2d:
   case ir_unop_d2b:
   case ir_unop_pack_double_2x32:
   case ir_unop_unpack_double_2x32:
   case ir_unop_frexp_sig:
   case ir_unop_frexp_exp:
      unreachable("fp64 todo");
      break;
   }
}

void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         if (predicated || !l.equals(r)) {
            fs_inst *inst = emit(MOV(l, r));
            inst->predicate = predicated ? BRW_PREDICATE_NORMAL : BRW_PREDICATE_NONE;
         }

         l = offset(l, 1);
         r = offset(r, 1);
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_ATOMIC_UINT:
      break;

   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }
}

/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                   fs_reg dst,
                                   fs_reg src,
                                   fs_inst *pre_rhs_inst,
                                   fs_inst *last_rhs_inst)
{
   /* Only attempt if we're doing a direct assignment. */
   if (ir->condition ||
       !(ir->lhs->type->is_scalar() ||
         (ir->lhs->type->is_vector() &&
          ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1)))
      return false;

   /* Make sure the last instruction generated our source reg. */
   fs_inst *modify = get_instruction_generating_reg(pre_rhs_inst,
                                                    last_rhs_inst,
                                                    src);
   if (!modify)
      return false;

   /* If last_rhs_inst wrote a different number of components than our LHS,
    * we can't safely rewrite it.
    */
   if (alloc.sizes[dst.reg] != modify->regs_written)
      return false;

   /* Success!  Rewrite the instruction. */
   modify->dst = dst;

   return true;
}

void
fs_visitor::visit(ir_assignment *ir)
{
   fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail();

   ir->rhs->accept(this);
   r = this->result;

   fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail();

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst))
      return;

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(MOV(l, r));
            if (ir->condition)
               inst->predicate = BRW_PREDICATE_NORMAL;
            r = offset(r, 1);
         }
         l = offset(l, 1);
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

fs_inst *
fs_visitor::emit_texture_gen4(ir_texture_opcode op, fs_reg dst,
                              fs_reg coordinate, int coord_components,
                              fs_reg shadow_c,
                              fs_reg lod, fs_reg dPdy, int grad_components,
                              uint32_t sampler)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (shadow_c.file != BAD_FILE) {
      for (int i = 0; i < coord_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
         coordinate = offset(coordinate, 1);
      }

      /* gen4's SIMD8 sampler always has the slots for u,v,r present.
       * the unused slots must be zeroed.
       */
      for (int i = coord_components; i < 3; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i), fs_reg(0.0f)));
      }
      mlen += 3;

      if (op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(MOV(fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f)));
         mlen++;
      } else if (op == ir_txb || op == ir_txl) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen), lod));
         mlen++;
      } else {
         unreachable("Should not get here.");
      }

      emit(MOV(fs_reg(MRF, base_mrf + mlen), shadow_c));
      mlen++;
   } else if (op == ir_tex) {
      for (int i = 0; i < coord_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
         coordinate = offset(coordinate, 1);
      }
      /* zero the others. */
      for (int i = coord_components; i < 3; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i), fs_reg(0.0f)));
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (op == ir_txd) {
      fs_reg &dPdx = lod;

      for (int i = 0; i < coord_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i), coordinate));
         coordinate = offset(coordinate, 1);
      }
      /* the slots for u and v are always present, but r is optional */
      mlen += MAX2(coord_components, 2);

      /*  P   = u,    v,    r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * 1-arg: Does not exist.
       *
       * 2-arg: dudx   dvdx   dudy   dvdy
       *        dPdx.x dPdx.y dPdy.x dPdy.y
       *        m4     m5     m6     m7
       *
       * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
       *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
       *        m5     m6     m7     m8     m9     m10
       */
      for (int i = 0; i < grad_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen), dPdx));
         dPdx = offset(dPdx, 1);
      }
      mlen += MAX2(grad_components, 2);

      for (int i = 0; i < grad_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen), dPdy));
         dPdy = offset(dPdy, 1);
      }
      mlen += MAX2(grad_components, 2);
   } else if (op == ir_txs) {
      /* There's no SIMD8 resinfo message on Gen4.  Use SIMD16 instead. */
      simd16 = true;
      emit(MOV(fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), lod));
      mlen += 2;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      simd16 = true;
      assert(op == ir_txb || op == ir_txl || op == ir_txf);

      for (int i = 0; i < coord_components; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
                  coordinate));
         coordinate = offset(coordinate, 1);
      }

      /* Initialize the rest of u/v/r with 0.0.  Empirically, this seems to
       * be necessary for TXF (ld), but seems wise to do for all messages.
       */
      for (int i = coord_components; i < 3; i++) {
         emit(MOV(fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f)));
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      emit(MOV(fs_reg(MRF, base_mrf + mlen, lod.type), lod));
      mlen++;

      /* The unused upper half. */
      mlen++;
   }

   if (simd16) {
      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk.  We'll need to move
       * this weirdness around to the expected layout.
       */
      orig_dst = dst;
      dst = fs_reg(GRF, alloc.allocate(8), orig_dst.type);
   }

   enum opcode opcode;
   switch (op) {
   case ir_tex: opcode = SHADER_OPCODE_TEX; break;
   case ir_txb: opcode = FS_OPCODE_TXB; break;
   case ir_txl: opcode = SHADER_OPCODE_TXL; break;
   case ir_txd: opcode = SHADER_OPCODE_TXD; break;
   case ir_txs: opcode = SHADER_OPCODE_TXS; break;
   case ir_txf: opcode = SHADER_OPCODE_TXF; break;
   default:
      unreachable("not reached");
   }

   fs_inst *inst = emit(opcode, dst, reg_undef, fs_reg(sampler));
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_size = 1;
   inst->regs_written = simd16 ? 8 : 4;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(MOV(orig_dst, dst));
         orig_dst = offset(orig_dst, 1);
         dst = offset(dst, 2);
      }
   }

   return inst;
}

fs_inst *
fs_visitor::emit_texture_gen4_simd16(ir_texture_opcode op, fs_reg dst,
                                     fs_reg coordinate, int vector_elements,
                                     fs_reg shadow_c, fs_reg lod,
                                     uint32_t sampler)
{
   fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F, dispatch_width);
   bool has_lod = op == ir_txl || op == ir_txb || op == ir_txf;

   if (has_lod && shadow_c.file != BAD_FILE)
      no16("TXB and TXL with shadow comparison unsupported in SIMD16.");

   if (op == ir_txd)
      no16("textureGrad unsupported in SIMD16.");

   /* Copy the coordinates. */
   for (int i = 0; i < vector_elements; i++) {
      emit(MOV(retype(offset(message, i), coordinate.type), coordinate));
      coordinate = offset(coordinate, 1);
   }

   fs_reg msg_end = offset(message, vector_elements);

   /* Messages other than sample and ld require all three components */
   if (has_lod || shadow_c.file != BAD_FILE) {
      for (int i = vector_elements; i < 3; i++) {
         emit(MOV(offset(message, i), fs_reg(0.0f)));
      }
   }

   if (has_lod) {
      fs_reg msg_lod = retype(offset(message, 3), op == ir_txf ?
                              BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
      emit(MOV(msg_lod, lod));
      msg_end = offset(msg_lod, 1);
   }

   if (shadow_c.file != BAD_FILE) {
      fs_reg msg_ref = offset(message, 3 + has_lod);
      emit(MOV(msg_ref, shadow_c));
      msg_end = offset(msg_ref, 1);
   }

   enum opcode opcode;
   switch (op) {
   case ir_tex: opcode = SHADER_OPCODE_TEX; break;
   case ir_txb: opcode = FS_OPCODE_TXB; break;
   case ir_txd: opcode = SHADER_OPCODE_TXD; break;
   case ir_txl: opcode = SHADER_OPCODE_TXL; break;
   case ir_txs: opcode = SHADER_OPCODE_TXS; break;
   case ir_txf: opcode = SHADER_OPCODE_TXF; break;
   default: unreachable("not reached");
   }

   fs_inst *inst = emit(opcode, dst, reg_undef, fs_reg(sampler));
   inst->base_mrf = message.reg - 1;
   inst->mlen = msg_end.reg - inst->base_mrf;
   inst->header_size = 1;
   inst->regs_written = 8;

   return inst;
}

/* gen5's sampler has slots for u, v, r, array index, then optional
 * parameters like shadow comparator or LOD bias.  If optional
 * parameters aren't present, those base slots are optional and don't
 * need to be included in the message.
 *
 * We don't fill in the unnecessary slots regardless, which may look
 * surprising in the disassembly.
 */
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture_opcode op, fs_reg dst,
                              fs_reg coordinate, int vector_elements,
                              fs_reg shadow_c,
                              fs_reg lod, fs_reg lod2, int grad_components,
                              fs_reg sample_index, uint32_t sampler,
                              bool has_offset)
{
   int reg_width = dispatch_width / 8;
   unsigned header_size = 0;

   fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F, dispatch_width);
   fs_reg msg_coords = message;

   if (has_offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_size = 1;
      message.reg--;
   }

   for (int i = 0; i < vector_elements; i++) {
      emit(MOV(retype(offset(msg_coords, i), coordinate.type), coordinate));
      coordinate = offset(coordinate, 1);
   }
   fs_reg msg_end = offset(msg_coords, vector_elements);
   fs_reg msg_lod = offset(msg_coords, 4);

   if (shadow_c.file != BAD_FILE) {
      fs_reg msg_shadow = msg_lod;
      emit(MOV(msg_shadow, shadow_c));
      msg_lod = offset(msg_shadow, 1);
      msg_end = msg_lod;
   }

   enum opcode opcode;
   switch (op) {
   case ir_tex:
      opcode = SHADER_OPCODE_TEX;
      break;
   case ir_txb:
      emit(MOV(msg_lod, lod));
      msg_end = offset(msg_lod, 1);

      opcode = FS_OPCODE_TXB;
      break;
   case ir_txl:
      emit(MOV(msg_lod, lod));
      msg_end = offset(msg_lod, 1);

      opcode = SHADER_OPCODE_TXL;
      break;
   case ir_txd: {
      /**
       *  P   = u,    v,    r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * Load up these values:
       * - dudx   dudy   dvdx   dvdy   drdx   drdy
       * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
       */
      msg_end = msg_lod;
      for (int i = 0; i < grad_components; i++) {
         emit(MOV(msg_end, lod));
         lod = offset(lod, 1);
         msg_end = offset(msg_end, 1);

         emit(MOV(msg_end, lod2));
         lod2 = offset(lod2, 1);
         msg_end = offset(msg_end, 1);
      }

      opcode = SHADER_OPCODE_TXD;
      break;
   }
   case ir_txs:
      msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
      emit(MOV(msg_lod, lod));
      msg_end = offset(msg_lod, 1);

      opcode = SHADER_OPCODE_TXS;
      break;
   case ir_query_levels:
      msg_lod = msg_end;
      emit(MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u)));
      msg_end = offset(msg_lod, 1);

      opcode = SHADER_OPCODE_TXS;
      break;
   case ir_txf:
      msg_lod = offset(msg_coords, 3);
      emit(MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod));
      msg_end = offset(msg_lod, 1);

      opcode = SHADER_OPCODE_TXF;
      break;
   case ir_txf_ms:
      msg_lod = offset(msg_coords, 3);
      /* lod */
      emit(MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u)));
      /* sample index */
      emit(MOV(retype(offset(msg_lod, 1), BRW_REGISTER_TYPE_UD), sample_index));
      msg_end = offset(msg_lod, 2);

      opcode = SHADER_OPCODE_TXF_CMS;
      break;
   case ir_lod:
      opcode = SHADER_OPCODE_LOD;
      break;
   case ir_tg4:
      opcode = SHADER_OPCODE_TG4;
      break;
   default:
      unreachable("not reached");
   }

   fs_inst *inst = emit(opcode, dst, reg_undef, fs_reg(sampler));
   inst->base_mrf = message.reg;
   inst->mlen = msg_end.reg - message.reg;
   inst->header_size = header_size;
   inst->regs_written = 4 * reg_width;

   if (inst->mlen > MAX_SAMPLER_MESSAGE_SIZE) {
      fail("Message length >" STRINGIFY(MAX_SAMPLER_MESSAGE_SIZE)
           " disallowed by hardware\n");
   }

   return inst;
}

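/* Haswell and Gen8+ can address more than 16 samplers per stage; any
 * dynamically indexed sampler, or a static index of 16 or more, takes
 * the "high sampler" path that offsets the Sampler State Pointer in
 * the message header.
 */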
1810 static bool
1811 is_high_sampler(const struct brw_device_info *devinfo, fs_reg sampler)
1812 {
1813 if (devinfo->gen < 8 && !devinfo->is_haswell)
1814 return false;
1815
1816 return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16;
1817 }
1818
1819 fs_inst *
1820 fs_visitor::emit_texture_gen7(ir_texture_opcode op, fs_reg dst,
1821 fs_reg coordinate, int coord_components,
1822 fs_reg shadow_c,
1823 fs_reg lod, fs_reg lod2, int grad_components,
1824 fs_reg sample_index, fs_reg mcs, fs_reg sampler,
1825 fs_reg offset_value)
1826 {
1827 int reg_width = dispatch_width / 8;
1828 unsigned header_size = 0;
1829
1830 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, MAX_SAMPLER_MESSAGE_SIZE);
1831 for (int i = 0; i < MAX_SAMPLER_MESSAGE_SIZE; i++) {
1832 sources[i] = vgrf(glsl_type::float_type);
1833 }
1834 int length = 0;
1835
1836 if (op == ir_tg4 || offset_value.file != BAD_FILE ||
1837 is_high_sampler(devinfo, sampler)) {
1838 /* For general texture offsets (no txf workaround), we need a header to
1839 * put them in. Note that for SIMD16 we're making space for two actual
1840 * hardware registers here, so the emit will have to fix up for this.
1841 *
1842 * * ir4_tg4 needs to place its channel select in the header,
1843 * for interaction with ARB_texture_swizzle
1844 *
1845 * The sampler index is only 4-bits, so for larger sampler numbers we
1846 * need to offset the Sampler State Pointer in the header.
1847 */
1848 header_size = 1;
1849 sources[0] = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
1850 length++;
1851 }
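/* A rough sketch of how the header ends up being used, based on code
 * later in this file: gather4's channel select is OR'd into
 * inst->offset as M0.2 bits 16-17 (see emit_texture()), constant
 * texel offsets travel in the same header dword (see the IMM
 * offset_value handling in visit(ir_texture *)), and high sampler
 * indices bump the Sampler State Pointer.
 */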
1852
1853 if (shadow_c.file != BAD_FILE) {
1854 emit(MOV(sources[length], shadow_c));
1855 length++;
1856 }
1857
1858 bool has_nonconstant_offset =
1859 offset_value.file != BAD_FILE && offset_value.file != IMM;
1860 bool coordinate_done = false;
1861
1862 /* The sampler can only meaningfully compute LOD for fragment shader
1863 * messages. For all other stages, we change the opcode to ir_txl and
1864 * hardcode the LOD to 0.
1865 */
1866 if (stage != MESA_SHADER_FRAGMENT && op == ir_tex) {
1867 op = ir_txl;
1868 lod = fs_reg(0.0f);
1869 }
1870
1871 /* Set up the LOD info */
1872 switch (op) {
1873 case ir_tex:
1874 case ir_lod:
1875 break;
1876 case ir_txb:
1877 emit(MOV(sources[length], lod));
1878 length++;
1879 break;
1880 case ir_txl:
1881 emit(MOV(sources[length], lod));
1882 length++;
1883 break;
1884 case ir_txd: {
1885 no16("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");
1886
1887 /* Load dPdx and the coordinate together:
1888 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
1889 */
1890 for (int i = 0; i < coord_components; i++) {
1891 emit(MOV(sources[length], coordinate));
1892 coordinate = offset(coordinate, 1);
1893 length++;
1894
1895 /* For cube map array, the coordinate is (u,v,r,ai) but there are
1896 * only derivatives for (u, v, r).
1897 */
1898 if (i < grad_components) {
1899 emit(MOV(sources[length], lod));
1900 lod = offset(lod, 1);
1901 length++;
1902
1903 emit(MOV(sources[length], lod2));
1904 lod2 = offset(lod2, 1);
1905 length++;
1906 }
1907 }
1908
1909 coordinate_done = true;
1910 break;
1911 }
1912 case ir_txs:
1913 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod));
1914 length++;
1915 break;
1916 case ir_query_levels:
1917 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), fs_reg(0u)));
1918 length++;
1919 break;
1920 case ir_txf:
1921 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
1922 * On Gen9 they are u, v, lod, r
1923 */
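/* For example, with coord_components == 3 this emits u, lod, v, r on
 * Gen7-8 and u, v, lod, r on Gen9, which is what the split loop
 * bounds below implement.
 */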
1924
1925 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate));
1926 coordinate = offset(coordinate, 1);
1927 length++;
1928
1929 if (devinfo->gen >= 9) {
1930 if (coord_components >= 2) {
1931 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate));
1932 coordinate = offset(coordinate, 1);
1933 }
1934 length++;
1935 }
1936
1937 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod));
1938 length++;
1939
1940 for (int i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++) {
1941 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate));
1942 coordinate = offset(coordinate, 1);
1943 length++;
1944 }
1945
1946 coordinate_done = true;
1947 break;
1948 case ir_txf_ms:
1949 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index));
1950 length++;
1951
1952 /* data from the multisample control surface */
1953 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs));
1954 length++;
1955
1956 /* there is no offsetting for this message; just copy in the integer
1957 * texture coordinates
1958 */
1959 for (int i = 0; i < coord_components; i++) {
1960 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate));
1961 coordinate = offset(coordinate, 1);
1962 length++;
1963 }
1964
1965 coordinate_done = true;
1966 break;
1967 case ir_tg4:
1968 if (has_nonconstant_offset) {
1969 if (shadow_c.file != BAD_FILE)
1970 no16("Gen7 does not support gather4_po_c in SIMD16 mode.");
1971
1972 /* More crazy intermixing */
1973 for (int i = 0; i < 2; i++) { /* u, v */
1974 emit(MOV(sources[length], coordinate));
1975 coordinate = offset(coordinate, 1);
1976 length++;
1977 }
1978
1979 for (int i = 0; i < 2; i++) { /* offu, offv */
1980 emit(MOV(retype(sources[length], BRW_REGISTER_TYPE_D), offset_value));
1981 offset_value = offset(offset_value, 1);
1982 length++;
1983 }
1984
1985 if (coord_components == 3) { /* r if present */
1986 emit(MOV(sources[length], coordinate));
1987 coordinate = offset(coordinate, 1);
1988 length++;
1989 }
1990
1991 coordinate_done = true;
1992 }
1993 break;
1994 }
1995
1996 /* Set up the coordinate (except for cases where it was done above) */
1997 if (!coordinate_done) {
1998 for (int i = 0; i < coord_components; i++) {
1999 emit(MOV(sources[length], coordinate));
2000 coordinate = offset(coordinate, 1);
2001 length++;
2002 }
2003 }
2004
2005 int mlen;
2006 if (reg_width == 2)
2007 mlen = length * reg_width - header_size;
2008 else
2009 mlen = length * reg_width;
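/* Worked example: the header always occupies a single hardware
 * register, so in SIMD16 (reg_width == 2) a payload with a header
 * plus four logical sources (length == 5) takes
 * mlen = 5 * 2 - 1 = 9 registers.
 */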
2010
2011 fs_reg src_payload = fs_reg(GRF, alloc.allocate(mlen),
2012 BRW_REGISTER_TYPE_F, dispatch_width);
2013 emit(LOAD_PAYLOAD(src_payload, sources, length, header_size));
2014
2015 /* Generate the SEND */
2016 enum opcode opcode;
2017 switch (op) {
2018 case ir_tex: opcode = SHADER_OPCODE_TEX; break;
2019 case ir_txb: opcode = FS_OPCODE_TXB; break;
2020 case ir_txl: opcode = SHADER_OPCODE_TXL; break;
2021 case ir_txd: opcode = SHADER_OPCODE_TXD; break;
2022 case ir_txf: opcode = SHADER_OPCODE_TXF; break;
2023 case ir_txf_ms: opcode = SHADER_OPCODE_TXF_CMS; break;
2024 case ir_txs: opcode = SHADER_OPCODE_TXS; break;
2025 case ir_query_levels: opcode = SHADER_OPCODE_TXS; break;
2026 case ir_lod: opcode = SHADER_OPCODE_LOD; break;
2027 case ir_tg4:
2028 if (has_nonconstant_offset)
2029 opcode = SHADER_OPCODE_TG4_OFFSET;
2030 else
2031 opcode = SHADER_OPCODE_TG4;
2032 break;
2033 default:
2034 unreachable("not reached");
2035 }
2036 fs_inst *inst = emit(opcode, dst, src_payload, sampler);
2037 inst->base_mrf = -1;
2038 inst->mlen = mlen;
2039 inst->header_size = header_size;
2040 inst->regs_written = 4 * reg_width;
2041
2042 if (inst->mlen > MAX_SAMPLER_MESSAGE_SIZE) {
2043 fail("Message length >" STRINGIFY(MAX_SAMPLER_MESSAGE_SIZE)
2044 " disallowed by hardware\n");
2045 }
2046
2047 return inst;
2048 }
2049
2050 fs_reg
2051 fs_visitor::rescale_texcoord(fs_reg coordinate, int coord_components,
2052 bool is_rect, uint32_t sampler, int texunit)
2053 {
2054 fs_inst *inst = NULL;
2055 bool needs_gl_clamp = true;
2056 fs_reg scale_x, scale_y;
2057
2058 /* The 965 requires the EU to do the normalization of GL rectangle
2059 * texture coordinates. We use the program parameter state
2060 * tracking to get the scaling factor.
2061 */
2062 if (is_rect &&
2063 (devinfo->gen < 6 ||
2064 (devinfo->gen >= 6 && (key_tex->gl_clamp_mask[0] & (1 << sampler) ||
2065 key_tex->gl_clamp_mask[1] & (1 << sampler))))) {
2066 struct gl_program_parameter_list *params = prog->Parameters;
2067 int tokens[STATE_LENGTH] = {
2068 STATE_INTERNAL,
2069 STATE_TEXRECT_SCALE,
2070 texunit,
2071 0,
2072 0
2073 };
2074
2075 no16("rectangle scale uniform setup not supported on SIMD16\n");
2076 if (dispatch_width == 16) {
2077 return coordinate;
2078 }
2079
2080 GLuint index = _mesa_add_state_reference(params,
2081 (gl_state_index *)tokens);
2082 /* Try to find existing copies of the texrect scale uniforms. */
2083 for (unsigned i = 0; i < uniforms; i++) {
2084 if (stage_prog_data->param[i] ==
2085 &prog->Parameters->ParameterValues[index][0]) {
2086 scale_x = fs_reg(UNIFORM, i);
2087 scale_y = fs_reg(UNIFORM, i + 1);
2088 break;
2089 }
2090 }
2091
2092 /* If we didn't already set them up, do so now. */
2093 if (scale_x.file == BAD_FILE) {
2094 scale_x = fs_reg(UNIFORM, uniforms);
2095 scale_y = fs_reg(UNIFORM, uniforms + 1);
2096
2097 stage_prog_data->param[uniforms++] =
2098 &prog->Parameters->ParameterValues[index][0];
2099 stage_prog_data->param[uniforms++] =
2100 &prog->Parameters->ParameterValues[index][1];
2101 }
2102 }
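/* At this point scale_x/scale_y hold 1.0/width and 1.0/height of the
 * rectangle texture (see the GL_CLAMP handling below, which inverts
 * them back with RCP to recover the texel dimensions).
 */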
2103
2104 /* Pre-gen6 hardware requires the EU itself to normalize GL rectangle
2105 * texture coordinates, so scale them here using the 1.0/width and
2106 * 1.0/height uniforms set up above.
2107 */
2108 if (devinfo->gen < 6 && is_rect) {
2109 fs_reg dst = fs_reg(GRF, alloc.allocate(coord_components));
2110 fs_reg src = coordinate;
2111 coordinate = dst;
2112
2113 emit(MUL(dst, src, scale_x));
2114 dst = offset(dst, 1);
2115 src = offset(src, 1);
2116 emit(MUL(dst, src, scale_y));
2117 } else if (is_rect) {
2118 /* On gen6+, the sampler handles the rectangle coordinates
2119 * natively, without needing rescaling. But that means we have
2120 * to do GL_CLAMP clamping at the [0, width], [0, height] scale,
2121 * not [0, 1] like the default case below.
2122 */
2123 needs_gl_clamp = false;
2124
2125 for (int i = 0; i < 2; i++) {
2126 if (key_tex->gl_clamp_mask[i] & (1 << sampler)) {
2127 fs_reg chan = coordinate;
2128 chan = offset(chan, i);
2129
2130 inst = emit(BRW_OPCODE_SEL, chan, chan, fs_reg(0.0f));
2131 inst->conditional_mod = BRW_CONDITIONAL_GE;
2132
2133 /* Our parameter comes in as 1.0/width or 1.0/height,
2134 * because that's what people normally want for doing
2135 * texture rectangle handling. We need width or height
2136 * for clamping, but we don't care enough to make a new
2137 * parameter type, so just invert back.
2138 */
2139 fs_reg limit = vgrf(glsl_type::float_type);
2140 emit(MOV(limit, i == 0 ? scale_x : scale_y));
2141 emit(SHADER_OPCODE_RCP, limit, limit);
2142
2143 inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
2144 inst->conditional_mod = BRW_CONDITIONAL_L;
2145 }
2146 }
2147 }
2148
2149 if (coord_components > 0 && needs_gl_clamp) {
2150 for (int i = 0; i < MIN2(coord_components, 3); i++) {
2151 if (key_tex->gl_clamp_mask[i] & (1 << sampler)) {
2152 fs_reg chan = coordinate;
2153 chan = offset(chan, i);
2154
2155 fs_inst *inst = emit(MOV(chan, chan));
2156 inst->saturate = true;
2157 }
2158 }
2159 }
2160 return coordinate;
2161 }
2162
2163 /* Sample from the MCS surface attached to this multisample texture. */
2164 fs_reg
2165 fs_visitor::emit_mcs_fetch(fs_reg coordinate, int components, fs_reg sampler)
2166 {
2167 int reg_width = dispatch_width / 8;
2168 fs_reg payload = fs_reg(GRF, alloc.allocate(components * reg_width),
2169 BRW_REGISTER_TYPE_F, dispatch_width);
2170 fs_reg dest = vgrf(glsl_type::uvec4_type);
2171 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, components);
2172
2173 /* parameters are: u, v, r; missing parameters are treated as zero */
2174 for (int i = 0; i < components; i++) {
2175 sources[i] = vgrf(glsl_type::float_type);
2176 emit(MOV(retype(sources[i], BRW_REGISTER_TYPE_D), coordinate));
2177 coordinate = offset(coordinate, 1);
2178 }
2179
2180 emit(LOAD_PAYLOAD(payload, sources, components, 0));
2181
2182 fs_inst *inst = emit(SHADER_OPCODE_TXF_MCS, dest, payload, sampler);
2183 inst->base_mrf = -1;
2184 inst->mlen = components * reg_width;
2185 inst->header_size = 0;
2186 inst->regs_written = 4 * reg_width; /* we only care about one reg of
2187 * response, but the sampler always
2188 * writes 4/8
2189 */
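/* The dest fetched here is passed back in as the mcs argument of the
 * ir_txf_ms path in emit_texture_gen7(), where it rides in the
 * ld2dms payload between the sample index and the coordinates.
 */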
2190
2191 return dest;
2192 }
2193
2194 void
2195 fs_visitor::emit_texture(ir_texture_opcode op,
2196 const glsl_type *dest_type,
2197 fs_reg coordinate, int coord_components,
2198 fs_reg shadow_c,
2199 fs_reg lod, fs_reg lod2, int grad_components,
2200 fs_reg sample_index,
2201 fs_reg offset_value,
2202 fs_reg mcs,
2203 int gather_component,
2204 bool is_cube_array,
2205 bool is_rect,
2206 uint32_t sampler,
2207 fs_reg sampler_reg, int texunit)
2208 {
2209 fs_inst *inst = NULL;
2210
2211 if (op == ir_tg4) {
2212 /* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
2213 * emitting anything other than setting up the constant result.
2214 */
2215 int swiz = GET_SWZ(key_tex->swizzles[sampler], gather_component);
2216 if (swiz == SWIZZLE_ZERO || swiz == SWIZZLE_ONE) {
2217
2218 fs_reg res = vgrf(glsl_type::vec4_type);
2219 this->result = res;
2220
2221 for (int i = 0; i < 4; i++) {
2222 emit(MOV(res, fs_reg(swiz == SWIZZLE_ZERO ? 0.0f : 1.0f)));
2223 res = offset(res, 1);
2224 }
2225 return;
2226 }
2227 }
2228
2229 if (coordinate.file != BAD_FILE) {
2230 /* FINISHME: Texture coordinate rescaling doesn't work with non-constant
2231 * samplers. This should only be a problem with GL_CLAMP on Gen7.
2232 */
2233 coordinate = rescale_texcoord(coordinate, coord_components, is_rect,
2234 sampler, texunit);
2235 }
2236
2237 /* Writemasking doesn't eliminate channels on SIMD8 texture
2238 * samples, so don't worry about them.
2239 */
2240 fs_reg dst = vgrf(glsl_type::get_instance(dest_type->base_type, 4, 1));
2241
2242 if (devinfo->gen >= 7) {
2243 inst = emit_texture_gen7(op, dst, coordinate, coord_components,
2244 shadow_c, lod, lod2, grad_components,
2245 sample_index, mcs, sampler_reg,
2246 offset_value);
2247 } else if (devinfo->gen >= 5) {
2248 inst = emit_texture_gen5(op, dst, coordinate, coord_components,
2249 shadow_c, lod, lod2, grad_components,
2250 sample_index, sampler,
2251 offset_value.file != BAD_FILE);
2252 } else if (dispatch_width == 16) {
2253 inst = emit_texture_gen4_simd16(op, dst, coordinate, coord_components,
2254 shadow_c, lod, sampler);
2255 } else {
2256 inst = emit_texture_gen4(op, dst, coordinate, coord_components,
2257 shadow_c, lod, lod2, grad_components,
2258 sampler);
2259 }
2260
2261 if (shadow_c.file != BAD_FILE)
2262 inst->shadow_compare = true;
2263
2264 if (offset_value.file == IMM)
2265 inst->offset = offset_value.fixed_hw_reg.dw1.ud;
2266
2267 if (op == ir_tg4) {
2268 inst->offset |=
2269 gather_channel(gather_component, sampler) << 16; /* M0.2:16-17 */
2270
2271 if (devinfo->gen == 6)
2272 emit_gen6_gather_wa(key_tex->gen6_gather_wa[sampler], dst);
2273 }
2274
2275 /* fixup #layers for cube map arrays */
2276 if (op == ir_txs && is_cube_array) {
2277 fs_reg depth = offset(dst, 2);
2278 fs_reg fixed_depth = vgrf(glsl_type::int_type);
2279 emit_math(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, fs_reg(6));
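/* e.g., a resinfo (txs) depth of 12 layer-faces comes back as 2 cube
 * array layers after the divide by 6.
 */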
2280
2281 fs_reg *fixed_payload = ralloc_array(mem_ctx, fs_reg, inst->regs_written);
2282 int components = inst->regs_written / (dst.width / 8);
2283 for (int i = 0; i < components; i++) {
2284 if (i == 2) {
2285 fixed_payload[i] = fixed_depth;
2286 } else {
2287 fixed_payload[i] = offset(dst, i);
2288 }
2289 }
2290 emit(LOAD_PAYLOAD(dst, fixed_payload, components, 0));
2291 }
2292
2293 swizzle_result(op, dest_type->vector_elements, dst, sampler);
2294 }
2295
2296 void
2297 fs_visitor::visit(ir_texture *ir)
2298 {
2299 uint32_t sampler;
2300
2301 ir_dereference_variable *deref_var = ir->sampler->as_dereference_variable();
2302 assert(deref_var);
2303 ir_variable *var = deref_var->var;
2304
2305 sampler = stage_prog_data->bind_map[var->data.set][var->data.index];
2306
2307 ir_rvalue *nonconst_sampler_index =
2308 _mesa_get_sampler_array_nonconst_index(ir->sampler);
2309
2310 /* Handle non-constant sampler array indexing */
2311 fs_reg sampler_reg;
2312 if (nonconst_sampler_index) {
2313 /* The highest sampler which may be used by this operation is
2314 * the last element of the array. Mark it here, because the generator
2315 * doesn't have enough information to determine the bound.
2316 */
2317 uint32_t array_size = ir->sampler->as_dereference_array()
2318 ->array->type->array_size();
2319
2320 uint32_t max_used = sampler + array_size - 1;
2321 if (ir->op == ir_tg4 && devinfo->gen < 8) {
2322 max_used += stage_prog_data->binding_table.gather_texture_start;
2323 } else {
2324 max_used += stage_prog_data->binding_table.texture_start;
2325 }
2326
2327 brw_mark_surface_used(prog_data, max_used);
2328
2329 /* Emit code to evaluate the actual indexing expression */
2330 nonconst_sampler_index->accept(this);
2331 fs_reg temp = vgrf(glsl_type::uint_type);
2332 emit(ADD(temp, this->result, fs_reg(sampler)));
2333 emit_uniformize(temp, temp);
2334
2335 sampler_reg = temp;
2336 } else {
2337 /* Single sampler, or constant array index; the indexing expression
2338 * is just an immediate.
2339 */
2340 sampler_reg = fs_reg(sampler);
2341 }
2342
2343 /* FINISHME: We're failing to recompile our programs when the sampler is
2344 * updated. This only matters for the texture rectangle scale parameters
2345 * (pre-gen6, or gen6+ with GL_CLAMP).
2346 */
2347 int texunit = prog->SamplerUnits[sampler];
2348
2349 /* Should be lowered by do_lower_texture_projection */
2350 assert(!ir->projector);
2351
2352 /* Should be lowered */
2353 assert(!ir->offset || !ir->offset->type->is_array());
2354
2355 /* Generate code to compute all the subexpression trees. This has to be
2356 * done before loading any values into MRFs for the sampler message since
2357 * generating these values may involve SEND messages that need the MRFs.
2358 */
2359 fs_reg coordinate;
2360 int coord_components = 0;
2361 if (ir->coordinate) {
2362 coord_components = ir->coordinate->type->vector_elements;
2363 ir->coordinate->accept(this);
2364 coordinate = this->result;
2365 }
2366
2367 fs_reg shadow_comparitor;
2368 if (ir->shadow_comparitor) {
2369 ir->shadow_comparitor->accept(this);
2370 shadow_comparitor = this->result;
2371 }
2372
2373 fs_reg offset_value;
2374 if (ir->offset) {
2375 ir_constant *const_offset = ir->offset->as_constant();
2376 if (const_offset) {
2377 /* Store the header bitfield in an IMM register. This allows us to
2378 * use offset_value.file to distinguish between no offset, a constant
2379 * offset, and a non-constant offset.
2380 */
2381 offset_value =
2382 fs_reg(brw_texture_offset(const_offset->value.i,
2383 const_offset->type->vector_elements));
2384 } else {
2385 ir->offset->accept(this);
2386 offset_value = this->result;
2387 }
2388 }
2389
2390 fs_reg lod, lod2, sample_index, mcs;
2391 int grad_components = 0;
2392 switch (ir->op) {
2393 case ir_tex:
2394 case ir_lod:
2395 case ir_tg4:
2396 case ir_query_levels:
2397 break;
2398 case ir_txb:
2399 ir->lod_info.bias->accept(this);
2400 lod = this->result;
2401 break;
2402 case ir_txd:
2403 ir->lod_info.grad.dPdx->accept(this);
2404 lod = this->result;
2405
2406 ir->lod_info.grad.dPdy->accept(this);
2407 lod2 = this->result;
2408
2409 grad_components = ir->lod_info.grad.dPdx->type->vector_elements;
2410 break;
2411 case ir_txf:
2412 case ir_txl:
2413 case ir_txs:
2414 ir->lod_info.lod->accept(this);
2415 lod = this->result;
2416 break;
2417 case ir_txf_ms:
2418 ir->lod_info.sample_index->accept(this);
2419 sample_index = this->result;
2420
2421 if (devinfo->gen >= 7 &&
2422 key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
2423 mcs = emit_mcs_fetch(coordinate, ir->coordinate->type->vector_elements,
2424 sampler_reg);
2425 } else {
2426 mcs = fs_reg(0u);
2427 }
2428 break;
2429 default:
2430 unreachable("Unrecognized texture opcode");
2431 };
2432
2433 int gather_component = 0;
2434 if (ir->op == ir_tg4)
2435 gather_component = ir->lod_info.component->as_constant()->value.i[0];
2436
2437 bool is_rect =
2438 ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT;
2439
2440 bool is_cube_array =
2441 ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2442 ir->sampler->type->sampler_array;
2443
2444 emit_texture(ir->op, ir->type, coordinate, coord_components,
2445 shadow_comparitor, lod, lod2, grad_components,
2446 sample_index, offset_value, mcs,
2447 gather_component, is_cube_array, is_rect, sampler,
2448 sampler_reg, texunit);
2449 }
2450
2451 /**
2452 * Apply workarounds for Gen6 gather with UINT/SINT
2453 */
2454 void
2455 fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
2456 {
2457 if (!wa)
2458 return;
2459
2460 int width = (wa & WA_8BIT) ? 8 : 16;
2461
2462 for (int i = 0; i < 4; i++) {
2463 fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
2464 /* Convert from UNORM to UINT */
2465 emit(MUL(dst_f, dst_f, fs_reg((float)((1 << width) - 1))));
2466 emit(MOV(dst, dst_f));
2467
2468 if (wa & WA_SIGN) {
2469 /* Reinterpret the UINT value as a signed INT value by
2470 * shifting the sign bit into place, then shifting back
2471 * preserving sign.
2472 */
2473 emit(SHL(dst, dst, fs_reg(32 - width)));
2474 emit(ASR(dst, dst, fs_reg(32 - width)));
2475 }
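/* e.g., for an 8-bit SINT format (width == 8): the UNORM-decoded
 * value is rescaled by 255 above to recover the raw bits, then the
 * SHL/ASR pair by 24 sign-extends them from 8 to 32 bits.
 */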
2476
2477 dst = offset(dst, 1);
2478 }
2479 }
2480
2481 /**
2482 * Set up the gather channel based on the swizzle, for gather4.
2483 */
2484 uint32_t
2485 fs_visitor::gather_channel(int orig_chan, uint32_t sampler)
2486 {
2487 int swiz = GET_SWZ(key_tex->swizzles[sampler], orig_chan);
2488 switch (swiz) {
2489 case SWIZZLE_X: return 0;
2490 case SWIZZLE_Y:
2491 /* gather4 sampler is broken for green channel on RG32F --
2492 * we must ask for blue instead.
2493 */
2494 if (key_tex->gather_channel_quirk_mask & (1 << sampler))
2495 return 2;
2496 return 1;
2497 case SWIZZLE_Z: return 2;
2498 case SWIZZLE_W: return 3;
2499 default:
2500 unreachable("Not reached"); /* zero, one swizzles handled already */
2501 }
2502 }
2503
2504 /**
2505 * Swizzle the result of a texture lookup. This is necessary for
2506 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons.
2507 */
2508 void
2509 fs_visitor::swizzle_result(ir_texture_opcode op, int dest_components,
2510 fs_reg orig_val, uint32_t sampler)
2511 {
2512 if (op == ir_query_levels) {
2513 /* # levels is in .w */
2514 this->result = offset(orig_val, 3);
2515 return;
2516 }
2517
2518 this->result = orig_val;
2519
2520 /* txs and lod don't actually sample the texture, so swizzling the result
2521 * makes no sense.
2522 */
2523 if (op == ir_txs || op == ir_lod || op == ir_tg4)
2524 return;
2525
2526 if (dest_components == 1) {
2527 /* Ignore DEPTH_TEXTURE_MODE swizzling. */
2528 } else if (key_tex->swizzles[sampler] != SWIZZLE_NOOP) {
2529 fs_reg swizzled_result = vgrf(glsl_type::vec4_type);
2530 swizzled_result.type = orig_val.type;
2531
2532 for (int i = 0; i < 4; i++) {
2533 int swiz = GET_SWZ(key_tex->swizzles[sampler], i);
2534 fs_reg l = swizzled_result;
2535 l = offset(l, i);
2536
2537 if (swiz == SWIZZLE_ZERO) {
2538 emit(MOV(l, fs_reg(0.0f)));
2539 } else if (swiz == SWIZZLE_ONE) {
2540 emit(MOV(l, fs_reg(1.0f)));
2541 } else {
2542 emit(MOV(l, offset(orig_val,
2543 GET_SWZ(key_tex->swizzles[sampler], i))));
2544 }
2545 }
2546 this->result = swizzled_result;
2547 }
2548 }
2549
2550 void
2551 fs_visitor::visit(ir_swizzle *ir)
2552 {
2553 ir->val->accept(this);
2554 fs_reg val = this->result;
2555
2556 if (ir->type->vector_elements == 1) {
2557 this->result = offset(this->result, ir->mask.x);
2558 return;
2559 }
2560
2561 fs_reg result = vgrf(ir->type);
2562 this->result = result;
2563
2564 for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
2565 fs_reg channel = val;
2566 int swiz = 0;
2567
2568 switch (i) {
2569 case 0:
2570 swiz = ir->mask.x;
2571 break;
2572 case 1:
2573 swiz = ir->mask.y;
2574 break;
2575 case 2:
2576 swiz = ir->mask.z;
2577 break;
2578 case 3:
2579 swiz = ir->mask.w;
2580 break;
2581 }
2582
2583 emit(MOV(result, offset(channel, swiz)));
2584 result = offset(result, 1);
2585 }
2586 }
2587
2588 void
2589 fs_visitor::visit(ir_discard *ir)
2590 {
2591 /* We track our discarded pixels in f0.1. By predicating on it, we can
2592 * update just the flag bits that aren't yet discarded. If there's no
2593 * condition, we emit a CMP of g0 != g0, so all currently executing
2594 * channels will get turned off.
2595 */
2596 fs_inst *cmp;
2597 if (ir->condition) {
2598 emit_bool_to_cond_code(ir->condition);
2599 cmp = (fs_inst *) this->instructions.get_tail();
2600 cmp->conditional_mod = brw_negate_cmod(cmp->conditional_mod);
2601 } else {
2602 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
2603 BRW_REGISTER_TYPE_UW));
2604 cmp = emit(CMP(reg_null_f, some_reg, some_reg, BRW_CONDITIONAL_NZ));
2605 }
2606 cmp->predicate = BRW_PREDICATE_NORMAL;
2607 cmp->flag_subreg = 1;
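/* f0.1 is the second 16-bit half of flag register f0; keeping the
 * discard mask there leaves f0.0 free for ordinary conditional mods.
 */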
2608
2609 if (devinfo->gen >= 6) {
2610 emit_discard_jump();
2611 }
2612 }
2613
2614 void
2615 fs_visitor::visit(ir_constant *ir)
2616 {
2617 /* Set this->result to reg at the bottom of the function because some code
2618 * paths will cause this visitor to be applied to other fields. This will
2619 * cause the value stored in this->result to be modified.
2620 *
2621 * Make reg constant so that it doesn't get accidentally modified along the
2622 * way. Yes, I actually had this problem. :(
2623 */
2624 const fs_reg reg = vgrf(ir->type);
2625 fs_reg dst_reg = reg;
2626
2627 if (ir->type->is_array()) {
2628 const unsigned size = type_size(ir->type->fields.array);
2629
2630 for (unsigned i = 0; i < ir->type->length; i++) {
2631 ir->array_elements[i]->accept(this);
2632 fs_reg src_reg = this->result;
2633
2634 dst_reg.type = src_reg.type;
2635 for (unsigned j = 0; j < size; j++) {
2636 emit(MOV(dst_reg, src_reg));
2637 src_reg = offset(src_reg, 1);
2638 dst_reg = offset(dst_reg, 1);
2639 }
2640 }
2641 } else if (ir->type->is_record()) {
2642 foreach_in_list(ir_constant, field, &ir->components) {
2643 const unsigned size = type_size(field->type);
2644
2645 field->accept(this);
2646 fs_reg src_reg = this->result;
2647
2648 dst_reg.type = src_reg.type;
2649 for (unsigned j = 0; j < size; j++) {
2650 emit(MOV(dst_reg, src_reg));
2651 src_reg = offset(src_reg, 1);
2652 dst_reg = offset(dst_reg, 1);
2653 }
2654 }
2655 } else {
2656 const unsigned size = type_size(ir->type);
2657
2658 for (unsigned i = 0; i < size; i++) {
2659 switch (ir->type->base_type) {
2660 case GLSL_TYPE_FLOAT:
2661 emit(MOV(dst_reg, fs_reg(ir->value.f[i])));
2662 break;
2663 case GLSL_TYPE_UINT:
2664 emit(MOV(dst_reg, fs_reg(ir->value.u[i])));
2665 break;
2666 case GLSL_TYPE_INT:
2667 emit(MOV(dst_reg, fs_reg(ir->value.i[i])));
2668 break;
2669 case GLSL_TYPE_BOOL:
2670 emit(MOV(dst_reg, fs_reg(ir->value.b[i] != 0 ? ~0 : 0)));
2671 break;
2672 default:
2673 unreachable("Non-float/uint/int/bool constant");
2674 }
2675 dst_reg = offset(dst_reg, 1);
2676 }
2677 }
2678
2679 this->result = reg;
2680 }
2681
2682 void
2683 fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
2684 {
2685 ir_expression *expr = ir->as_expression();
2686
2687 if (!expr || expr->operation == ir_binop_ubo_load) {
2688 ir->accept(this);
2689
2690 fs_inst *inst = emit(AND(reg_null_d, this->result, fs_reg(1)));
2691 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2692 return;
2693 }
2694
2695 fs_reg op[3];
2696
2697 assert(expr->get_num_operands() <= 3);
2698 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
2699 assert(expr->operands[i]->type->is_scalar());
2700
2701 expr->operands[i]->accept(this);
2702 op[i] = this->result;
2703
2704 resolve_ud_negate(&op[i]);
2705 }
2706
2707 emit_bool_to_cond_code_of_reg(expr, op);
2708 }
2709
2710 void
2711 fs_visitor::emit_bool_to_cond_code_of_reg(ir_expression *expr, fs_reg op[3])
2712 {
2713 fs_inst *inst;
2714
2715 switch (expr->operation) {
2716 case ir_unop_logic_not:
2717 inst = emit(AND(reg_null_d, op[0], fs_reg(1)));
2718 inst->conditional_mod = BRW_CONDITIONAL_Z;
2719 break;
2720
2721 case ir_binop_logic_xor:
2722 if (devinfo->gen <= 5) {
2723 fs_reg temp = vgrf(expr->type);
2724 emit(XOR(temp, op[0], op[1]));
2725 inst = emit(AND(reg_null_d, temp, fs_reg(1)));
2726 } else {
2727 inst = emit(XOR(reg_null_d, op[0], op[1]));
2728 }
2729 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2730 break;
2731
2732 case ir_binop_logic_or:
2733 if (devinfo->gen <= 5) {
2734 fs_reg temp = vgrf(expr->type);
2735 emit(OR(temp, op[0], op[1]));
2736 inst = emit(AND(reg_null_d, temp, fs_reg(1)));
2737 } else {
2738 inst = emit(OR(reg_null_d, op[0], op[1]));
2739 }
2740 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2741 break;
2742
2743 case ir_binop_logic_and:
2744 if (devinfo->gen <= 5) {
2745 fs_reg temp = vgrf(expr->type);
2746 emit(AND(temp, op[0], op[1]));
2747 inst = emit(AND(reg_null_d, temp, fs_reg(1)));
2748 } else {
2749 inst = emit(AND(reg_null_d, op[0], op[1]));
2750 }
2751 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2752 break;
2753
2754 case ir_unop_f2b:
2755 if (devinfo->gen >= 6) {
2756 emit(CMP(reg_null_d, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
2757 } else {
2758 inst = emit(MOV(reg_null_f, op[0]));
2759 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2760 }
2761 break;
2762
2763 case ir_unop_i2b:
2764 if (devinfo->gen >= 6) {
2765 emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
2766 } else {
2767 inst = emit(MOV(reg_null_d, op[0]));
2768 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2769 }
2770 break;
2771
2772 case ir_binop_greater:
2773 case ir_binop_gequal:
2774 case ir_binop_less:
2775 case ir_binop_lequal:
2776 case ir_binop_equal:
2777 case ir_binop_all_equal:
2778 case ir_binop_nequal:
2779 case ir_binop_any_nequal:
2780 if (devinfo->gen <= 5) {
2781 resolve_bool_comparison(expr->operands[0], &op[0]);
2782 resolve_bool_comparison(expr->operands[1], &op[1]);
2783 }
2784
2785 emit(CMP(reg_null_d, op[0], op[1],
2786 brw_conditional_for_comparison(expr->operation)));
2787 break;
2788
2789 case ir_triop_csel: {
2790 /* Expand the boolean condition into the flag register. */
2791 inst = emit(MOV(reg_null_d, op[0]));
2792 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2793
2794 /* Select which boolean to return. */
2795 fs_reg temp = vgrf(expr->operands[1]->type);
2796 inst = emit(SEL(temp, op[1], op[2]));
2797 inst->predicate = BRW_PREDICATE_NORMAL;
2798
2799 /* Expand the result to a condition code. */
2800 inst = emit(MOV(reg_null_d, temp));
2801 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2802 break;
2803 }
2804
2805 default:
2806 unreachable("not reached");
2807 }
2808 }
2809
2810 /**
2811 * Emit a gen6 IF statement with the comparison folded into the IF
2812 * instruction.
2813 */
2814 void
2815 fs_visitor::emit_if_gen6(ir_if *ir)
2816 {
2817 ir_expression *expr = ir->condition->as_expression();
2818
2819 if (expr && expr->operation != ir_binop_ubo_load) {
2820 fs_reg op[3];
2821 fs_inst *inst;
2822 fs_reg temp;
2823
2824 assert(expr->get_num_operands() <= 3);
2825 for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
2826 assert(expr->operands[i]->type->is_scalar());
2827
2828 expr->operands[i]->accept(this);
2829 op[i] = this->result;
2830 }
2831
2832 switch (expr->operation) {
2833 case ir_unop_logic_not:
2834 emit(IF(op[0], fs_reg(0), BRW_CONDITIONAL_Z));
2835 return;
2836
2837 case ir_binop_logic_xor:
2838 emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
2839 return;
2840
2841 case ir_binop_logic_or:
2842 temp = vgrf(glsl_type::bool_type);
2843 emit(OR(temp, op[0], op[1]));
2844 emit(IF(temp, fs_reg(0), BRW_CONDITIONAL_NZ));
2845 return;
2846
2847 case ir_binop_logic_and:
2848 temp = vgrf(glsl_type::bool_type);
2849 emit(AND(temp, op[0], op[1]));
2850 emit(IF(temp, fs_reg(0), BRW_CONDITIONAL_NZ));
2851 return;
2852
2853 case ir_unop_f2b:
2854 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
2855 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2856 return;
2857
2858 case ir_unop_i2b:
2859 emit(IF(op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
2860 return;
2861
2862 case ir_binop_greater:
2863 case ir_binop_gequal:
2864 case ir_binop_less:
2865 case ir_binop_lequal:
2866 case ir_binop_equal:
2867 case ir_binop_all_equal:
2868 case ir_binop_nequal:
2869 case ir_binop_any_nequal:
2870 if (devinfo->gen <= 5) {
2871 resolve_bool_comparison(expr->operands[0], &op[0]);
2872 resolve_bool_comparison(expr->operands[1], &op[1]);
2873 }
2874
2875 emit(IF(op[0], op[1],
2876 brw_conditional_for_comparison(expr->operation)));
2877 return;
2878
2879 case ir_triop_csel: {
2880 /* Expand the boolean condition into the flag register. */
2881 fs_inst *inst = emit(MOV(reg_null_d, op[0]));
2882 inst->conditional_mod = BRW_CONDITIONAL_NZ;
2883
2884 /* Select which boolean to use as the result. */
2885 fs_reg temp = vgrf(expr->operands[1]->type);
2886 inst = emit(SEL(temp, op[1], op[2]));
2887 inst->predicate = BRW_PREDICATE_NORMAL;
2888
2889 emit(IF(temp, fs_reg(0), BRW_CONDITIONAL_NZ));
2890 return;
2891 }
2892
2893 default:
2894 unreachable("not reached");
2895 }
2896 }
2897
2898 ir->condition->accept(this);
2899 emit(IF(this->result, fs_reg(0), BRW_CONDITIONAL_NZ));
2900 }
2901
2902 bool
2903 fs_visitor::try_opt_frontfacing_ternary(ir_if *ir)
2904 {
2905 ir_dereference_variable *deref = ir->condition->as_dereference_variable();
2906 if (!deref || strcmp(deref->var->name, "gl_FrontFacing") != 0)
2907 return false;
2908
2909 if (ir->then_instructions.length() != 1 ||
2910 ir->else_instructions.length() != 1)
2911 return false;
2912
2913 ir_assignment *then_assign =
2914 ((ir_instruction *)ir->then_instructions.head)->as_assignment();
2915 ir_assignment *else_assign =
2916 ((ir_instruction *)ir->else_instructions.head)->as_assignment();
2917
2918 if (!then_assign || then_assign->condition ||
2919 !else_assign || else_assign->condition ||
2920 then_assign->write_mask != else_assign->write_mask ||
2921 !then_assign->lhs->equals(else_assign->lhs))
2922 return false;
2923
2924 ir_constant *then_rhs = then_assign->rhs->as_constant();
2925 ir_constant *else_rhs = else_assign->rhs->as_constant();
2926
2927 if (!then_rhs || !else_rhs)
2928 return false;
2929
2930 if (then_rhs->type->base_type != GLSL_TYPE_FLOAT)
2931 return false;
2932
2933 if ((then_rhs->is_one() && else_rhs->is_negative_one()) ||
2934 (else_rhs->is_one() && then_rhs->is_negative_one())) {
2935 then_assign->lhs->accept(this);
2936 fs_reg dst = this->result;
2937 dst.type = BRW_REGISTER_TYPE_D;
2938 fs_reg tmp = vgrf(glsl_type::int_type);
2939
2940 if (devinfo->gen >= 6) {
2941 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
2942 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
2943
2944 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
2945 *
2946 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
2947 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
2948 *
2949 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
2950 */
2951
2952 if (then_rhs->is_negative_one()) {
2953 assert(else_rhs->is_one());
2954 g0.negate = true;
2955 }
2956
2957 tmp.type = BRW_REGISTER_TYPE_W;
2958 tmp.subreg_offset = 2;
2959 tmp.stride = 2;
2960
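/* With subreg_offset == 2 and a stride of 2, the W-typed tmp
 * addresses the high 16 bits of each 32-bit channel, so the OR below
 * writes the top half of 1.0f (the 0x3f80 of 0x3f800000) while bit
 * 15 of g0.0 lands in the float's sign bit; the final AND with
 * 0xbf800000 then masks off the uninitialized low words.
 */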
2961 fs_inst *or_inst = emit(OR(tmp, g0, fs_reg(0x3f80)));
2962 or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
2963
2964 tmp.type = BRW_REGISTER_TYPE_D;
2965 tmp.subreg_offset = 0;
2966 tmp.stride = 1;
2967 } else {
2968 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
2969 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
2970
2971 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
2972 *
2973 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
2974 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
2975 *
2976 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
2977 */
2978
2979 if (then_rhs->is_negative_one()) {
2980 assert(else_rhs->is_one());
2981 g1_6.negate = true;
2982 }
2983
2984 emit(OR(tmp, g1_6, fs_reg(0x3f800000)));
2985 }
2986 emit(AND(dst, tmp, fs_reg(0xbf800000)));
2987 return true;
2988 }
2989
2990 return false;
2991 }
2992
2993 /**
2994 * Try to replace IF/MOV/ELSE/MOV/ENDIF with SEL.
2995 *
2996 * Many GLSL shaders contain the following pattern:
2997 *
2998 * x = condition ? foo : bar
2999 *
3000 * The compiler emits an ir_if tree for this, since each subexpression might be
3001 * a complex tree that could have side-effects or short-circuit logic.
3002 *
3003 * However, the common case is to simply select one of two constants or
3004 * variable values---which is exactly what SEL is for. In this case, the
3005 * assembly looks like:
3006 *
3007 * (+f0) IF
3008 * MOV dst src0
3009 * ELSE
3010 * MOV dst src1
3011 * ENDIF
3012 *
3013 * which can be easily translated into:
3014 *
3015 * (+f0) SEL dst src0 src1
3016 *
3017 * If src0 is an immediate value, we promote it to a temporary GRF.
3018 */
3019 bool
3020 fs_visitor::try_replace_with_sel()
3021 {
3022 fs_inst *endif_inst = (fs_inst *) instructions.get_tail();
3023 assert(endif_inst->opcode == BRW_OPCODE_ENDIF);
3024
3025 /* Pattern match in reverse: IF, MOV, ELSE, MOV, ENDIF. */
3026 int opcodes[] = {
3027 BRW_OPCODE_IF, BRW_OPCODE_MOV, BRW_OPCODE_ELSE, BRW_OPCODE_MOV,
3028 };
3029
3030 fs_inst *match = (fs_inst *) endif_inst->prev;
3031 for (int i = 0; i < 4; i++) {
3032 if (match->is_head_sentinel() || match->opcode != opcodes[4-i-1])
3033 return false;
3034 match = (fs_inst *) match->prev;
3035 }
3036
3037 /* The opcodes match; it looks like the right sequence of instructions. */
3038 fs_inst *else_mov = (fs_inst *) endif_inst->prev;
3039 fs_inst *then_mov = (fs_inst *) else_mov->prev->prev;
3040 fs_inst *if_inst = (fs_inst *) then_mov->prev;
3041
3042 /* Check that the MOVs are the right form. */
3043 if (then_mov->dst.equals(else_mov->dst) &&
3044 !then_mov->is_partial_write() &&
3045 !else_mov->is_partial_write()) {
3046
3047 /* Remove the matched instructions; we'll emit a SEL to replace them. */
3048 while (!if_inst->next->is_tail_sentinel())
3049 if_inst->next->exec_node::remove();
3050 if_inst->exec_node::remove();
3051
3052 /* Only the last source register can be a constant, so if the MOV in
3053 * the "then" clause uses a constant, we need to put it in a temporary.
3054 */
3055 fs_reg src0(then_mov->src[0]);
3056 if (src0.file == IMM) {
3057 src0 = vgrf(glsl_type::float_type);
3058 src0.type = then_mov->src[0].type;
3059 emit(MOV(src0, then_mov->src[0]));
3060 }
3061
3062 fs_inst *sel;
3063 if (if_inst->conditional_mod) {
3064 /* Sandybridge-specific IF with embedded comparison */
3065 emit(CMP(reg_null_d, if_inst->src[0], if_inst->src[1],
3066 if_inst->conditional_mod));
3067 sel = emit(BRW_OPCODE_SEL, then_mov->dst, src0, else_mov->src[0]);
3068 sel->predicate = BRW_PREDICATE_NORMAL;
3069 } else {
3070 /* Separate CMP and IF instructions */
3071 sel = emit(BRW_OPCODE_SEL, then_mov->dst, src0, else_mov->src[0]);
3072 sel->predicate = if_inst->predicate;
3073 sel->predicate_inverse = if_inst->predicate_inverse;
3074 }
3075
3076 return true;
3077 }
3078
3079 return false;
3080 }
3081
3082 void
3083 fs_visitor::visit(ir_if *ir)
3084 {
3085 if (try_opt_frontfacing_ternary(ir))
3086 return;
3087
3088 /* Don't point the annotation at the if statement, because then it plus
3089 * the then and else blocks get printed.
3090 */
3091 this->base_ir = ir->condition;
3092
3093 if (devinfo->gen == 6) {
3094 emit_if_gen6(ir);
3095 } else {
3096 emit_bool_to_cond_code(ir->condition);
3097
3098 emit(IF(BRW_PREDICATE_NORMAL));
3099 }
3100
3101 foreach_in_list(ir_instruction, ir_, &ir->then_instructions) {
3102 this->base_ir = ir_;
3103 ir_->accept(this);
3104 }
3105
3106 if (!ir->else_instructions.is_empty()) {
3107 emit(BRW_OPCODE_ELSE);
3108
3109 foreach_in_list(ir_instruction, ir_, &ir->else_instructions) {
3110 this->base_ir = ir_;
3111 ir_->accept(this);
3112 }
3113 }
3114
3115 emit(BRW_OPCODE_ENDIF);
3116
3117 if (!try_replace_with_sel() && devinfo->gen < 6) {
3118 no16("Can't support (non-uniform) control flow on SIMD16\n");
3119 }
3120 }
3121
3122 void
3123 fs_visitor::visit(ir_loop *ir)
3124 {
3125 if (devinfo->gen < 6) {
3126 no16("Can't support (non-uniform) control flow on SIMD16\n");
3127 }
3128
3129 this->base_ir = NULL;
3130 emit(BRW_OPCODE_DO);
3131
3132 foreach_in_list(ir_instruction, ir_, &ir->body_instructions) {
3133 this->base_ir = ir_;
3134 ir_->accept(this);
3135 }
3136
3137 this->base_ir = NULL;
3138 emit(BRW_OPCODE_WHILE);
3139 }
3140
3141 void
3142 fs_visitor::visit(ir_loop_jump *ir)
3143 {
3144 switch (ir->mode) {
3145 case ir_loop_jump::jump_break:
3146 emit(BRW_OPCODE_BREAK);
3147 break;
3148 case ir_loop_jump::jump_continue:
3149 emit(BRW_OPCODE_CONTINUE);
3150 break;
3151 }
3152 }
3153
3154 void
3155 fs_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
3156 {
3157 ir_dereference *deref = static_cast<ir_dereference *>(
3158 ir->actual_parameters.get_head());
3159 ir_variable *location = deref->variable_referenced();
3160 unsigned surf_index = (stage_prog_data->binding_table.abo_start +
3161 location->data.binding);
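/* For reference, a (hypothetical) GLSL declaration like
 *
 * layout(binding = 0, offset = 4) uniform atomic_uint counter;
 *
 * arrives here with location->data.binding == 0 and
 * location->data.atomic.offset == 4, placing the operation at byte 4
 * of the ABO surface at binding_table.abo_start.
 */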
3162
3163 /* Calculate the surface offset */
3164 fs_reg offset = vgrf(glsl_type::uint_type);
3165 ir_dereference_array *deref_array = deref->as_dereference_array();
3166
3167 if (deref_array) {
3168 deref_array->array_index->accept(this);
3169
3170 fs_reg tmp = vgrf(glsl_type::uint_type);
3171 emit(MUL(tmp, this->result, fs_reg(ATOMIC_COUNTER_SIZE)));
3172 emit(ADD(offset, tmp, fs_reg(location->data.atomic.offset)));
3173 } else {
3174 offset = fs_reg(location->data.atomic.offset);
3175 }
3176
3177 /* Emit the appropriate machine instruction */
3178 const char *callee = ir->callee->function_name();
3179 ir->return_deref->accept(this);
3180 fs_reg dst = this->result;
3181
3182 if (!strcmp("__intrinsic_atomic_read", callee)) {
3183 emit_untyped_surface_read(surf_index, dst, offset);
3184
3185 } else if (!strcmp("__intrinsic_atomic_increment", callee)) {
3186 emit_untyped_atomic(BRW_AOP_INC, surf_index, dst, offset,
3187 fs_reg(), fs_reg());
3188
3189 } else if (!strcmp("__intrinsic_atomic_predecrement", callee)) {
3190 emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset,
3191 fs_reg(), fs_reg());
3192 }
3193 }
3194
3195 void
3196 fs_visitor::visit(ir_call *ir)
3197 {
3198 const char *callee = ir->callee->function_name();
3199
3200 if (!strcmp("__intrinsic_atomic_read", callee) ||
3201 !strcmp("__intrinsic_atomic_increment", callee) ||
3202 !strcmp("__intrinsic_atomic_predecrement", callee)) {
3203 visit_atomic_counter_intrinsic(ir);
3204 } else {
3205 unreachable("Unsupported intrinsic.");
3206 }
3207 }
3208
3209 void
3210 fs_visitor::visit(ir_return *)
3211 {
3212 unreachable("FINISHME");
3213 }
3214
3215 void
3216 fs_visitor::visit(ir_function *ir)
3217 {
3218 /* Ignore function bodies other than main() -- we shouldn't see calls to
3219 * them since they should all be inlined before we get to ir_to_mesa.
3220 */
3221 if (strcmp(ir->name, "main") == 0) {
3222 const ir_function_signature *sig;
3223 exec_list empty;
3224
3225 sig = ir->matching_signature(NULL, &empty, false);
3226
3227 assert(sig);
3228
3229 foreach_in_list(ir_instruction, ir_, &sig->body) {
3230 this->base_ir = ir_;
3231 ir_->accept(this);
3232 }
3233 }
3234 }
3235
3236 void
3237 fs_visitor::visit(ir_function_signature *)
3238 {
3239 unreachable("not reached");
3240 }
3241
3242 void
3243 fs_visitor::visit(ir_emit_vertex *)
3244 {
3245 unreachable("not reached");
3246 }
3247
3248 void
3249 fs_visitor::visit(ir_end_primitive *)
3250 {
3251 unreachable("not reached");
3252 }
3253
3254 void
3255 fs_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
3256 fs_reg dst, fs_reg offset, fs_reg src0,
3257 fs_reg src1)
3258 {
3259 int reg_width = dispatch_width / 8;
3260 int length = 0;
3261
3262 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 4);
3263
3264 sources[0] = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
3265 /* Initialize the sample mask in the message header. */
3266 emit(MOV(sources[0], fs_reg(0u)))
3267 ->force_writemask_all = true;
3268
3269 if (stage == MESA_SHADER_FRAGMENT) {
3270 if (((brw_wm_prog_data*)this->prog_data)->uses_kill) {
3271 emit(MOV(component(sources[0], 7), brw_flag_reg(0, 1)))
3272 ->force_writemask_all = true;
3273 } else {
3274 emit(MOV(component(sources[0], 7),
3275 retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UD)))
3276 ->force_writemask_all = true;
3277 }
3278 } else {
3279 /* The execution mask is part of the side-band information sent together with
3280 * the message payload to the data port. It's implicitly ANDed with the sample
3281 * mask sent in the header to compute the actual set of channels that execute
3282 * the atomic operation.
3283 */
3284 assert(stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_COMPUTE);
3285 emit(MOV(component(sources[0], 7),
3286 fs_reg(0xffffu)))->force_writemask_all = true;
3287 }
3288 length++;
3289
3290 /* Set the atomic operation offset. */
3291 sources[1] = vgrf(glsl_type::uint_type);
3292 emit(MOV(sources[1], offset));
3293 length++;
3294
3295 /* Set the atomic operation arguments. */
3296 if (src0.file != BAD_FILE) {
3297 sources[length] = vgrf(glsl_type::uint_type);
3298 emit(MOV(sources[length], src0));
3299 length++;
3300 }
3301
3302 if (src1.file != BAD_FILE) {
3303 sources[length] = vgrf(glsl_type::uint_type);
3304 emit(MOV(sources[length], src1));
3305 length++;
3306 }
3307
3308 int mlen = 1 + (length - 1) * reg_width;
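/* Sanity check on the math above: the header is always one register,
 * so a SIMD16 atomic increment, which has no data arguments
 * (length == 2: header plus offset), takes
 * mlen = 1 + (2 - 1) * 2 = 3 registers.
 */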
3309 fs_reg src_payload = fs_reg(GRF, alloc.allocate(mlen),
3310 BRW_REGISTER_TYPE_UD, dispatch_width);
3311 emit(LOAD_PAYLOAD(src_payload, sources, length, 1));
3312
3313 /* Emit the instruction. */
3314 fs_inst *inst = emit(SHADER_OPCODE_UNTYPED_ATOMIC, dst, src_payload,
3315 fs_reg(surf_index), fs_reg(atomic_op));
3316 inst->mlen = mlen;
3317 }
3318
3319 void
3320 fs_visitor::emit_untyped_surface_read(unsigned surf_index, fs_reg dst,
3321 fs_reg offset)
3322 {
3323 int reg_width = dispatch_width / 8;
3324
3325 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 2);
3326
3327 sources[0] = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
3328 /* Initialize the sample mask in the message header. */
3329 emit(MOV(sources[0], fs_reg(0u)))
3330 ->force_writemask_all = true;
3331
3332 if (stage == MESA_SHADER_FRAGMENT) {
3333 if (((brw_wm_prog_data*)this->prog_data)->uses_kill) {
3334 emit(MOV(component(sources[0], 7), brw_flag_reg(0, 1)))
3335 ->force_writemask_all = true;
3336 } else {
3337 emit(MOV(component(sources[0], 7),
3338 retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UD)))
3339 ->force_writemask_all = true;
3340 }
3341 } else {
3342 /* The execution mask is part of the side-band information sent together with
3343 * the message payload to the data port. It's implicitly ANDed with the sample
3344 * mask sent in the header to compute the actual set of channels that execute
3345 * the surface read.
3346 */
3347 assert(stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_COMPUTE);
3348 emit(MOV(component(sources[0], 7),
3349 fs_reg(0xffffu)))->force_writemask_all = true;
3350 }
3351
3352 /* Set the surface read offset. */
3353 sources[1] = vgrf(glsl_type::uint_type);
3354 emit(MOV(sources[1], offset));
3355
3356 int mlen = 1 + reg_width;
3357 fs_reg src_payload = fs_reg(GRF, alloc.allocate(mlen),
3358 BRW_REGISTER_TYPE_UD, dispatch_width);
3359 fs_inst *inst = emit(LOAD_PAYLOAD(src_payload, sources, 2, 1));
3360
3361 /* Emit the instruction. */
3362 inst = emit(SHADER_OPCODE_UNTYPED_SURFACE_READ, dst, src_payload,
3363 fs_reg(surf_index), fs_reg(1));
3364 inst->mlen = mlen;
3365 }
3366
3367 fs_inst *
3368 fs_visitor::emit(fs_inst *inst)
3369 {
3370 if (dispatch_width == 16 && inst->exec_size == 8)
3371 inst->force_uncompressed = true;
3372
3373 inst->annotation = this->current_annotation;
3374 inst->ir = this->base_ir;
3375
3376 this->instructions.push_tail(inst);
3377
3378 return inst;
3379 }
3380
3381 void
3382 fs_visitor::emit(exec_list list)
3383 {
3384 foreach_in_list_safe(fs_inst, inst, &list) {
3385 inst->exec_node::remove();
3386 emit(inst);
3387 }
3388 }
3389
3390 /** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
3391 void
3392 fs_visitor::emit_dummy_fs()
3393 {
3394 int reg_width = dispatch_width / 8;
3395
3396 /* Everyone's favorite color. */
3397 const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
3398 for (int i = 0; i < 4; i++) {
3399 emit(MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F,
3400 dispatch_width), fs_reg(color[i])));
3401 }
3402
3403 fs_inst *write;
3404 write = emit(FS_OPCODE_FB_WRITE);
3405 write->eot = true;
3406 if (devinfo->gen >= 6) {
3407 write->base_mrf = 2;
3408 write->mlen = 4 * reg_width;
3409 } else {
3410 write->header_size = 2;
3411 write->base_mrf = 0;
3412 write->mlen = 2 + 4 * reg_width;
3413 }
3414
3415 /* Tell the SF we don't have any inputs. Gen4-5 require at least one
3416 * varying to avoid GPU hangs, so set that.
3417 */
3418 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
3419 wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
3420 memset(wm_prog_data->urb_setup, -1,
3421 sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);
3422
3423 /* We don't have any uniforms. */
3424 stage_prog_data->nr_params = 0;
3425 stage_prog_data->nr_pull_params = 0;
3426 stage_prog_data->curb_read_length = 0;
3427 stage_prog_data->dispatch_grf_start_reg = 2;
3428 wm_prog_data->dispatch_grf_start_reg_16 = 2;
3429 grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */
3430
3431 calculate_cfg();
3432 }
3433
3434 /* The register location here is relative to the start of the URB
3435 * data. It will get adjusted to be a real location before
3436 * generate_code() time.
3437 */
3438 struct brw_reg
3439 fs_visitor::interp_reg(int location, int channel)
3440 {
3441 assert(stage == MESA_SHADER_FRAGMENT);
3442 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
3443 int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
3444 int stride = (channel & 1) * 4;
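/* e.g., channel 3 of a slot with urb_setup[location] == 1 yields
 * regnr 1 * 2 + 3 / 2 == 3 and a subregister offset of
 * (3 & 1) * 4 == 4 (the variable is named stride but is used as the
 * subnr below).
 */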
3445
3446 assert(prog_data->urb_setup[location] != -1);
3447
3448 return brw_vec1_grf(regnr, stride);
3449 }
3450
3451 /** Emits the interpolation for the varying inputs. */
3452 void
3453 fs_visitor::emit_interpolation_setup_gen4()
3454 {
3455 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
3456
3457 this->current_annotation = "compute pixel centers";
3458 this->pixel_x = vgrf(glsl_type::uint_type);
3459 this->pixel_y = vgrf(glsl_type::uint_type);
3460 this->pixel_x.type = BRW_REGISTER_TYPE_UW;
3461 this->pixel_y.type = BRW_REGISTER_TYPE_UW;
3462 emit(ADD(this->pixel_x,
3463 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
3464 fs_reg(brw_imm_v(0x10101010))));
3465 emit(ADD(this->pixel_y,
3466 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
3467 fs_reg(brw_imm_v(0x11001100))));
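/* The brw_imm_v() arguments above are packed vectors of eight 4-bit
 * values (assuming the usual V-immediate encoding): 0x10101010 adds
 * 0,1,0,1,... in X and 0x11001100 adds 0,0,1,1,... in Y, turning the
 * per-subspan origins in g1 into per-pixel coordinates for the 2x2
 * quads.
 */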
3468
3469 this->current_annotation = "compute pixel deltas from v0";
3470
3471 this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
3472 vgrf(glsl_type::vec2_type);
3473 const fs_reg &delta_xy = this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC];
3474 const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
3475 const fs_reg ystart(negate(brw_vec1_grf(1, 1)));
3476
3477 if (devinfo->has_pln && dispatch_width == 16) {
3478 emit(ADD(half(offset(delta_xy, 0), 0), half(this->pixel_x, 0), xstart));
3479 emit(ADD(half(offset(delta_xy, 0), 1), half(this->pixel_y, 0), ystart));
3480 emit(ADD(half(offset(delta_xy, 1), 0), half(this->pixel_x, 1), xstart))
3481 ->force_sechalf = true;
3482 emit(ADD(half(offset(delta_xy, 1), 1), half(this->pixel_y, 1), ystart))
3483 ->force_sechalf = true;
3484 } else {
3485 emit(ADD(offset(delta_xy, 0), this->pixel_x, xstart));
3486 emit(ADD(offset(delta_xy, 1), this->pixel_y, ystart));
3487 }
3488
3489 this->current_annotation = "compute pos.w and 1/pos.w";
3490 /* Compute wpos.w. It's always in our setup, since it's needed to
3491 * interpolate the other attributes.
3492 */
3493 this->wpos_w = vgrf(glsl_type::float_type);
3494 emit(FS_OPCODE_LINTERP, wpos_w, delta_xy, interp_reg(VARYING_SLOT_POS, 3));
3495 /* Compute the pixel 1/W value from wpos.w. */
3496 this->pixel_w = vgrf(glsl_type::float_type);
3497 emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
3498 this->current_annotation = NULL;
3499 }
3500
3501 /** Emits the interpolation for the varying inputs. */
3502 void
3503 fs_visitor::emit_interpolation_setup_gen6()
3504 {
3505 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
3506
3507 this->current_annotation = "compute pixel centers";
3508 if (devinfo->gen >= 8 || dispatch_width == 8) {
3509 /* The "Register Region Restrictions" page says for BDW (and newer,
3510 * presumably):
3511 *
3512 * "When destination spans two registers, the source may be one or
3513 * two registers. The destination elements must be evenly split
3514 * between the two registers."
3515 *
3516 * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
3517 * compute our pixel centers.
3518 */
3519 fs_reg int_pixel_xy(GRF, alloc.allocate(dispatch_width / 8),
3520 BRW_REGISTER_TYPE_UW, dispatch_width * 2);
3521 emit(ADD(int_pixel_xy,
3522 fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
3523 fs_reg(brw_imm_v(0x11001010))))
3524 ->force_writemask_all = true;
3525
3526 this->pixel_x = vgrf(glsl_type::float_type);
3527 this->pixel_y = vgrf(glsl_type::float_type);
3528 emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
3529 emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
3530 } else {
3531 /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
3532 *
3533 * "When destination spans two registers, the source MUST span two
3534 * registers."
3535 *
3536 * Since the GRF source of the ADD will only read a single register, we
3537 * must do two separate ADDs in SIMD16.
3538 */
3539 fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
3540 fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
3541 int_pixel_x.type = BRW_REGISTER_TYPE_UW;
3542 int_pixel_y.type = BRW_REGISTER_TYPE_UW;
3543 emit(ADD(int_pixel_x,
3544 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
3545 fs_reg(brw_imm_v(0x10101010))));
3546 emit(ADD(int_pixel_y,
3547 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
3548 fs_reg(brw_imm_v(0x11001100))));
3549
3550 /* As of gen6, we can no longer mix float and int sources. We have
3551 * to turn the integer pixel centers into floats for their actual
3552 * use.
3553 */
3554 this->pixel_x = vgrf(glsl_type::float_type);
3555 this->pixel_y = vgrf(glsl_type::float_type);
3556 emit(MOV(this->pixel_x, int_pixel_x));
3557 emit(MOV(this->pixel_y, int_pixel_y));
3558 }
3559
3560 this->current_annotation = "compute pos.w";
3561 this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
3562 this->wpos_w = vgrf(glsl_type::float_type);
3563 emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);
3564
3565 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
3566 uint8_t reg = payload.barycentric_coord_reg[i];
3567 this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));
3568 }
3569
3570 this->current_annotation = NULL;
3571 }
3572
3573 void
3574 fs_visitor::setup_color_payload(fs_reg *dst, fs_reg color, unsigned components,
3575 unsigned exec_size, bool use_2nd_half)
3576 {
3577 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3578 fs_inst *inst;
3579
3580 if (key->clamp_fragment_color) {
3581 fs_reg tmp = vgrf(glsl_type::vec4_type);
3582 assert(color.type == BRW_REGISTER_TYPE_F);
3583 for (unsigned i = 0; i < components; i++) {
3584 inst = emit(MOV(offset(tmp, i), offset(color, i)));
3585 inst->saturate = true;
3586 }
3587 color = tmp;
3588 }
3589
3590 if (exec_size < dispatch_width) {
3591 unsigned half_idx = use_2nd_half ? 1 : 0;
3592 for (unsigned i = 0; i < components; i++)
3593 dst[i] = half(offset(color, i), half_idx);
3594 } else {
3595 for (unsigned i = 0; i < components; i++)
3596 dst[i] = offset(color, i);
3597 }
3598 }
3599
3600 static enum brw_conditional_mod
3601 cond_for_alpha_func(GLenum func)
3602 {
3603 switch (func) {
3604 case GL_GREATER:
3605 return BRW_CONDITIONAL_G;
3606 case GL_GEQUAL:
3607 return BRW_CONDITIONAL_GE;
3608 case GL_LESS:
3609 return BRW_CONDITIONAL_L;
3610 case GL_LEQUAL:
3611 return BRW_CONDITIONAL_LE;
3612 case GL_EQUAL:
3613 return BRW_CONDITIONAL_EQ;
3614 case GL_NOTEQUAL:
3615 return BRW_CONDITIONAL_NEQ;
3616 default:
3617 unreachable("Not reached");
3618 }
3619 }
3620
3621 /**
3622 * Alpha test support for when we compile it into the shader instead
3623 * of using the normal fixed-function alpha test.
3624 */
3625 void
3626 fs_visitor::emit_alpha_test()
3627 {
3628 assert(stage == MESA_SHADER_FRAGMENT);
3629 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3630 this->current_annotation = "Alpha test";
3631
3632 fs_inst *cmp;
3633 if (key->alpha_test_func == GL_ALWAYS)
3634 return;
3635
3636 if (key->alpha_test_func == GL_NEVER) {
3637 /* f0.1 = 0 */
3638 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
3639 BRW_REGISTER_TYPE_UW));
3640 cmp = emit(CMP(reg_null_f, some_reg, some_reg,
3641 BRW_CONDITIONAL_NEQ));
3642 } else {
3643 /* RT0 alpha */
3644 fs_reg color = offset(outputs[0], 3);
3645
3646 /* f0.1 &= func(color, ref) */
3647 cmp = emit(CMP(reg_null_f, color, fs_reg(key->alpha_test_ref),
3648 cond_for_alpha_func(key->alpha_test_func)));
3649 }
3650 cmp->predicate = BRW_PREDICATE_NORMAL;
3651 cmp->flag_subreg = 1;
3652 }
3653
3654 fs_inst *
3655 fs_visitor::emit_single_fb_write(fs_reg color0, fs_reg color1,
3656 fs_reg src0_alpha, unsigned components,
3657 unsigned exec_size, bool use_2nd_half)
3658 {
3659 assert(stage == MESA_SHADER_FRAGMENT);
3660 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
3661 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3662
3663 this->current_annotation = "FB write header";
3664 int header_size = 2, payload_header_size;
3665
3666 /* We can potentially have a message length of up to 15, so we have to set
3667 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3668 */
3669 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, 15);
3670 int length = 0;
3671
3672 /* From the Sandy Bridge PRM, volume 4, page 198:
3673 *
3674 * "Dispatched Pixel Enables. One bit per pixel indicating
3675 * which pixels were originally enabled when the thread was
3676 * dispatched. This field is only required for the end-of-
3677 * thread message and on all dual-source messages."
3678 */
3679 if (devinfo->gen >= 6 &&
3680 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3681 color1.file == BAD_FILE &&
3682 key->nr_color_regions == 1) {
3683 header_size = 0;
3684 }
3685
3686 if (header_size != 0) {
3687 assert(header_size == 2);
3688 /* Allocate 2 registers for a header */
3689 length += 2;
3690 }
3691
3692 if (payload.aa_dest_stencil_reg) {
3693 sources[length] = fs_reg(GRF, alloc.allocate(1));
3694 emit(MOV(sources[length],
3695 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0))));
3696 length++;
3697 }
3698
3699 prog_data->uses_omask =
3700 prog->OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
3701 if (prog_data->uses_omask) {
3702 this->current_annotation = "FB write oMask";
3703 assert(this->sample_mask.file != BAD_FILE);
3704 /* Hand over gl_SampleMask. Only the lower 16 bits are relevant. Since
3705 * it's unsigned single words, one vgrf is always 16-wide.
3706 */
3707 sources[length] = fs_reg(GRF, alloc.allocate(1),
3708 BRW_REGISTER_TYPE_UW, 16);
3709 emit(FS_OPCODE_SET_OMASK, sources[length], this->sample_mask);
3710 length++;
3711 }
3712
3713 payload_header_size = length;
3714
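/* Pack the per-channel color sources. When dual-source blending is off,
 * an optional replicated src0 alpha precedes color0's four channels;
 * with dual-source blending, color1's four channels follow color0's.
 */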
3715 if (color0.file == BAD_FILE) {
3716 /* Even if there are no color buffers enabled, we still need to send
3717 * alpha out the pipeline to our null renderbuffer to support
3718 * alpha-testing, alpha-to-coverage, and so on.
3719 */
3720 if (this->outputs[0].file != BAD_FILE)
3721 setup_color_payload(&sources[length + 3], offset(this->outputs[0], 3),
3722 1, exec_size, false);
3723 length += 4;
3724 } else if (color1.file == BAD_FILE) {
3725 if (src0_alpha.file != BAD_FILE) {
3726 setup_color_payload(&sources[length], src0_alpha, 1, exec_size, false);
3727 length++;
3728 }
3729
3730 setup_color_payload(&sources[length], color0, components,
3731 exec_size, use_2nd_half);
3732 length += 4;
3733 } else {
3734 setup_color_payload(&sources[length], color0, components,
3735 exec_size, use_2nd_half);
3736 length += 4;
3737 setup_color_payload(&sources[length], color1, components,
3738 exec_size, use_2nd_half);
3739 length += 4;
3740 }
3741
3742 if (source_depth_to_render_target) {
3743 if (devinfo->gen == 6) {
3744 /* For outputting oDepth on gen6, SIMD8 writes have to be
3745 * used. This would require SIMD8 moves of each half to
3746 * message regs, kind of like pre-gen5 SIMD16 FB writes.
3747 * Just bail on doing so for now.
3748 */
3749 no16("Missing support for simd16 depth writes on gen6\n");
3750 }
3751
3752 if (prog->OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
3753 /* Hand over gl_FragDepth. */
3754 assert(this->frag_depth.file != BAD_FILE);
3755 sources[length] = this->frag_depth;
3756 } else {
3757 /* Pass through the payload depth. */
3758 sources[length] = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
3759 }
3760 length++;
3761 }
3762
3763 if (payload.dest_depth_reg)
3764 sources[length++] = fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0));
3765
3766 fs_inst *load;
3767 fs_inst *write;
3768 if (devinfo->gen >= 7) {
3769 /* Send from the GRF */
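/* Build the payload with a placeholder GRF (-1): emit LOAD_PAYLOAD
 * first so regs_written tells us how large the allocation must be,
 * then patch the real register into the instruction's destination.
 */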
3770 fs_reg payload = fs_reg(GRF, -1, BRW_REGISTER_TYPE_F, exec_size);
3771 load = emit(LOAD_PAYLOAD(payload, sources, length, payload_header_size));
3772 payload.reg = alloc.allocate(load->regs_written);
3773 load->dst = payload;
3774 write = emit(FS_OPCODE_FB_WRITE, reg_undef, payload);
3775 write->base_mrf = -1;
3776 } else {
3777 /* Send from the MRF */
3778 load = emit(LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F, exec_size),
3779 sources, length, payload_header_size));
3780
3781 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3782 * will do this for us if we just give it a COMPR4 destination.
3783 */
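/* Setting the COMPR4 bit makes the SIMD16 LOAD_PAYLOAD write each
 * component's first SIMD8 half to MRF m and the second half to m+4,
 * which is the interleaved layout the hardware expects.
 */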
3784 if (devinfo->gen < 6 && exec_size == 16)
3785 load->dst.reg |= BRW_MRF_COMPR4;
3786
3787 write = emit(FS_OPCODE_FB_WRITE);
3788 write->exec_size = exec_size;
3789 write->base_mrf = 1;
3790 }
3791
3792 write->mlen = load->regs_written;
3793 write->header_size = header_size;
3794 if (prog_data->uses_kill) {
3795 write->predicate = BRW_PREDICATE_NORMAL;
3796 write->flag_subreg = 1;
3797 }
3798 return write;
3799 }
3800
3801 void
3802 fs_visitor::emit_fb_writes()
3803 {
3804 assert(stage == MESA_SHADER_FRAGMENT);
3805 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
3806 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
3807
3808 fs_inst *inst = NULL;
3809 if (do_dual_src) {
3810 this->current_annotation = ralloc_asprintf(this->mem_ctx,
3811 "FB dual-source write");
3812 inst = emit_single_fb_write(this->outputs[0], this->dual_src_output,
3813 reg_undef, 4, 8);
3814 inst->target = 0;
3815
3816 /* SIMD16 dual source blending requires sending two SIMD8 dual source
3817 * messages, where each message contains color data for 8 pixels. Color
3818 * data for the first group of pixels is stored in the "lower" half of
3819 * the color registers, so in SIMD16, the previous message did:
3820 * m + 0: r0
3821 * m + 1: g0
3822 * m + 2: b0
3823 * m + 3: a0
3824 *
3825 * Here goes the second message, which packs color data for the
3826 * remaining 8 pixels. Color data for these pixels is stored in the
3827 * "upper" half of the color registers, so we need to do:
3828 * m + 0: r1
3829 * m + 1: g1
3830 * m + 2: b1
3831 * m + 3: a1
3832 */
3833 if (dispatch_width == 16) {
3834 inst = emit_single_fb_write(this->outputs[0], this->dual_src_output,
3835 reg_undef, 4, 8, true);
3836 inst->target = 0;
3837 }
3838
3839 prog_data->dual_src_blend = true;
3840 } else {
3841 for (int target = 0; target < key->nr_color_regions; target++) {
3842 /* Skip over outputs that weren't written. */
3843 if (this->outputs[target].file == BAD_FILE)
3844 continue;
3845
3846 this->current_annotation = ralloc_asprintf(this->mem_ctx,
3847 "FB write target %d",
3848 target);
3849 fs_reg src0_alpha;
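/* With key->replicate_alpha set (e.g. for alpha-to-coverage or alpha
 * test with multiple render targets), every target beyond RT0 also
 * carries RT0's alpha along as src0_alpha.
 */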
3850 if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
3851 src0_alpha = offset(outputs[0], 3);
3852
3853 inst = emit_single_fb_write(this->outputs[target], reg_undef,
3854 src0_alpha,
3855 this->output_components[target],
3856 dispatch_width);
3857 inst->target = target;
3858 }
3859 }
3860
3861 if (inst == NULL) {
3862 /* Even if there are no color buffers enabled, we still need to send
3863 * alpha out the pipeline to our null renderbuffer to support
3864 * alpha-testing, alpha-to-coverage, and so on.
3865 */
3866 inst = emit_single_fb_write(reg_undef, reg_undef, reg_undef, 0,
3867 dispatch_width);
3868 inst->target = 0;
3869 }
3870
3871 inst->eot = true;
3872 this->current_annotation = NULL;
3873 }
3874
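/* Point four push-constant slots at each enabled user clip plane so
 * compute_clip_distance() can read the plane equations as ordinary
 * uniforms.
 */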
3875 void
3876 fs_visitor::setup_uniform_clipplane_values()
3877 {
3878 gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
3879 const struct brw_vue_prog_key *key =
3880 (const struct brw_vue_prog_key *) this->key;
3881
3882 for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
3883 this->userplane[i] = fs_reg(UNIFORM, uniforms);
3884 for (int j = 0; j < 4; ++j) {
3885 stage_prog_data->param[uniforms + j] =
3886 (gl_constant_value *) &clip_planes[i][j];
3887 }
3888 uniforms += 4;
3889 }
3890 }
3891
3892 void fs_visitor::compute_clip_distance()
3893 {
3894 struct brw_vue_prog_data *vue_prog_data =
3895 (struct brw_vue_prog_data *) prog_data;
3896 const struct brw_vue_prog_key *key =
3897 (const struct brw_vue_prog_key *) this->key;
3898
3899 /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
3900 *
3901 * "If a linked set of shaders forming the vertex stage contains no
3902 * static write to gl_ClipVertex or gl_ClipDistance, but the
3903 * application has requested clipping against user clip planes through
3904 * the API, then the coordinate written to gl_Position is used for
3905 * comparison against the user clip planes."
3906 *
3907 * This function is only called if the shader didn't write to
3908 * gl_ClipDistance. Accordingly, we use gl_ClipVertex to perform clipping
3909 * if the user wrote to it; otherwise we use gl_Position.
3910 */
3911
3912 gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
3913 if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
3914 clip_vertex = VARYING_SLOT_POS;
3915
3916 /* If the clip vertex isn't written, skip this. Typically this means
3917 * the GS will set up clipping. */
3918 if (outputs[clip_vertex].file == BAD_FILE)
3919 return;
3920
3921 setup_uniform_clipplane_values();
3922
3923 current_annotation = "user clip distances";
3924
3925 this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
3926 this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);
3927
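/* Each clip distance is dot(clip_vertex, plane): a MUL for the x term
 * followed by MADs accumulating y, z and w. Distances 0-3 land in
 * CLIP_DIST0, 4-7 in CLIP_DIST1.
 */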
3928 for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
3929 fs_reg u = userplane[i];
3930 fs_reg output = outputs[VARYING_SLOT_CLIP_DIST0 + i / 4];
3931 output.reg_offset = i & 3;
3932
3933 emit(MUL(output, outputs[clip_vertex], u));
3934 for (int j = 1; j < 4; j++) {
3935 u.reg = userplane[i].reg + j;
3936 emit(MAD(output, output, offset(outputs[clip_vertex], j), u));
3937 }
3938 }
3939 }
3940
3941 void
3942 fs_visitor::emit_urb_writes()
3943 {
3944 int slot, urb_offset, length;
3945 struct brw_vs_prog_data *vs_prog_data =
3946 (struct brw_vs_prog_data *) prog_data;
3947 const struct brw_vs_prog_key *key =
3948 (const struct brw_vs_prog_key *) this->key;
3949 const GLbitfield64 psiz_mask =
3950 VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
3951 const struct brw_vue_map *vue_map = &vs_prog_data->base.vue_map;
3952 bool flush;
3953 fs_reg sources[8];
3954
3955 /* Lower legacy fixed-function and gl_ClipVertex clipping to clip distances */
3956 if (key->base.userclip_active && !prog->UsesClipDistanceOut)
3957 compute_clip_distance();
3958
3959 /* If we don't have any valid slots to write, just do a minimal urb write
3960 * send to terminate the shader. */
3961 if (vue_map->slots_valid == 0) {
3962
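/* g1 holds the URB handles in the thread payload; copy it with
 * force_writemask_all so the header is valid in all channels.
 */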
3963 fs_reg payload = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
3964 fs_inst *inst = emit(MOV(payload, fs_reg(retype(brw_vec8_grf(1, 0),
3965 BRW_REGISTER_TYPE_UD))));
3966 inst->force_writemask_all = true;
3967
3968 inst = emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
3969 inst->eot = true;
3970 inst->mlen = 1;
3971 inst->offset = 1;
3972 return;
3973 }
3974
3975 length = 0;
3976 urb_offset = 0;
3977 flush = false;
3978 for (slot = 0; slot < vue_map->num_slots; slot++) {
3979 fs_reg reg, src, zero;
3980
3981 int varying = vue_map->slot_to_varying[slot];
3982 switch (varying) {
3983 case VARYING_SLOT_PSIZ:
3984
3985 /* The point size varying slot is the vue header and is always in the
3986 * vue map. But often none of the special varyings that live there
3987 * are written, and in that case we can skip writing to the vue
3988 * header, provided the corresponding state properly clamps the
3989 * values further down the pipeline. */
3990 if ((vue_map->slots_valid & psiz_mask) == 0) {
3991 assert(length == 0);
3992 urb_offset++;
3993 break;
3994 }
3995
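/* Build the four VUE header dwords: reserved (zero), layer, viewport
 * index, and point size, substituting zero for anything not written.
 */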
3996 zero = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
3997 emit(MOV(zero, fs_reg(0u)));
3998
3999 sources[length++] = zero;
4000 if (vue_map->slots_valid & VARYING_BIT_LAYER)
4001 sources[length++] = this->outputs[VARYING_SLOT_LAYER];
4002 else
4003 sources[length++] = zero;
4004
4005 if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
4006 sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
4007 else
4008 sources[length++] = zero;
4009
4010 if (vue_map->slots_valid & VARYING_BIT_PSIZ)
4011 sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
4012 else
4013 sources[length++] = zero;
4014 break;
4015
4016 case BRW_VARYING_SLOT_NDC:
4017 case VARYING_SLOT_EDGE:
4018 unreachable("unexpected scalar vs output");
4019 break;
4020
4021 case BRW_VARYING_SLOT_PAD:
4022 break;
4023
4024 default:
4025 /* gl_Position is always in the vue map, but isn't always written by
4026 * the shader. Other varyings (clip distances) get added to the vue
4027 * map but don't always get written. In those cases, the
4028 * corresponding this->outputs[] slot will be invalid and we can skip
4029 * the urb write for the varying. If we've already queued up a vue
4030 * slot for writing, we flush a mlen 5 urb write; otherwise we just
4031 * advance the urb_offset.
4032 */
4033 if (this->outputs[varying].file == BAD_FILE) {
4034 if (length > 0)
4035 flush = true;
4036 else
4037 urb_offset++;
4038 break;
4039 }
4040
4041 if ((varying == VARYING_SLOT_COL0 ||
4042 varying == VARYING_SLOT_COL1 ||
4043 varying == VARYING_SLOT_BFC0 ||
4044 varying == VARYING_SLOT_BFC1) &&
4045 key->clamp_vertex_color) {
4046 /* We need to clamp these color values, so do a saturating MOV into a
4047 * temp register and use that for the payload.
4048 */
4049 for (int i = 0; i < 4; i++) {
4050 reg = fs_reg(GRF, alloc.allocate(1), outputs[varying].type);
4051 src = offset(this->outputs[varying], i);
4052 fs_inst *inst = emit(MOV(reg, src));
4053 inst->saturate = true;
4054 sources[length++] = reg;
4055 }
4056 } else {
4057 for (int i = 0; i < 4; i++)
4058 sources[length++] = offset(this->outputs[varying], i);
4059 }
4060 break;
4061 }
4062
4063 current_annotation = "URB write";
4064
4065 /* If we've queued up 8 registers of payload (2 VUE slots), this is the
4066 * last slot, or we need to flush (see the BAD_FILE varying case above),
4067 * emit a URB write send now to flush out the data.
4068 */
4069 bool last = (slot == vue_map->num_slots - 1);
4070 if (length == 8 || last)
4071 flush = true;
4072 if (flush) {
4073 fs_reg *payload_sources = ralloc_array(mem_ctx, fs_reg, length + 1);
4074 fs_reg payload = fs_reg(GRF, alloc.allocate(length + 1),
4075 BRW_REGISTER_TYPE_F, dispatch_width);
4076
4077 /* We need WE_all on the MOV for the message header (the URB handles)
4078 * so do a MOV to a dummy register and set force_writemask_all on the
4079 * MOV. LOAD_PAYLOAD will preserve that.
4080 */
4081 fs_reg dummy = fs_reg(GRF, alloc.allocate(1),
4082 BRW_REGISTER_TYPE_UD);
4083 fs_inst *inst = emit(MOV(dummy, fs_reg(retype(brw_vec8_grf(1, 0),
4084 BRW_REGISTER_TYPE_UD))));
4085 inst->force_writemask_all = true;
4086 payload_sources[0] = dummy;
4087
4088 memcpy(&payload_sources[1], sources, length * sizeof(sources[0]));
4089 emit(LOAD_PAYLOAD(payload, payload_sources, length + 1, 1));
4090
4091 inst = emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
4092 inst->eot = last;
4093 inst->mlen = length + 1;
4094 inst->offset = urb_offset;
4095 urb_offset = slot + 1;
4096 length = 0;
4097 flush = false;
4098 }
4099 }
4100 }
4101
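/* Fold a negate source modifier on an unsigned (UD) value into a
 * temporary with an explicit MOV, so later uses see a plain register
 * without the modifier.
 */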
4102 void
4103 fs_visitor::resolve_ud_negate(fs_reg *reg)
4104 {
4105 if (reg->type != BRW_REGISTER_TYPE_UD ||
4106 !reg->negate)
4107 return;
4108
4109 fs_reg temp = vgrf(glsl_type::uint_type);
4110 emit(MOV(temp, *reg));
4111 *reg = temp;
4112 }
4113
4114 void
4115 fs_visitor::emit_cs_terminate()
4116 {
4117 assert(devinfo->gen >= 7);
4118
4119 /* We are getting the thread ID from the compute shader header */
4120 assert(stage == MESA_SHADER_COMPUTE);
4121
4122 /* We can't directly send from g0, since sends with EOT have to use
4123 * g112-127. So, copy it to a virtual register; the register allocator will
4124 * make sure it uses the appropriate register range.
4125 */
4126 struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
4127 fs_reg payload = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
4128 fs_inst *inst = emit(MOV(payload, g0));
4129 inst->force_writemask_all = true;
4130
4131 /* Send a message to the thread spawner to terminate the thread. */
4132 inst = emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
4133 inst->eot = true;
4134 }
4135
4136 /**
4137 * Resolve the result of a Gen4-5 CMP instruction to a proper boolean.
4138 *
4139 * CMP on Gen4-5 only sets the LSB of the result; the rest are undefined.
4140 * If we need a proper boolean value, we have to fix it up to be 0 or ~0.
4141 */
4142 void
4143 fs_visitor::resolve_bool_comparison(ir_rvalue *rvalue, fs_reg *reg)
4144 {
4145 assert(devinfo->gen <= 5);
4146
4147 if (rvalue->type != glsl_type::bool_type)
4148 return;
4149
4150 fs_reg and_result = vgrf(glsl_type::bool_type);
4151 fs_reg neg_result = vgrf(glsl_type::bool_type);
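/* (x & 1) isolates the only defined bit; negating the resulting 0 or 1
 * as a signed integer produces the canonical 0 / ~0 boolean.
 */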
4152 emit(AND(and_result, *reg, fs_reg(1)));
4153 emit(MOV(neg_result, negate(and_result)));
4154 *reg = neg_result;
4155 }
4156
4157 fs_visitor::fs_visitor(struct brw_context *brw,
4158 void *mem_ctx,
4159 const struct brw_wm_prog_key *key,
4160 struct brw_wm_prog_data *prog_data,
4161 struct gl_shader_program *shader_prog,
4162 struct gl_fragment_program *fp,
4163 unsigned dispatch_width)
4164 : backend_visitor(brw, shader_prog, &fp->Base, &prog_data->base,
4165 MESA_SHADER_FRAGMENT),
4166 reg_null_f(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_F)),
4167 reg_null_d(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_D)),
4168 reg_null_ud(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_UD)),
4169 key(key), prog_data(&prog_data->base),
4170 dispatch_width(dispatch_width), promoted_constants(0)
4171 {
4172 this->mem_ctx = mem_ctx;
4173 init();
4174 }
4175
4176 fs_visitor::fs_visitor(struct brw_context *brw,
4177 void *mem_ctx,
4178 const struct brw_vs_prog_key *key,
4179 struct brw_vs_prog_data *prog_data,
4180 struct gl_shader_program *shader_prog,
4181 struct gl_vertex_program *cp,
4182 unsigned dispatch_width)
4183 : backend_visitor(brw, shader_prog, &cp->Base, &prog_data->base.base,
4184 MESA_SHADER_VERTEX),
4185 reg_null_f(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_F)),
4186 reg_null_d(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_D)),
4187 reg_null_ud(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_UD)),
4188 key(key), prog_data(&prog_data->base.base),
4189 dispatch_width(dispatch_width), promoted_constants(0)
4190 {
4191 this->mem_ctx = mem_ctx;
4192 init();
4193 }
4194
4195 fs_visitor::fs_visitor(struct brw_context *brw,
4196 void *mem_ctx,
4197 const struct brw_cs_prog_key *key,
4198 struct brw_cs_prog_data *prog_data,
4199 struct gl_shader_program *shader_prog,
4200 struct gl_compute_program *cp,
4201 unsigned dispatch_width)
4202 : backend_visitor(brw, shader_prog, &cp->Base, &prog_data->base,
4203 MESA_SHADER_COMPUTE),
4204 reg_null_f(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_F)),
4205 reg_null_d(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_D)),
4206 reg_null_ud(retype(brw_null_vec(dispatch_width), BRW_REGISTER_TYPE_UD)),
4207 key(key), prog_data(&prog_data->base),
4208 dispatch_width(dispatch_width)
4209 {
4210 this->mem_ctx = mem_ctx;
4211 init();
4212 }
4213
4214 void
4215 fs_visitor::init()
4216 {
4217 switch (stage) {
4218 case MESA_SHADER_FRAGMENT:
4219 key_tex = &((const brw_wm_prog_key *) key)->tex;
4220 break;
4221 case MESA_SHADER_VERTEX:
4222 case MESA_SHADER_GEOMETRY:
4223 key_tex = &((const brw_vue_prog_key *) key)->tex;
4224 break;
4225 case MESA_SHADER_COMPUTE:
4226 key_tex = &((const brw_cs_prog_key *) key)->tex;
4227 break;
4228 default:
4229 unreachable("unhandled shader stage");
4230 }
4231
4232 this->failed = false;
4233 this->simd16_unsupported = false;
4234 this->no16_msg = NULL;
4235 this->variable_ht = hash_table_ctor(0,
4236 hash_table_pointer_hash,
4237 hash_table_pointer_compare);
4238
4239 this->nir_locals = NULL;
4240 this->nir_globals = NULL;
4241
4242 memset(&this->payload, 0, sizeof(this->payload));
4243 memset(this->outputs, 0, sizeof(this->outputs));
4244 memset(this->output_components, 0, sizeof(this->output_components));
4245 this->source_depth_to_render_target = false;
4246 this->runtime_check_aads_emit = false;
4247 this->first_non_payload_grf = 0;
4248 this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
4249
4250 this->current_annotation = NULL;
4251 this->base_ir = NULL;
4252
4253 this->virtual_grf_start = NULL;
4254 this->virtual_grf_end = NULL;
4255 this->live_intervals = NULL;
4256 this->regs_live_at_ip = NULL;
4257
4258 this->uniforms = 0;
4259 this->last_scratch = 0;
4260 this->pull_constant_loc = NULL;
4261 this->push_constant_loc = NULL;
4262
4263 this->spilled_any_registers = false;
4264 this->do_dual_src = false;
4265
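/* Only the SIMD8 compile allocates param_size; a later SIMD16 compile
 * presumably reuses the uniform layout established by the SIMD8 pass.
 */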
4266 if (dispatch_width == 8)
4267 this->param_size = rzalloc_array(mem_ctx, int, stage_prog_data->nr_params);
4268 }
4269
4270 fs_visitor::~fs_visitor()
4271 {
4272 hash_table_dtor(this->variable_ht);
4273 }