nir: Make bcsel a fully vector operation
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "glsl/ir.h"
25 #include "glsl/ir_optimization.h"
26 #include "glsl/nir/glsl_to_nir.h"
27 #include "brw_fs.h"
28
29 void
30 fs_visitor::emit_nir_code()
31 {
32 /* first, lower the GLSL IR shader to NIR */
33 lower_output_reads(shader->base.ir);
34 nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
35 nir_validate_shader(nir);
36
37 nir_lower_global_vars_to_local(nir);
38 nir_validate_shader(nir);
39
40 nir_split_var_copies(nir);
41 nir_validate_shader(nir);
42
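/* Run the optimization passes in a loop until none of them reports any
 * further progress; each pass can expose new opportunities for the others.
 */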
43 bool progress;
44 do {
45 progress = false;
46 nir_lower_variables(nir);
47 nir_validate_shader(nir);
48 progress |= nir_copy_prop(nir);
49 nir_validate_shader(nir);
50 progress |= nir_opt_dce(nir);
51 nir_validate_shader(nir);
52 progress |= nir_opt_cse(nir);
53 nir_validate_shader(nir);
54 progress |= nir_opt_peephole_select(nir);
55 nir_validate_shader(nir);
56 progress |= nir_opt_algebraic(nir);
57 nir_validate_shader(nir);
58 progress |= nir_opt_constant_folding(nir);
59 nir_validate_shader(nir);
60 } while (progress);
61
62 /* Lower I/O, locals, source modifiers, SSA form, vector ops, samplers, system values and atomics. */
63 nir_lower_io(nir);
64 nir_validate_shader(nir);
65
66 nir_lower_locals_to_regs(nir);
67 nir_validate_shader(nir);
68
69 nir_remove_dead_variables(nir);
70 nir_validate_shader(nir);
71
72 nir_lower_to_source_mods(nir);
73 nir_validate_shader(nir);
74 nir_copy_prop(nir);
75 nir_validate_shader(nir);
76 nir_convert_from_ssa(nir);
77 nir_validate_shader(nir);
78 nir_lower_vec_to_movs(nir);
79 nir_validate_shader(nir);
80
81 nir_lower_samplers(nir, shader_prog, shader->base.Program);
82 nir_validate_shader(nir);
83
84 nir_lower_system_values(nir);
85 nir_validate_shader(nir);
86
87 nir_lower_atomics(nir);
88 nir_validate_shader(nir);
89
90 /* emit the arrays used for inputs and outputs - load/store intrinsics will
91 * be converted to reads/writes of these arrays
92 */
93
94 if (nir->num_inputs > 0) {
95 nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
96 nir_setup_inputs(nir);
97 }
98
99 if (nir->num_outputs > 0) {
100 nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
101 nir_setup_outputs(nir);
102 }
103
104 if (nir->num_uniforms > 0) {
105 nir_uniforms = fs_reg(UNIFORM, 0);
106 nir_setup_uniforms(nir);
107 }
108
109 nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
110 foreach_list_typed(nir_register, reg, node, &nir->registers) {
111 unsigned array_elems =
112 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
113 unsigned size = array_elems * reg->num_components;
114 nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
115 }
116
117 /* get the main function and emit it */
118 nir_foreach_overload(nir, overload) {
119 assert(strcmp(overload->function->name, "main") == 0);
120 assert(overload->impl);
121 nir_emit_impl(overload->impl);
122 }
123
124 ralloc_free(nir);
125 }
126
127 void
128 fs_visitor::nir_setup_inputs(nir_shader *shader)
129 {
130 fs_reg varying = nir_inputs;
131
132 struct hash_entry *entry;
133 hash_table_foreach(shader->inputs, entry) {
134 nir_variable *var = (nir_variable *) entry->data;
135 varying.reg_offset = var->data.driver_location;
136
137 fs_reg reg;
138 if (!strcmp(var->name, "gl_FragCoord")) {
139 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
140 var->data.origin_upper_left);
141 emit_percomp(MOV(varying, reg), 0xF);
142 } else if (!strcmp(var->name, "gl_FrontFacing")) {
143 reg = *emit_frontfacing_interpolation();
144 emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
145 } else {
146 emit_general_interpolation(varying, var->name, var->type,
147 (glsl_interp_qualifier) var->data.interpolation,
148 var->data.location, var->data.centroid,
149 var->data.sample);
150 }
151 }
152 }
153
154 void
155 fs_visitor::nir_setup_outputs(nir_shader *shader)
156 {
157 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
158 fs_reg reg = nir_outputs;
159
160 struct hash_entry *entry;
161 hash_table_foreach(shader->outputs, entry) {
162 nir_variable *var = (nir_variable *) entry->data;
163 reg.reg_offset = var->data.driver_location;
164
165 if (var->data.index > 0) {
166 assert(var->data.location == FRAG_RESULT_DATA0);
167 assert(var->data.index == 1);
168 this->dual_src_output = reg;
169 this->do_dual_src = true;
170 } else if (var->data.location == FRAG_RESULT_COLOR) {
171 /* Writing gl_FragColor outputs to all color regions. */
172 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
173 this->outputs[i] = reg;
174 this->output_components[i] = 4;
175 }
176 } else if (var->data.location == FRAG_RESULT_DEPTH) {
177 this->frag_depth = reg;
178 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
179 this->sample_mask = reg;
180 } else {
181 /* gl_FragData or a user-defined FS output */
182 assert(var->data.location >= FRAG_RESULT_DATA0 &&
183 var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);
184
185 int vector_elements =
186 var->type->is_array() ? var->type->fields.array->vector_elements
187 : var->type->vector_elements;
188
189 /* General color output. */
190 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
191 int output = var->data.location - FRAG_RESULT_DATA0 + i;
192 this->outputs[output] = reg;
193 this->outputs[output].reg_offset += vector_elements * i;
194 this->output_components[output] = vector_elements;
195 }
196 }
197 }
198 }
199
200 void
201 fs_visitor::nir_setup_uniforms(nir_shader *shader)
202 {
203 uniforms = shader->num_uniforms;
204 param_size[0] = shader->num_uniforms;
205
206 if (dispatch_width != 8)
207 return;
208
209 struct hash_entry *entry;
210 hash_table_foreach(shader->uniforms, entry) {
211 nir_variable *var = (nir_variable *) entry->data;
212
213 /* UBOs and atomics don't take up space in the uniform file */
214
215 if (var->interface_type != NULL || var->type->contains_atomic())
216 continue;
217
218 if (strncmp(var->name, "gl_", 3) == 0)
219 nir_setup_builtin_uniform(var);
220 else
221 nir_setup_uniform(var);
222 }
223 }
224
225 void
226 fs_visitor::nir_setup_uniform(nir_variable *var)
227 {
228 int namelen = strlen(var->name);
229
230 /* The data for our (non-builtin) uniforms is stored in a series of
231 * gl_uniform_driver_storage structs for each subcomponent that
232 * glGetUniformLocation() could name. We know it's been set up in the
233 * same order we'd walk the type, so walk the list of storage and find
234 * anything with our name, or the prefix of a component that starts with
235 * our name.
236 */
237 unsigned index = var->data.driver_location;
238 for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
239 struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
240
241 if (strncmp(var->name, storage->name, namelen) != 0 ||
242 (storage->name[namelen] != 0 &&
243 storage->name[namelen] != '.' &&
244 storage->name[namelen] != '[')) {
245 continue;
246 }
247
248 unsigned slots = storage->type->component_slots();
249 if (storage->array_elements)
250 slots *= storage->array_elements;
251
252 for (unsigned i = 0; i < slots; i++) {
253 stage_prog_data->param[index++] = &storage->storage[i];
254 }
255 }
256
257 /* Make sure we actually initialized the right number of uniform slots here. */
258 assert(var->data.driver_location + var->type->component_slots() == index);
259 }
260
261 void
262 fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
263 {
264 const nir_state_slot *const slots = var->state_slots;
265 assert(var->state_slots != NULL);
266
267 unsigned uniform_index = var->data.driver_location;
268 for (unsigned int i = 0; i < var->num_state_slots; i++) {
269 /* This state reference has already been set up by ir_to_mesa, but we'll
270 * get the same index back here.
271 */
272 int index = _mesa_add_state_reference(this->prog->Parameters,
273 (gl_state_index *)slots[i].tokens);
274
275 /* Add each of the unique swizzles of the element as a parameter.
276 * This'll end up matching the expected layout of the
277 * array/matrix/structure we're trying to fill in.
278 */
279 int last_swiz = -1;
280 for (unsigned int j = 0; j < 4; j++) {
281 int swiz = GET_SWZ(slots[i].swizzle, j);
282 if (swiz == last_swiz)
283 break;
284 last_swiz = swiz;
285
286 stage_prog_data->param[uniform_index++] =
287 &prog->Parameters->ParameterValues[index][swiz];
288 }
289 }
290 }
291
292 void
293 fs_visitor::nir_emit_impl(nir_function_impl *impl)
294 {
295 nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
296 foreach_list_typed(nir_register, reg, node, &impl->registers) {
297 unsigned array_elems =
298 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
299 unsigned size = array_elems * reg->num_components;
300 nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
301 }
302
303 nir_emit_cf_list(&impl->body);
304 }
305
306 void
307 fs_visitor::nir_emit_cf_list(exec_list *list)
308 {
309 foreach_list_typed(nir_cf_node, node, node, list) {
310 switch (node->type) {
311 case nir_cf_node_if:
312 nir_emit_if(nir_cf_node_as_if(node));
313 break;
314
315 case nir_cf_node_loop:
316 nir_emit_loop(nir_cf_node_as_loop(node));
317 break;
318
319 case nir_cf_node_block:
320 nir_emit_block(nir_cf_node_as_block(node));
321 break;
322
323 default:
324 unreachable("Invalid CFG node block");
325 }
326 }
327 }
328
329 void
330 fs_visitor::nir_emit_if(nir_if *if_stmt)
331 {
332 if (brw->gen < 6) {
333 no16("Can't support (non-uniform) control flow on SIMD16\n");
334 }
335
336 /* first, put the condition into f0 */
337 fs_inst *inst = emit(MOV(reg_null_d,
338 retype(get_nir_src(if_stmt->condition),
339 BRW_REGISTER_TYPE_UD)));
340 inst->conditional_mod = BRW_CONDITIONAL_NZ;
341
342 emit(IF(BRW_PREDICATE_NORMAL));
343
344 nir_emit_cf_list(&if_stmt->then_list);
345
346 /* note: if the else is empty, dead CF elimination will remove it */
347 emit(BRW_OPCODE_ELSE);
348
349 nir_emit_cf_list(&if_stmt->else_list);
350
351 emit(BRW_OPCODE_ENDIF);
352
353 try_replace_with_sel();
354 }
355
356 void
357 fs_visitor::nir_emit_loop(nir_loop *loop)
358 {
359 if (brw->gen < 6) {
360 no16("Can't support (non-uniform) control flow on SIMD16\n");
361 }
362
363 emit(BRW_OPCODE_DO);
364
365 nir_emit_cf_list(&loop->body);
366
367 emit(BRW_OPCODE_WHILE);
368 }
369
370 void
371 fs_visitor::nir_emit_block(nir_block *block)
372 {
373 nir_foreach_instr(block, instr) {
374 nir_emit_instr(instr);
375 }
376 }
377
378 void
379 fs_visitor::nir_emit_instr(nir_instr *instr)
380 {
381 switch (instr->type) {
382 case nir_instr_type_alu:
383 nir_emit_alu(nir_instr_as_alu(instr));
384 break;
385
386 case nir_instr_type_intrinsic:
387 nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
388 break;
389
390 case nir_instr_type_tex:
391 nir_emit_texture(nir_instr_as_tex(instr));
392 break;
393
394 case nir_instr_type_load_const:
395 nir_emit_load_const(nir_instr_as_load_const(instr));
396 break;
397
398 case nir_instr_type_jump:
399 nir_emit_jump(nir_instr_as_jump(instr));
400 break;
401
402 default:
403 unreachable("unknown instruction type");
404 }
405 }
406
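/* Map a NIR ALU type onto the corresponding BRW register type: booleans and
 * unsigned integers become UD, signed integers D, and floats F.
 */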
407 static brw_reg_type
408 brw_type_for_nir_type(nir_alu_type type)
409 {
410 switch (type) {
411 case nir_type_bool:
412 case nir_type_unsigned:
413 return BRW_REGISTER_TYPE_UD;
414 case nir_type_int:
415 return BRW_REGISTER_TYPE_D;
416 case nir_type_float:
417 return BRW_REGISTER_TYPE_F;
418 default:
419 unreachable("unknown type");
420 }
421
422 return BRW_REGISTER_TYPE_F;
423 }
424
425 void
426 fs_visitor::nir_emit_alu(nir_alu_instr *instr)
427 {
428 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
429
430 fs_reg op[3];
431 fs_reg dest = get_nir_dest(instr->dest.dest);
432 dest.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
433
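/* If the instruction is predicated, compute into a temporary and copy it to
 * the real destination with a predicated MOV at the end of this function.
 */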
434 fs_reg result;
435 if (instr->has_predicate) {
436 result = fs_reg(GRF, virtual_grf_alloc(4));
437 result.type = dest.type;
438 } else {
439 result = dest;
440 }
441
442
443 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
444 op[i] = get_nir_alu_src(instr, i);
445
446 switch (instr->op) {
447 case nir_op_fmov:
448 case nir_op_i2f:
449 case nir_op_u2f: {
450 fs_inst *inst = MOV(result, op[0]);
451 inst->saturate = instr->dest.saturate;
452 emit_percomp(inst, instr->dest.write_mask);
453 }
454 break;
455
456 case nir_op_imov:
457 case nir_op_f2i:
458 case nir_op_f2u:
459 emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
460 break;
461
462 case nir_op_fsign: {
463 /* AND(val, 0x80000000) gives the sign bit.
464 *
465 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
466 * zero.
467 */
468 emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
469 instr->dest.write_mask);
470
471 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
472 op[0].type = BRW_REGISTER_TYPE_UD;
473 result.type = BRW_REGISTER_TYPE_UD;
474 emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
475 instr->dest.write_mask);
476
477 fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
478 inst->predicate = BRW_PREDICATE_NORMAL;
479 emit_percomp(inst, instr->dest.write_mask);
480 if (instr->dest.saturate) {
481 fs_inst *inst = MOV(result, result);
482 inst->saturate = true;
483 emit_percomp(inst, instr->dest.write_mask);
484 }
485 break;
486 }
487
488 case nir_op_isign: {
489 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
490 * -> non-negative val generates 0x00000000.
491 * Predicated OR sets 1 if val is positive.
492 */
493 emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
494 instr->dest.write_mask);
495
496 emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);
497
498 fs_inst *inst = OR(result, result, fs_reg(1));
499 inst->predicate = BRW_PREDICATE_NORMAL;
500 emit_percomp(inst, instr->dest.write_mask);
501 break;
502 }
503
504 case nir_op_frcp:
505 emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
506 instr->dest.write_mask, instr->dest.saturate);
507 break;
508
509 case nir_op_fexp2:
510 emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
511 instr->dest.write_mask, instr->dest.saturate);
512 break;
513
514 case nir_op_flog2:
515 emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
516 instr->dest.write_mask, instr->dest.saturate);
517 break;
518
519 case nir_op_fexp:
520 case nir_op_flog:
521 unreachable("not reached: should be handled by ir_explog_to_explog2");
522
523 case nir_op_fsin:
524 case nir_op_fsin_reduced:
525 emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
526 instr->dest.write_mask, instr->dest.saturate);
527 break;
528
529 case nir_op_fcos:
530 case nir_op_fcos_reduced:
531 emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
532 instr->dest.write_mask, instr->dest.saturate);
533 break;
534
535 case nir_op_fddx:
536 if (fs_key->high_quality_derivatives)
537 emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
538 instr->dest.write_mask, instr->dest.saturate);
539 else
540 emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
541 instr->dest.write_mask, instr->dest.saturate);
542 break;
543 case nir_op_fddx_fine:
544 emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
545 instr->dest.write_mask, instr->dest.saturate);
546 break;
547 case nir_op_fddx_coarse:
548 emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
549 instr->dest.write_mask, instr->dest.saturate);
550 break;
551 case nir_op_fddy:
552 if (fs_key->high_quality_derivatives)
553 emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
554 fs_reg(fs_key->render_to_fbo),
555 instr->dest.write_mask, instr->dest.saturate);
556 else
557 emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
558 fs_reg(fs_key->render_to_fbo),
559 instr->dest.write_mask, instr->dest.saturate);
560 break;
561 case nir_op_fddy_fine:
562 emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
563 fs_reg(fs_key->render_to_fbo),
564 instr->dest.write_mask, instr->dest.saturate);
565 break;
566 case nir_op_fddy_coarse:
567 emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
568 fs_reg(fs_key->render_to_fbo),
569 instr->dest.write_mask, instr->dest.saturate);
570 break;
571
572 case nir_op_fadd:
573 case nir_op_iadd: {
574 fs_inst *inst = ADD(result, op[0], op[1]);
575 inst->saturate = instr->dest.saturate;
576 emit_percomp(inst, instr->dest.write_mask);
577 break;
578 }
579
580 case nir_op_fmul: {
581 fs_inst *inst = MUL(result, op[0], op[1]);
582 inst->saturate = instr->dest.saturate;
583 emit_percomp(inst, instr->dest.write_mask);
584 break;
585 }
586
587 case nir_op_imul: {
588 /* TODO put in the 16-bit constant optimization once we have SSA */
589
590 if (brw->gen >= 7)
591 no16("SIMD16 explicit accumulator operands unsupported\n");
592
593 struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
594
595 emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
596 emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
597 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
598 break;
599 }
600
601 case nir_op_imul_high:
602 case nir_op_umul_high: {
603 if (brw->gen >= 7)
604 no16("SIMD16 explicit accumulator operands unsupported\n");
605
606 struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
607
608 emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
609 emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
610 break;
611 }
612
613 case nir_op_idiv:
614 case nir_op_udiv:
615 emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
616 instr->dest.write_mask);
617 break;
618
619 case nir_op_uadd_carry: {
620 if (brw->gen >= 7)
621 no16("SIMD16 explicit accumulator operands unsupported\n");
622
623 struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
624 BRW_REGISTER_TYPE_UD);
625
626 emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
627 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
628 break;
629 }
630
631 case nir_op_usub_borrow: {
632 if (brw->gen >= 7)
633 no16("SIMD16 explicit accumulator operands unsupported\n");
634
635 struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
636 BRW_REGISTER_TYPE_UD);
637
638 emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
639 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
640 break;
641 }
642
643 case nir_op_umod:
644 emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
645 op[1], instr->dest.write_mask);
646 break;
647
648 case nir_op_flt:
649 case nir_op_ilt:
650 case nir_op_ult:
651 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
652 instr->dest.write_mask);
653 break;
654
655 case nir_op_fge:
656 case nir_op_ige:
657 case nir_op_uge:
658 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
659 instr->dest.write_mask);
660 break;
661
662 case nir_op_feq:
663 case nir_op_ieq:
664 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
665 instr->dest.write_mask);
666 break;
667
668 case nir_op_fne:
669 case nir_op_ine:
670 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
671 instr->dest.write_mask);
672 break;
673
674 case nir_op_ball_fequal2:
675 case nir_op_ball_iequal2:
676 case nir_op_ball_fequal3:
677 case nir_op_ball_iequal3:
678 case nir_op_ball_fequal4:
679 case nir_op_ball_iequal4: {
680 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
681 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
682 emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
683 (1 << num_components) - 1);
684 emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
685 break;
686 }
687
688 case nir_op_bany_fnequal2:
689 case nir_op_bany_inequal2:
690 case nir_op_bany_fnequal3:
691 case nir_op_bany_inequal3:
692 case nir_op_bany_fnequal4:
693 case nir_op_bany_inequal4: {
694 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
695 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
696 temp.type = BRW_REGISTER_TYPE_UD;
697 emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
698 (1 << num_components) - 1);
699 emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
700 break;
701 }
702
703 case nir_op_inot:
704 emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
705 break;
706 case nir_op_ixor:
707 emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
708 break;
709 case nir_op_ior:
710 emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
711 break;
712 case nir_op_iand:
713 emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
714 break;
715
716 case nir_op_fdot2:
717 case nir_op_fdot3:
718 case nir_op_fdot4: {
719 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
720 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
721 emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
722 emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
723 if (instr->dest.saturate) {
724 fs_inst *inst = emit(MOV(result, result));
725 inst->saturate = true;
726 }
727 break;
728 }
729
730 case nir_op_bany2:
731 case nir_op_bany3:
732 case nir_op_bany4: {
733 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
734 emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
735 break;
736 }
737
738 case nir_op_ball2:
739 case nir_op_ball3:
740 case nir_op_ball4: {
741 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
742 emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
743 break;
744 }
745
746 case nir_op_fnoise1_1:
747 case nir_op_fnoise1_2:
748 case nir_op_fnoise1_3:
749 case nir_op_fnoise1_4:
750 case nir_op_fnoise2_1:
751 case nir_op_fnoise2_2:
752 case nir_op_fnoise2_3:
753 case nir_op_fnoise2_4:
754 case nir_op_fnoise3_1:
755 case nir_op_fnoise3_2:
756 case nir_op_fnoise3_3:
757 case nir_op_fnoise3_4:
758 case nir_op_fnoise4_1:
759 case nir_op_fnoise4_2:
760 case nir_op_fnoise4_3:
761 case nir_op_fnoise4_4:
762 unreachable("not reached: should be handled by lower_noise");
763
764 case nir_op_vec2:
765 case nir_op_vec3:
766 case nir_op_vec4:
767 unreachable("not reached: should be handled by lower_quadop_vector");
768
769 case nir_op_ldexp:
770 unreachable("not reached: should be handled by ldexp_to_arith()");
771
772 case nir_op_fsqrt:
773 emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
774 instr->dest.write_mask, instr->dest.saturate);
775 break;
776
777 case nir_op_frsq:
778 emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
779 instr->dest.write_mask, instr->dest.saturate);
780 break;
781
782 case nir_op_b2i:
783 emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
784 break;
785 case nir_op_b2f: {
786 emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
787 fs_reg(0x3f800000u)),
788 instr->dest.write_mask);
789 break;
790 }
791
792 case nir_op_f2b:
793 emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
794 instr->dest.write_mask);
795 break;
796 case nir_op_i2b:
797 emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
798 instr->dest.write_mask);
799 break;
800
801 case nir_op_ftrunc: {
802 fs_inst *inst = RNDZ(result, op[0]);
803 inst->saturate = instr->dest.saturate;
804 emit_percomp(inst, instr->dest.write_mask);
805 break;
806 }
807 case nir_op_fceil: {
808 op[0].negate = !op[0].negate;
809 fs_reg temp = fs_reg(this, glsl_type::vec4_type);
810 emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
811 temp.negate = true;
812 fs_inst *inst = MOV(result, temp);
813 inst->saturate = instr->dest.saturate;
814 emit_percomp(inst, instr->dest.write_mask);
815 break;
816 }
817 case nir_op_ffloor: {
818 fs_inst *inst = RNDD(result, op[0]);
819 inst->saturate = instr->dest.saturate;
820 emit_percomp(inst, instr->dest.write_mask);
821 break;
822 }
823 case nir_op_ffract: {
824 fs_inst *inst = FRC(result, op[0]);
825 inst->saturate = instr->dest.saturate;
826 emit_percomp(inst, instr->dest.write_mask);
827 break;
828 }
829 case nir_op_fround_even: {
830 fs_inst *inst = RNDE(result, op[0]);
831 inst->saturate = instr->dest.saturate;
832 emit_percomp(inst, instr->dest.write_mask);
833 break;
834 }
835
836 case nir_op_fmin:
837 case nir_op_imin:
838 case nir_op_umin:
839 if (brw->gen >= 6) {
840 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
841 instr->dest.write_mask, instr->dest.saturate,
842 BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
843 } else {
844 emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
845 instr->dest.write_mask);
846
847 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
848 instr->dest.write_mask, instr->dest.saturate,
849 BRW_PREDICATE_NORMAL);
850 }
851 break;
852
853 case nir_op_fmax:
854 case nir_op_imax:
855 case nir_op_umax:
856 if (brw->gen >= 6) {
857 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
858 instr->dest.write_mask, instr->dest.saturate,
859 BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
860 } else {
861 emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
862 instr->dest.write_mask);
863
864 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
865 instr->dest.write_mask, instr->dest.saturate,
866 BRW_PREDICATE_NORMAL);
867 }
868 break;
869
870 case nir_op_pack_snorm_2x16:
871 case nir_op_pack_snorm_4x8:
872 case nir_op_pack_unorm_2x16:
873 case nir_op_pack_unorm_4x8:
874 case nir_op_unpack_snorm_2x16:
875 case nir_op_unpack_snorm_4x8:
876 case nir_op_unpack_unorm_2x16:
877 case nir_op_unpack_unorm_4x8:
878 case nir_op_unpack_half_2x16:
879 case nir_op_pack_half_2x16:
880 unreachable("not reached: should be handled by lower_packing_builtins");
881
882 case nir_op_unpack_half_2x16_split_x:
883 emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
884 instr->dest.write_mask, instr->dest.saturate);
885 break;
886 case nir_op_unpack_half_2x16_split_y:
887 emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
888 instr->dest.write_mask, instr->dest.saturate);
889 break;
890
891 case nir_op_fpow:
892 emit_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
893 instr->dest.write_mask, instr->dest.saturate);
894 break;
895
896 case nir_op_bitfield_reverse:
897 emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
898 break;
899
900 case nir_op_bit_count:
901 emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
902 break;
903
904 case nir_op_ufind_msb:
905 case nir_op_ifind_msb: {
906 emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
907 instr->dest.write_mask);
908
909 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
910 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
911 * subtract the result from 31 to convert the MSB count into an LSB count.
912 */
913
914 emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
915 instr->dest.write_mask);
916 fs_reg neg_result(result);
917 neg_result.negate = true;
918 fs_inst *inst = ADD(result, neg_result, fs_reg(31));
919 inst->predicate = BRW_PREDICATE_NORMAL;
920 emit_percomp(inst, instr->dest.write_mask);
921 break;
922 }
923
924 case nir_op_find_lsb:
925 emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
926 break;
927
928 case nir_op_ubitfield_extract:
929 case nir_op_ibitfield_extract:
930 emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
931 break;
932 case nir_op_bfm:
933 emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
934 break;
935 case nir_op_bfi:
936 emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
937 break;
938
939 case nir_op_bitfield_insert:
940 unreachable("not reached: should be handled by "
941 "lower_instructions::bitfield_insert_to_bfm_bfi");
942
943 case nir_op_ishl:
944 emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
945 break;
946 case nir_op_ishr:
947 emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
948 break;
949 case nir_op_ushr:
950 emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
951 break;
952
953 case nir_op_pack_half_2x16_split:
954 emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
955 instr->dest.write_mask);
956 break;
957
958 case nir_op_ffma:
959 emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
960 break;
961
962 case nir_op_flrp:
963 /* TODO emulate for gen < 6 */
964 emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
965 break;
966
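/* bcsel is handled as a fully vector operation: for each component enabled
 * in the writemask, compare that component of the condition against zero
 * and pick the matching component of the two sources with a predicated SEL.
 */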
967 case nir_op_bcsel:
968 for (unsigned i = 0; i < 4; i++) {
969 if (!((instr->dest.write_mask >> i) & 1))
970 continue;
971
972 emit(CMP(reg_null_d, offset(op[0], i), fs_reg(0), BRW_CONDITIONAL_NZ));
973 emit(SEL(offset(result, i), offset(op[1], i), offset(op[2], i)))
974 ->predicate = BRW_PREDICATE_NORMAL;
975 }
976 break;
977
978 default:
979 unreachable("unhandled instruction");
980 }
981
982 /* emit a predicated move if there was predication */
983 if (instr->has_predicate) {
984 fs_inst *inst = emit(MOV(reg_null_d,
985 retype(get_nir_src(instr->predicate),
986 BRW_REGISTER_TYPE_UD)));
987 inst->conditional_mod = BRW_CONDITIONAL_NZ;
988 inst = MOV(dest, result);
989 inst->predicate = BRW_PREDICATE_NORMAL;
990 emit_percomp(inst, instr->dest.write_mask);
991 }
992 }
993
994 fs_reg
995 fs_visitor::get_nir_src(nir_src src)
996 {
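/* After nir_convert_from_ssa the only SSA values left are load_const
 * immediates; materialize them into a fresh GRF with one MOV per component.
 */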
997 if (src.is_ssa) {
998 assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
999 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1000 fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
1001 BRW_REGISTER_TYPE_D);
1002
1003 for (unsigned i = 0; i < src.ssa->num_components; ++i)
1004 emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));
1005
1006 return reg;
1007 } else {
1008 fs_reg reg;
1009 if (src.reg.reg->is_global)
1010 reg = nir_globals[src.reg.reg->index];
1011 else
1012 reg = nir_locals[src.reg.reg->index];
1013
1014 /* To avoid floating-point denorm flushing problems, set the type by
1015 * default to D; instructions that need floating-point semantics will set
1016 * it to F themselves.
1017 */
1018 reg.type = BRW_REGISTER_TYPE_D;
1019 reg.reg_offset = src.reg.base_offset;
1020 if (src.reg.indirect) {
1021 reg.reladdr = new(mem_ctx) fs_reg();
1022 *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
1023 BRW_REGISTER_TYPE_D);
1024 }
1025
1026 return reg;
1027 }
1028 }
1029
1030 fs_reg
1031 fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
1032 {
1033 fs_reg reg = get_nir_src(instr->src[src].src);
1034
1035 reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
1036 reg.abs = instr->src[src].abs;
1037 reg.negate = instr->src[src].negate;
1038
1039 bool needs_swizzle = false;
1040 unsigned num_components = 0;
1041 for (unsigned i = 0; i < 4; i++) {
1042 if (!nir_alu_instr_channel_used(instr, src, i))
1043 continue;
1044
1045 if (instr->src[src].swizzle[i] != i)
1046 needs_swizzle = true;
1047
1048 num_components = i + 1;
1049 }
1050
1051 if (needs_swizzle) {
1052 /* resolve the swizzle through MOV's */
1053 fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components), reg.type);
1054
1055 for (unsigned i = 0; i < 4; i++) {
1056 if (!nir_alu_instr_channel_used(instr, src, i))
1057 continue;
1058
1059 emit(MOV(offset(new_reg, i),
1060 offset(reg, instr->src[src].swizzle[i])));
1061 }
1062
1063 return new_reg;
1064 }
1065
1066 return reg;
1067 }
1068
1069 fs_reg
1070 fs_visitor::get_nir_dest(nir_dest dest)
1071 {
1072 fs_reg reg;
1073 if (dest.reg.reg->is_global)
1074 reg = nir_globals[dest.reg.reg->index];
1075 else
1076 reg = nir_locals[dest.reg.reg->index];
1077
1078 reg.reg_offset = dest.reg.base_offset;
1079 if (dest.reg.indirect) {
1080 reg.reladdr = new(mem_ctx) fs_reg();
1081 *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
1082 BRW_REGISTER_TYPE_D);
1083 }
1084
1085 return reg;
1086 }
1087
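/* Emit one copy of the given instruction per component enabled in the
 * writemask, advancing the destination (and any GRF source) reg_offset to
 * the matching component each time.
 */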
1088 void
1089 fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
1090 {
1091 for (unsigned i = 0; i < 4; i++) {
1092 if (!((wr_mask >> i) & 1))
1093 continue;
1094
1095 fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
1096 new_inst->dst.reg_offset += i;
1097 for (unsigned j = 0; j < new_inst->sources; j++)
1098 if (inst->src[j].file == GRF)
1099 new_inst->src[j].reg_offset += i;
1100
1101 emit(new_inst);
1102 }
1103 }
1104
1105 void
1106 fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1107 unsigned wr_mask, bool saturate,
1108 enum brw_predicate predicate,
1109 enum brw_conditional_mod mod)
1110 {
1111 for (unsigned i = 0; i < 4; i++) {
1112 if (!((wr_mask >> i) & 1))
1113 continue;
1114
1115 fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
1116 new_inst->dst.reg_offset += i;
1117 for (unsigned j = 0; j < new_inst->sources; j++)
1118 if (new_inst->src[j].file == GRF)
1119 new_inst->src[j].reg_offset += i;
1120
1121 new_inst->predicate = predicate;
1122 new_inst->conditional_mod = mod;
1123 new_inst->saturate = saturate;
1124 emit(new_inst);
1125 }
1126 }
1127
1128 void
1129 fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0, fs_reg src1,
1130 unsigned wr_mask, bool saturate,
1131 enum brw_predicate predicate,
1132 enum brw_conditional_mod mod)
1133 {
1134 for (unsigned i = 0; i < 4; i++) {
1135 if (!((wr_mask >> i) & 1))
1136 continue;
1137
1138 fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
1139 new_inst->dst.reg_offset += i;
1140 for (unsigned j = 0; j < new_inst->sources; j++)
1141 if (new_inst->src[j].file == GRF)
1142 new_inst->src[j].reg_offset += i;
1143
1144 new_inst->predicate = predicate;
1145 new_inst->conditional_mod = mod;
1146 new_inst->saturate = saturate;
1147 emit(new_inst);
1148 }
1149 }
1150
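/* Per-component variant of emit_math(): emit a separate math instruction for
 * each component selected by the writemask, applying the saturate flag to
 * every emitted instruction.
 */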
1151 void
1152 fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1153 unsigned wr_mask, bool saturate)
1154 {
1155 for (unsigned i = 0; i < 4; i++) {
1156 if (!((wr_mask >> i) & 1))
1157 continue;
1158
1159 fs_reg new_dest = dest;
1160 new_dest.reg_offset += i;
1161 fs_reg new_src0 = src0;
1162 if (src0.file == GRF)
1163 new_src0.reg_offset += i;
1164
1165 fs_inst *new_inst = emit_math(op, new_dest, new_src0);
1166 new_inst->saturate = saturate;
1167 }
1168 }
1169
1170 void
1171 fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1172 fs_reg src1, unsigned wr_mask,
1173 bool saturate)
1174 {
1175 for (unsigned i = 0; i < 4; i++) {
1176 if (!((wr_mask >> i) & 1))
1177 continue;
1178
1179 fs_reg new_dest = dest;
1180 new_dest.reg_offset += i;
1181 fs_reg new_src0 = src0;
1182 if (src0.file == GRF)
1183 new_src0.reg_offset += i;
1184 fs_reg new_src1 = src1;
1185 if (src1.file == GRF)
1186 new_src1.reg_offset += i;
1187
1188 fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
1189 new_inst->saturate = saturate;
1190 }
1191 }
1192
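/* Reduce the first num_components components of src into dest by combining
 * them two at a time with the given binary opcode.
 */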
1193 void
1194 fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
1195 unsigned num_components)
1196 {
1197 fs_reg src0 = src;
1198 fs_reg src1 = src;
1199 src1.reg_offset++;
1200
1201 if (num_components == 2) {
1202 emit(op, dest, src0, src1);
1203 return;
1204 }
1205
1206 fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
1207 temp1.type = src.type;
1208 emit(op, temp1, src0, src1);
1209
1210 fs_reg src2 = src;
1211 src2.reg_offset += 2;
1212
1213 if (num_components == 3) {
1214 emit(op, dest, temp1, src2);
1215 return;
1216 }
1217
1218 assert(num_components == 4);
1219
1220 fs_reg src3 = src;
1221 src3.reg_offset += 3;
1222 fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
1223 temp2.type = src.type;
1224
1225 emit(op, temp2, src2, src3);
1226 emit(op, dest, temp1, temp2);
1227 }
1228
1229 void
1230 fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
1231 {
1232 fs_reg dest;
1233 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1234 dest = get_nir_dest(instr->dest);
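/* If the intrinsic is predicated, load the predicate into the flag register
 * so the instructions emitted below can be predicated on it.
 */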
1235 if (instr->has_predicate) {
1236 fs_inst *inst = emit(MOV(reg_null_d,
1237 retype(get_nir_src(instr->predicate),
1238 BRW_REGISTER_TYPE_UD)));
1239 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1240 }
1241
1242 bool has_indirect = false;
1243
1244 switch (instr->intrinsic) {
1245 case nir_intrinsic_discard: {
1246 /* We track our discarded pixels in f0.1. By predicating on it, we can
1247 * update just the flag bits that aren't yet discarded. By emitting a
1248 * CMP of g0 != g0, all our currently executing channels will get turned
1249 * off.
1250 */
1251 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
1252 BRW_REGISTER_TYPE_UW));
1253 fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
1254 BRW_CONDITIONAL_NZ));
1255 cmp->predicate = BRW_PREDICATE_NORMAL;
1256 cmp->flag_subreg = 1;
1257
1258 if (brw->gen >= 6) {
1259 /* For performance, after a discard, jump to the end of the shader.
1260 * Only jump if all relevant channels have been discarded.
1261 */
1262 fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
1263 discard_jump->flag_subreg = 1;
1264
1265 discard_jump->predicate = (dispatch_width == 8)
1266 ? BRW_PREDICATE_ALIGN1_ANY8H
1267 : BRW_PREDICATE_ALIGN1_ANY16H;
1268 discard_jump->predicate_inverse = true;
1269 }
1270
1271 break;
1272 }
1273
1274 case nir_intrinsic_atomic_counter_inc:
1275 case nir_intrinsic_atomic_counter_dec:
1276 case nir_intrinsic_atomic_counter_read: {
1277 unsigned surf_index = prog_data->binding_table.abo_start +
1278 (unsigned) instr->const_index[0];
1279 fs_reg offset = fs_reg(get_nir_src(instr->src[0]));
1280
1281 switch (instr->intrinsic) {
1282 case nir_intrinsic_atomic_counter_inc:
1283 emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
1284 fs_reg(), fs_reg());
1285 break;
1286 case nir_intrinsic_atomic_counter_dec:
1287 emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
1288 fs_reg(), fs_reg());
1289 break;
1290 case nir_intrinsic_atomic_counter_read:
1291 emit_untyped_surface_read(surf_index, dest, offset);
1292 break;
1293 default:
1294 unreachable("Unreachable");
1295 }
1296 break;
1297 }
1298
1299 case nir_intrinsic_load_front_face:
1300 assert(!"TODO");
1301
1302 case nir_intrinsic_load_sample_mask_in: {
1303 assert(brw->gen >= 7);
1304 fs_reg reg = fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
1305 BRW_REGISTER_TYPE_D));
1306 dest.type = reg.type;
1307 fs_inst *inst = MOV(dest, reg);
1308 if (instr->has_predicate)
1309 inst->predicate = BRW_PREDICATE_NORMAL;
1310 emit(inst);
1311 break;
1312 }
1313
1314 case nir_intrinsic_load_sample_pos: {
1315 fs_reg *reg = emit_samplepos_setup();
1316 dest.type = reg->type;
1317 emit(MOV(dest, *reg));
1318 emit(MOV(offset(dest, 1), offset(*reg, 1)));
1319 break;
1320 }
1321
1322 case nir_intrinsic_load_sample_id: {
1323 fs_reg *reg = emit_sampleid_setup();
1324 dest.type = reg->type;
1325 emit(MOV(dest, *reg));
1326 break;
1327 }
1328
1329 case nir_intrinsic_load_uniform_indirect:
1330 has_indirect = true;
1331 case nir_intrinsic_load_uniform: {
1332 unsigned index = 0;
1333 for (int i = 0; i < instr->const_index[1]; i++) {
1334 for (unsigned j = 0; j < instr->num_components; j++) {
1335 fs_reg src = nir_uniforms;
1336 src.reg_offset = instr->const_index[0] + index;
1337 if (has_indirect)
1338 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
1339 src.type = dest.type;
1340 index++;
1341
1342 fs_inst *inst = MOV(dest, src);
1343 if (instr->has_predicate)
1344 inst->predicate = BRW_PREDICATE_NORMAL;
1345 emit(inst);
1346 dest.reg_offset++;
1347 }
1348 }
1349 break;
1350 }
1351
1352 case nir_intrinsic_load_ubo_indirect:
1353 has_indirect = true;
1354 case nir_intrinsic_load_ubo: {
1355 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
1356 fs_reg surf_index;
1357
1358 if (const_index) {
1359 surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
1360 const_index->u[0]);
1361 } else {
1362 /* The block index is not a constant. Evaluate the index expression
1363 * per-channel and add the base UBO index; the generator will select
1364 * a value from any live channel.
1365 */
1366 surf_index = fs_reg(this, glsl_type::uint_type);
1367 emit(ADD(surf_index, get_nir_src(instr->src[0]),
1368 fs_reg(stage_prog_data->binding_table.ubo_start)))
1369 ->force_writemask_all = true;
1370
1371 /* Assume this may touch any UBO. It would be nice to provide
1372 * a tighter bound, but the array information is already lowered away.
1373 */
1374 brw_mark_surface_used(prog_data,
1375 stage_prog_data->binding_table.ubo_start +
1376 shader_prog->NumUniformBlocks - 1);
1377 }
1378
1379 if (has_indirect) {
1380 /* Turn the byte offset into a dword offset. */
1381 fs_reg base_offset = fs_reg(this, glsl_type::int_type);
1382 emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
1383 BRW_REGISTER_TYPE_D),
1384 fs_reg(2)));
1385
1386 unsigned vec4_offset = instr->const_index[0] / 4;
1387 for (int i = 0; i < instr->num_components; i++) {
1388 exec_list list = VARYING_PULL_CONSTANT_LOAD(offset(dest, i),
1389 surf_index, base_offset,
1390 vec4_offset + i);
1391
1392 fs_inst *last_inst = (fs_inst *) list.get_tail();
1393 if (instr->has_predicate)
1394 last_inst->predicate = BRW_PREDICATE_NORMAL;
1395 emit(list);
1396 }
1397 } else {
1398 fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
1399 packed_consts.type = dest.type;
1400
1401 fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
1402 emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
1403 surf_index, const_offset_reg);
1404
1405 for (unsigned i = 0; i < instr->num_components; i++) {
1406 packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
1407
1408 /* The std140 packing rules don't allow vectors to cross 16-byte
1409 * boundaries, and a reg is 32 bytes.
1410 */
1411 assert(packed_consts.subreg_offset < 32);
1412
1413 fs_inst *inst = MOV(dest, packed_consts);
1414 if (instr->has_predicate)
1415 inst->predicate = BRW_PREDICATE_NORMAL;
1416 emit(inst);
1417
1418 dest.reg_offset++;
1419 }
1420 }
1421 break;
1422 }
1423
1424 case nir_intrinsic_load_input_indirect:
1425 has_indirect = true;
1426 case nir_intrinsic_load_input: {
1427 unsigned index = 0;
1428 for (int i = 0; i < instr->const_index[1]; i++) {
1429 for (unsigned j = 0; j < instr->num_components; j++) {
1430 fs_reg src = nir_inputs;
1431 src.reg_offset = instr->const_index[0] + index;
1432 if (has_indirect)
1433 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
1434 src.type = dest.type;
1435 index++;
1436
1437 fs_inst *inst = MOV(dest, src);
1438 if (instr->has_predicate)
1439 inst->predicate = BRW_PREDICATE_NORMAL;
1440 emit(inst);
1441 dest.reg_offset++;
1442 }
1443 }
1444 break;
1445 }
1446
1447 /* Handle ARB_gpu_shader5 interpolation intrinsics
1448 *
1449 * It's worth a quick word of explanation as to why we handle the full
1450 * variable-based interpolation intrinsic rather than a lowered version
1451 * like we do for other inputs. We have to do that because the way
1452 * we set up inputs doesn't allow us to use the already setup inputs for
1453 * interpolation. At the beginning of the shader, we go through all of
1454 * the input variables and do the initial interpolation and put it in
1455 * the nir_inputs array based on its location as determined in
1456 * nir_lower_io. If the input isn't used, dead code cleans up and
1457 * everything works fine. However, when we get to the ARB_gpu_shader5
1458 * interpolation intrinsics, we need to reinterpolate the input
1459 * differently. If we used an intrinsic that just had an index it would
1460 * only give us the offset into the nir_inputs array. However, this is
1461 * useless because that value is post-interpolation and we need
1462 * pre-interpolation. In order to get the actual location of the bits
1463 * we get from the vertex fetching hardware, we need the variable.
1464 */
1465 case nir_intrinsic_interp_var_at_centroid:
1466 case nir_intrinsic_interp_var_at_sample:
1467 case nir_intrinsic_interp_var_at_offset: {
1468 /* In SIMD16 mode, the pixel interpolator returns coords interleaved
1469 * 8 channels at a time, same as the barycentric coords presented in
1470 * the FS payload. This requires a bit of extra work to support.
1471 */
1472 no16("interpolate_at_* not yet supported in SIMD16 mode.");
1473
1474 fs_reg dst_x(GRF, virtual_grf_alloc(2), BRW_REGISTER_TYPE_F);
1475 fs_reg dst_y = offset(dst_x, 1);
1476
1477 /* For most messages, we need one reg of ignored data; the hardware
1478 * requires mlen==1 even when there is no payload. In the per-slot
1479 * offset case, we'll replace this with the proper source data.
1480 */
1481 fs_reg src(this, glsl_type::float_type);
1482 int mlen = 1; /* one reg unless overridden */
1483 fs_inst *inst;
1484
1485 switch (instr->intrinsic) {
1486 case nir_intrinsic_interp_var_at_centroid:
1487 inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_x, src, fs_reg(0u));
1488 break;
1489
1490 case nir_intrinsic_interp_var_at_sample: {
1491 /* XXX: We should probably handle non-constant sample IDs */
1492 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
1493 assert(const_sample);
1494 unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
1495 inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_x, src,
1496 fs_reg(msg_data));
1497 break;
1498 }
1499
1500 case nir_intrinsic_interp_var_at_offset: {
1501 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
1502
1503 if (const_offset) {
1504 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
1505 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
1506
1507 inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
1508 fs_reg(off_x | (off_y << 4)));
1509 } else {
1510 src = fs_reg(this, glsl_type::ivec2_type);
1511 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
1512 BRW_REGISTER_TYPE_F);
1513 for (int i = 0; i < 2; i++) {
1514 fs_reg temp(this, glsl_type::float_type);
1515 emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
1516 fs_reg itemp(this, glsl_type::int_type);
1517 emit(MOV(itemp, temp)); /* float to int */
1518
1519 /* Clamp the upper end of the range to +7/16.
1520 * ARB_gpu_shader5 requires that we support a maximum offset
1521 * of +0.5, which isn't representable in a S0.4 value -- if
1522 * we didn't clamp it, we'd end up with -8/16, which is the
1523 * opposite of what the shader author wanted.
1524 *
1525 * This is legal due to ARB_gpu_shader5's quantization
1526 * rules:
1527 *
1528 * "Not all values of <offset> may be supported; x and y
1529 * offsets may be rounded to fixed-point values with the
1530 * number of fraction bits given by the
1531 * implementation-dependent constant
1532 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
1533 */
1534
1535 emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
1536 ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
1537 }
1538
1539 mlen = 2;
1540 inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_x, src,
1541 fs_reg(0u));
1542 }
1543 break;
1544 }
1545
1546 default:
1547 unreachable("Invalid intrinsic");
1548 }
1549
1550 inst->mlen = mlen;
1551 inst->regs_written = 2; /* 2 floats per slot returned */
1552 inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
1553 INTERP_QUALIFIER_NOPERSPECTIVE;
1554
1555 for (unsigned j = 0; j < instr->num_components; j++) {
1556 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
1557 src.type = dest.type;
1558
1559 fs_inst *inst = emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
1560 if (instr->has_predicate)
1561 inst->predicate = BRW_PREDICATE_NORMAL;
1562 dest.reg_offset++;
1563 }
1564 break;
1565 }
1566
1567 case nir_intrinsic_store_output_indirect:
1568 has_indirect = true;
1569 case nir_intrinsic_store_output: {
1570 fs_reg src = get_nir_src(instr->src[0]);
1571 unsigned index = 0;
1572 for (int i = 0; i < instr->const_index[1]; i++) {
1573 for (unsigned j = 0; j < instr->num_components; j++) {
1574 fs_reg new_dest = nir_outputs;
1575 new_dest.reg_offset = instr->const_index[0] + index;
1576 if (has_indirect)
1577 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
1578 new_dest.type = src.type;
1579 index++;
1580 fs_inst *inst = MOV(new_dest, src);
1581 if (instr->has_predicate)
1582 inst->predicate = BRW_PREDICATE_NORMAL;
1583 emit(inst);
1584 src.reg_offset++;
1585 }
1586 }
1587 break;
1588 }
1589
1590 default:
1591 unreachable("unknown intrinsic");
1592 }
1593 }
1594
1595 void
1596 fs_visitor::nir_emit_texture(nir_tex_instr *instr)
1597 {
1598 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1599 unsigned sampler = instr->sampler_index;
1600 fs_reg sampler_reg(sampler);
1601
1602 /* FINISHME: We're failing to recompile our programs when the sampler is
1603 * updated. This only matters for the texture rectangle scale parameters
1604 * (pre-gen6, or gen6+ with GL_CLAMP).
1605 */
1606 int texunit = prog->SamplerUnits[sampler];
1607
1608 int gather_component = instr->component;
1609
1610 bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
1611
1612 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
1613 instr->is_array;
1614
1615 int lod_components = 0, offset_components = 0;
1616
1617 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;
1618
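/* Gather the texture sources into the operands emit_texture() expects,
 * retyping each one to the register type the sampler message needs.
 */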
1619 for (unsigned i = 0; i < instr->num_srcs; i++) {
1620 fs_reg src = get_nir_src(instr->src[i]);
1621 switch (instr->src_type[i]) {
1622 case nir_tex_src_bias:
1623 lod = retype(src, BRW_REGISTER_TYPE_F);
1624 break;
1625 case nir_tex_src_comparitor:
1626 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
1627 break;
1628 case nir_tex_src_coord:
1629 switch (instr->op) {
1630 case nir_texop_txf:
1631 case nir_texop_txf_ms:
1632 coordinate = retype(src, BRW_REGISTER_TYPE_D);
1633 break;
1634 default:
1635 coordinate = retype(src, BRW_REGISTER_TYPE_F);
1636 break;
1637 }
1638 break;
1639 case nir_tex_src_ddx:
1640 lod = retype(src, BRW_REGISTER_TYPE_F);
1641 lod_components = nir_tex_instr_src_size(instr, i);
1642 break;
1643 case nir_tex_src_ddy:
1644 lod2 = retype(src, BRW_REGISTER_TYPE_F);
1645 break;
1646 case nir_tex_src_lod:
1647 switch (instr->op) {
1648 case nir_texop_txs:
1649 lod = retype(src, BRW_REGISTER_TYPE_UD);
1650 break;
1651 case nir_texop_txf:
1652 lod = retype(src, BRW_REGISTER_TYPE_D);
1653 break;
1654 default:
1655 lod = retype(src, BRW_REGISTER_TYPE_F);
1656 break;
1657 }
1658 break;
1659 case nir_tex_src_ms_index:
1660 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
1661 break;
1662 case nir_tex_src_offset:
1663 offset = retype(src, BRW_REGISTER_TYPE_D);
1664 if (instr->is_array)
1665 offset_components = instr->coord_components - 1;
1666 else
1667 offset_components = instr->coord_components;
1668 break;
1669 case nir_tex_src_projector:
1670 unreachable("should be lowered");
1671
1672 case nir_tex_src_sampler_offset: {
1673 /* Figure out the highest possible sampler index and mark it as used */
1674 uint32_t max_used = sampler + instr->sampler_array_size - 1;
1675 if (instr->op == nir_texop_tg4 && brw->gen < 8) {
1676 max_used += stage_prog_data->binding_table.gather_texture_start;
1677 } else {
1678 max_used += stage_prog_data->binding_table.texture_start;
1679 }
1680 brw_mark_surface_used(prog_data, max_used);
1681
1682 /* Emit code to evaluate the actual indexing expression */
1683 sampler_reg = fs_reg(this, glsl_type::uint_type);
1684 emit(ADD(sampler_reg, src, fs_reg(sampler)))
1685 ->force_writemask_all = true;
1686 break;
1687 }
1688
1689 default:
1690 unreachable("unknown texture source");
1691 }
1692 }
1693
1694 if (instr->op == nir_texop_txf_ms) {
1695 if (brw->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
1696 mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
1697 else
1698 mcs = fs_reg(0u);
1699 }
1700
1701 for (unsigned i = 0; i < 3; i++) {
1702 if (instr->const_offset[i] != 0) {
1703 assert(offset_components == 0);
1704 offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
1705 break;
1706 }
1707 }
1708
1709 enum glsl_base_type dest_base_type;
1710 switch (instr->dest_type) {
1711 case nir_type_float:
1712 dest_base_type = GLSL_TYPE_FLOAT;
1713 break;
1714 case nir_type_int:
1715 dest_base_type = GLSL_TYPE_INT;
1716 break;
1717 case nir_type_unsigned:
1718 dest_base_type = GLSL_TYPE_UINT;
1719 break;
1720 default:
1721 unreachable("bad type");
1722 }
1723
1724 const glsl_type *dest_type =
1725 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
1726 1);
1727
1728 ir_texture_opcode op;
1729 switch (instr->op) {
1730 case nir_texop_lod: op = ir_lod; break;
1731 case nir_texop_query_levels: op = ir_query_levels; break;
1732 case nir_texop_tex: op = ir_tex; break;
1733 case nir_texop_tg4: op = ir_tg4; break;
1734 case nir_texop_txb: op = ir_txb; break;
1735 case nir_texop_txd: op = ir_txd; break;
1736 case nir_texop_txf: op = ir_txf; break;
1737 case nir_texop_txf_ms: op = ir_txf_ms; break;
1738 case nir_texop_txl: op = ir_txl; break;
1739 case nir_texop_txs: op = ir_txs; break;
1740 default:
1741 unreachable("unknown texture opcode");
1742 }
1743
1744 emit_texture(op, dest_type, coordinate, instr->coord_components,
1745 shadow_comparitor, lod, lod2, lod_components, sample_index,
1746 offset, offset_components, mcs, gather_component,
1747 is_cube_array, is_rect, sampler, sampler_reg, texunit);
1748
1749 fs_reg dest = get_nir_dest(instr->dest);
1750 dest.type = this->result.type;
1751 unsigned num_components = nir_tex_instr_dest_size(instr);
1752 emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
1753 }
1754
1755 void
1756 fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
1757 {
1758 /* Bail on SSA constant loads. These are used for immediates. */
1759 if (instr->dest.is_ssa)
1760 return;
1761
1762 fs_reg dest = get_nir_dest(instr->dest);
1763 dest.type = BRW_REGISTER_TYPE_UD;
1764 if (instr->array_elems == 0) {
1765 for (unsigned i = 0; i < instr->num_components; i++) {
1766 emit(MOV(dest, fs_reg(instr->value.u[i])));
1767 dest.reg_offset++;
1768 }
1769 } else {
1770 for (unsigned i = 0; i < instr->array_elems; i++) {
1771 for (unsigned j = 0; j < instr->num_components; j++) {
1772 emit(MOV(dest, fs_reg(instr->array[i].u[j])));
1773 dest.reg_offset++;
1774 }
1775 }
1776 }
1777 }
1778
1779 void
1780 fs_visitor::nir_emit_jump(nir_jump_instr *instr)
1781 {
1782 switch (instr->type) {
1783 case nir_jump_break:
1784 emit(BRW_OPCODE_BREAK);
1785 break;
1786 case nir_jump_continue:
1787 emit(BRW_OPCODE_CONTINUE);
1788 break;
1789 case nir_jump_return:
1790 default:
1791 unreachable("unknown jump");
1792 }
1793 }