nir: Rename lower_variables to lower_vars_to_ssa
[mesa.git] src/mesa/drivers/dri/i965/brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "glsl/ir.h"
25 #include "glsl/ir_optimization.h"
26 #include "glsl/nir/glsl_to_nir.h"
27 #include "brw_fs.h"
28
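/* Top-level entry point for the NIR path: convert the GLSL IR shader to NIR,
 * optimize it to a fixed point, lower it into its final non-SSA,
 * register-based form, set up input/output/uniform storage, and then emit
 * backend instructions for the shader's single "main" implementation.
 */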
29 void
30 fs_visitor::emit_nir_code()
31 {
32 /* first, lower the GLSL IR shader to NIR */
33 lower_output_reads(shader->base.ir);
34 nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
35 nir_validate_shader(nir);
36
37 nir_lower_global_vars_to_local(nir);
38 nir_validate_shader(nir);
39
40 nir_split_var_copies(nir);
41 nir_validate_shader(nir);
42
43 bool progress;
44 do {
45 progress = false;
46 nir_lower_vars_to_ssa(nir);
47 nir_validate_shader(nir);
48 progress |= nir_copy_prop(nir);
49 nir_validate_shader(nir);
50 progress |= nir_opt_dce(nir);
51 nir_validate_shader(nir);
52 progress |= nir_opt_cse(nir);
53 nir_validate_shader(nir);
54 progress |= nir_opt_peephole_select(nir);
55 nir_validate_shader(nir);
56 progress |= nir_opt_algebraic(nir);
57 nir_validate_shader(nir);
58 progress |= nir_opt_constant_folding(nir);
59 nir_validate_shader(nir);
60 } while (progress);
61
62 /* Lower I/O, locals, samplers, system values and atomics, then take the shader out of SSA form */
63 nir_lower_io(nir);
64 nir_validate_shader(nir);
65
66 nir_lower_locals_to_regs(nir);
67 nir_validate_shader(nir);
68
69 nir_remove_dead_variables(nir);
70 nir_validate_shader(nir);
71
72 nir_lower_samplers(nir, shader_prog, shader->base.Program);
73 nir_validate_shader(nir);
74
75 nir_lower_system_values(nir);
76 nir_validate_shader(nir);
77
78 nir_lower_atomics(nir);
79 nir_validate_shader(nir);
80
81 nir_lower_to_source_mods(nir);
82 nir_validate_shader(nir);
83 nir_copy_prop(nir);
84 nir_validate_shader(nir);
85 nir_convert_from_ssa(nir);
86 nir_validate_shader(nir);
87 nir_lower_vec_to_movs(nir);
88 nir_validate_shader(nir);
89
90 /* emit the arrays used for inputs and outputs - load/store intrinsics will
91 * be converted to reads/writes of these arrays
92 */
93
94 if (nir->num_inputs > 0) {
95 nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
96 nir_setup_inputs(nir);
97 }
98
99 if (nir->num_outputs > 0) {
100 nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
101 nir_setup_outputs(nir);
102 }
103
104 if (nir->num_uniforms > 0) {
105 nir_uniforms = fs_reg(UNIFORM, 0);
106 nir_setup_uniforms(nir);
107 }
108
109 nir_emit_system_values(nir);
110
111 nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
112 foreach_list_typed(nir_register, reg, node, &nir->registers) {
113 unsigned array_elems =
114 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
115 unsigned size = array_elems * reg->num_components;
116 nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
117 }
118
119 /* get the main function and emit it */
120 nir_foreach_overload(nir, overload) {
121 assert(strcmp(overload->function->name, "main") == 0);
122 assert(overload->impl);
123 nir_emit_impl(overload->impl);
124 }
125
126 ralloc_free(nir);
127 }
128
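/* Emit the initial interpolation for each FS input variable, writing the
 * results into nir_inputs at the variable's driver_location. gl_FragCoord
 * and gl_FrontFacing have dedicated setup paths; everything else goes
 * through the general barycentric interpolation code.
 */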
129 void
130 fs_visitor::nir_setup_inputs(nir_shader *shader)
131 {
132 fs_reg varying = nir_inputs;
133
134 struct hash_entry *entry;
135 hash_table_foreach(shader->inputs, entry) {
136 nir_variable *var = (nir_variable *) entry->data;
137 varying.reg_offset = var->data.driver_location;
138
139 fs_reg reg;
140 if (!strcmp(var->name, "gl_FragCoord")) {
141 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
142 var->data.origin_upper_left);
143 emit_percomp(MOV(varying, reg), 0xF);
144 } else if (!strcmp(var->name, "gl_FrontFacing")) {
145 reg = *emit_frontfacing_interpolation();
146 emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
147 } else {
148 emit_general_interpolation(varying, var->name, var->type,
149 (glsl_interp_qualifier) var->data.interpolation,
150 var->data.location, var->data.centroid,
151 var->data.sample);
152 }
153 }
154 }
155
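/* Point the visitor's output bookkeeping (color regions, depth, sample mask,
 * dual-source blend) at the right offsets within nir_outputs, based on each
 * output variable's location and index.
 */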
156 void
157 fs_visitor::nir_setup_outputs(nir_shader *shader)
158 {
159 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
160 fs_reg reg = nir_outputs;
161
162 struct hash_entry *entry;
163 hash_table_foreach(shader->outputs, entry) {
164 nir_variable *var = (nir_variable *) entry->data;
165 reg.reg_offset = var->data.driver_location;
166
167 if (var->data.index > 0) {
168 assert(var->data.location == FRAG_RESULT_DATA0);
169 assert(var->data.index == 1);
170 this->dual_src_output = reg;
171 this->do_dual_src = true;
172 } else if (var->data.location == FRAG_RESULT_COLOR) {
173 /* Writing gl_FragColor outputs to all color regions. */
174 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
175 this->outputs[i] = reg;
176 this->output_components[i] = 4;
177 }
178 } else if (var->data.location == FRAG_RESULT_DEPTH) {
179 this->frag_depth = reg;
180 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
181 this->sample_mask = reg;
182 } else {
183 /* gl_FragData or a user-defined FS output */
184 assert(var->data.location >= FRAG_RESULT_DATA0 &&
185 var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);
186
187 int vector_elements =
188 var->type->is_array() ? var->type->fields.array->vector_elements
189 : var->type->vector_elements;
190
191 /* General color output. */
192 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
193 int output = var->data.location - FRAG_RESULT_DATA0 + i;
194 this->outputs[output] = reg;
195 this->outputs[output].reg_offset += vector_elements * i;
196 this->output_components[output] = vector_elements;
197 }
198 }
199 }
200 }
201
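/* Fill out stage_prog_data->param for every user-defined and built-in
 * uniform. This is only done for the SIMD8 (dispatch_width == 8) compile.
 */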
202 void
203 fs_visitor::nir_setup_uniforms(nir_shader *shader)
204 {
205 uniforms = shader->num_uniforms;
206 param_size[0] = shader->num_uniforms;
207
208 if (dispatch_width != 8)
209 return;
210
211 struct hash_entry *entry;
212 hash_table_foreach(shader->uniforms, entry) {
213 nir_variable *var = (nir_variable *) entry->data;
214
215 /* UBOs and atomics don't take up space in the uniform file */
216
217 if (var->interface_type != NULL || var->type->contains_atomic())
218 continue;
219
220 if (strncmp(var->name, "gl_", 3) == 0)
221 nir_setup_builtin_uniform(var);
222 else
223 nir_setup_uniform(var);
224 }
225 }
226
227 void
228 fs_visitor::nir_setup_uniform(nir_variable *var)
229 {
230 int namelen = strlen(var->name);
231
232 /* The data for our (non-builtin) uniforms is stored in a series of
233 * gl_uniform_driver_storage structs for each subcomponent that
234 * glGetUniformLocation() could name. We know it's been set up in the
235 * same order we'd walk the type, so walk the list of storage and find
236 * anything with our name, or the prefix of a component that starts with
237 * our name.
238 */
239 unsigned index = var->data.driver_location;
240 for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
241 struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
242
243 if (strncmp(var->name, storage->name, namelen) != 0 ||
244 (storage->name[namelen] != 0 &&
245 storage->name[namelen] != '.' &&
246 storage->name[namelen] != '[')) {
247 continue;
248 }
249
250 unsigned slots = storage->type->component_slots();
251 if (storage->array_elements)
252 slots *= storage->array_elements;
253
254 for (unsigned i = 0; i < slots; i++) {
255 stage_prog_data->param[index++] = &storage->storage[i];
256 }
257 }
258
259 /* Make sure we actually initialized the right amount of stuff here. */
260 assert(var->data.driver_location + var->type->component_slots() == index);
261 }
262
263 void
264 fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
265 {
266 const nir_state_slot *const slots = var->state_slots;
267 assert(var->state_slots != NULL);
268
269 unsigned uniform_index = var->data.driver_location;
270 for (unsigned int i = 0; i < var->num_state_slots; i++) {
271 /* This state reference has already been setup by ir_to_mesa, but we'll
272 * get the same index back here.
273 */
274 int index = _mesa_add_state_reference(this->prog->Parameters,
275 (gl_state_index *)slots[i].tokens);
276
277 /* Add each of the unique swizzles of the element as a parameter.
278 * This'll end up matching the expected layout of the
279 * array/matrix/structure we're trying to fill in.
280 */
281 int last_swiz = -1;
282 for (unsigned int j = 0; j < 4; j++) {
283 int swiz = GET_SWZ(slots[i].swizzle, j);
284 if (swiz == last_swiz)
285 break;
286 last_swiz = swiz;
287
288 stage_prog_data->param[uniform_index++] =
289 &prog->Parameters->ParameterValues[index][swiz];
290 }
291 }
292 }
293
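/* Per-block callback for nir_foreach_block(): look for system value loads
 * (sample position, sample ID, sample mask) and lazily emit their one-time
 * setup code, caching the resulting registers in nir_system_values.
 */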
294 static bool
295 emit_system_values_block(nir_block *block, void *void_visitor)
296 {
297 fs_visitor *v = (fs_visitor *)void_visitor;
298 fs_reg *reg;
299
300 nir_foreach_instr(block, instr) {
301 if (instr->type != nir_instr_type_intrinsic)
302 continue;
303
304 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
305 switch (intrin->intrinsic) {
306 case nir_intrinsic_load_sample_pos:
307 assert(v->stage == MESA_SHADER_FRAGMENT);
308 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
309 if (reg->file == BAD_FILE)
310 *reg = *v->emit_samplepos_setup();
311 break;
312
313 case nir_intrinsic_load_sample_id:
314 assert(v->stage == MESA_SHADER_FRAGMENT);
315 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
316 if (reg->file == BAD_FILE)
317 *reg = *v->emit_sampleid_setup();
318 break;
319
320 case nir_intrinsic_load_sample_mask_in:
321 assert(v->stage == MESA_SHADER_FRAGMENT);
322 assert(v->brw->gen >= 7);
323 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
324 if (reg->file == BAD_FILE)
325 *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
326 BRW_REGISTER_TYPE_D));
327 break;
328
329 default:
330 break;
331 }
332 }
333
334 return true;
335 }
336
337 void
338 fs_visitor::nir_emit_system_values(nir_shader *shader)
339 {
340 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
341 nir_foreach_overload(shader, overload) {
342 assert(strcmp(overload->function->name, "main") == 0);
343 assert(overload->impl);
344 nir_foreach_block(overload->impl, emit_system_values_block, this);
345 }
346 }
347
348 void
349 fs_visitor::nir_emit_impl(nir_function_impl *impl)
350 {
351 nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
352 foreach_list_typed(nir_register, reg, node, &impl->registers) {
353 unsigned array_elems =
354 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
355 unsigned size = array_elems * reg->num_components;
356 nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
357 }
358
359 nir_emit_cf_list(&impl->body);
360 }
361
362 void
363 fs_visitor::nir_emit_cf_list(exec_list *list)
364 {
365 foreach_list_typed(nir_cf_node, node, node, list) {
366 switch (node->type) {
367 case nir_cf_node_if:
368 nir_emit_if(nir_cf_node_as_if(node));
369 break;
370
371 case nir_cf_node_loop:
372 nir_emit_loop(nir_cf_node_as_loop(node));
373 break;
374
375 case nir_cf_node_block:
376 nir_emit_block(nir_cf_node_as_block(node));
377 break;
378
379 default:
380 unreachable("Invalid CFG node type");
381 }
382 }
383 }
384
385 void
386 fs_visitor::nir_emit_if(nir_if *if_stmt)
387 {
388 if (brw->gen < 6) {
389 no16("Can't support (non-uniform) control flow on SIMD16\n");
390 }
391
392 /* first, put the condition into f0 */
393 fs_inst *inst = emit(MOV(reg_null_d,
394 retype(get_nir_src(if_stmt->condition),
395 BRW_REGISTER_TYPE_UD)));
396 inst->conditional_mod = BRW_CONDITIONAL_NZ;
397
398 emit(IF(BRW_PREDICATE_NORMAL));
399
400 nir_emit_cf_list(&if_stmt->then_list);
401
402 /* note: if the else is empty, dead CF elimination will remove it */
403 emit(BRW_OPCODE_ELSE);
404
405 nir_emit_cf_list(&if_stmt->else_list);
406
407 emit(BRW_OPCODE_ENDIF);
408
409 try_replace_with_sel();
410 }
411
412 void
413 fs_visitor::nir_emit_loop(nir_loop *loop)
414 {
415 if (brw->gen < 6) {
416 no16("Can't support (non-uniform) control flow on SIMD16\n");
417 }
418
419 emit(BRW_OPCODE_DO);
420
421 nir_emit_cf_list(&loop->body);
422
423 emit(BRW_OPCODE_WHILE);
424 }
425
426 void
427 fs_visitor::nir_emit_block(nir_block *block)
428 {
429 nir_foreach_instr(block, instr) {
430 nir_emit_instr(instr);
431 }
432 }
433
434 void
435 fs_visitor::nir_emit_instr(nir_instr *instr)
436 {
437 switch (instr->type) {
438 case nir_instr_type_alu:
439 nir_emit_alu(nir_instr_as_alu(instr));
440 break;
441
442 case nir_instr_type_intrinsic:
443 nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
444 break;
445
446 case nir_instr_type_tex:
447 nir_emit_texture(nir_instr_as_tex(instr));
448 break;
449
450 case nir_instr_type_load_const:
451 /* We can hit these, but we do nothing now and use them as
452 * immediates later.
453 */
454 break;
455
456 case nir_instr_type_jump:
457 nir_emit_jump(nir_instr_as_jump(instr));
458 break;
459
460 default:
461 unreachable("unknown instruction type");
462 }
463 }
464
465 static brw_reg_type
466 brw_type_for_nir_type(nir_alu_type type)
467 {
468 switch (type) {
469 case nir_type_bool:
470 case nir_type_unsigned:
471 return BRW_REGISTER_TYPE_UD;
472 case nir_type_int:
473 return BRW_REGISTER_TYPE_D;
474 case nir_type_float:
475 return BRW_REGISTER_TYPE_F;
476 default:
477 unreachable("unknown type");
478 }
479
480 return BRW_REGISTER_TYPE_F;
481 }
482
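/* Emit code for a single NIR ALU instruction. Sources arrive with swizzles
 * already resolved by get_nir_alu_src(); most cases then emit one scalar
 * instruction per component enabled in the destination writemask.
 */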
483 void
484 fs_visitor::nir_emit_alu(nir_alu_instr *instr)
485 {
486 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
487
488 fs_reg op[3];
489 fs_reg result = get_nir_dest(instr->dest.dest);
490 result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
491
492 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
493 op[i] = get_nir_alu_src(instr, i);
494
495 switch (instr->op) {
496 case nir_op_fmov:
497 case nir_op_i2f:
498 case nir_op_u2f: {
499 fs_inst *inst = MOV(result, op[0]);
500 inst->saturate = instr->dest.saturate;
501 emit_percomp(inst, instr->dest.write_mask);
502 }
503 break;
504
505 case nir_op_imov:
506 case nir_op_f2i:
507 case nir_op_f2u:
508 emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
509 break;
510
511 case nir_op_fsign: {
512 /* AND(val, 0x80000000) gives the sign bit.
513 *
514 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
515 * zero.
516 */
517 emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
518 instr->dest.write_mask);
519
520 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
521 op[0].type = BRW_REGISTER_TYPE_UD;
522 result.type = BRW_REGISTER_TYPE_UD;
523 emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
524 instr->dest.write_mask);
525
526 fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
527 inst->predicate = BRW_PREDICATE_NORMAL;
528 emit_percomp(inst, instr->dest.write_mask);
529 if (instr->dest.saturate) {
530 fs_inst *inst = MOV(result, result);
531 inst->saturate = true;
532 emit_percomp(inst, instr->dest.write_mask);
533 }
534 break;
535 }
536
537 case nir_op_isign: {
538 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
539 * -> non-negative val generates 0x00000000.
540 * Predicated OR sets 1 if val is positive.
541 */
542 emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
543 instr->dest.write_mask);
544
545 emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);
546
547 fs_inst *inst = OR(result, result, fs_reg(1));
548 inst->predicate = BRW_PREDICATE_NORMAL;
549 emit_percomp(inst, instr->dest.write_mask);
550 break;
551 }
552
553 case nir_op_frcp:
554 emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
555 instr->dest.write_mask, instr->dest.saturate);
556 break;
557
558 case nir_op_fexp2:
559 emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
560 instr->dest.write_mask, instr->dest.saturate);
561 break;
562
563 case nir_op_flog2:
564 emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
565 instr->dest.write_mask, instr->dest.saturate);
566 break;
567
568 case nir_op_fexp:
569 case nir_op_flog:
570 unreachable("not reached: should be handled by ir_explog_to_explog2");
571
572 case nir_op_fsin:
573 case nir_op_fsin_reduced:
574 emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
575 instr->dest.write_mask, instr->dest.saturate);
576 break;
577
578 case nir_op_fcos:
579 case nir_op_fcos_reduced:
580 emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
581 instr->dest.write_mask, instr->dest.saturate);
582 break;
583
584 case nir_op_fddx:
585 if (fs_key->high_quality_derivatives)
586 emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
587 instr->dest.write_mask, instr->dest.saturate);
588 else
589 emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
590 instr->dest.write_mask, instr->dest.saturate);
591 break;
592 case nir_op_fddx_fine:
593 emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
594 instr->dest.write_mask, instr->dest.saturate);
595 break;
596 case nir_op_fddx_coarse:
597 emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
598 instr->dest.write_mask, instr->dest.saturate);
599 break;
600 case nir_op_fddy:
601 if (fs_key->high_quality_derivatives)
602 emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
603 fs_reg(fs_key->render_to_fbo),
604 instr->dest.write_mask, instr->dest.saturate);
605 else
606 emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
607 fs_reg(fs_key->render_to_fbo),
608 instr->dest.write_mask, instr->dest.saturate);
609 break;
610 case nir_op_fddy_fine:
611 emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
612 fs_reg(fs_key->render_to_fbo),
613 instr->dest.write_mask, instr->dest.saturate);
614 break;
615 case nir_op_fddy_coarse:
616 emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
617 fs_reg(fs_key->render_to_fbo),
618 instr->dest.write_mask, instr->dest.saturate);
619 break;
620
621 case nir_op_fadd:
622 case nir_op_iadd: {
623 fs_inst *inst = ADD(result, op[0], op[1]);
624 inst->saturate = instr->dest.saturate;
625 emit_percomp(inst, instr->dest.write_mask);
626 break;
627 }
628
629 case nir_op_fmul: {
630 fs_inst *inst = MUL(result, op[0], op[1]);
631 inst->saturate = instr->dest.saturate;
632 emit_percomp(inst, instr->dest.write_mask);
633 break;
634 }
635
636 case nir_op_imul: {
637 /* TODO put in the 16-bit constant optimization once we have SSA */
638
639 if (brw->gen >= 7)
640 no16("SIMD16 explicit accumulator operands unsupported\n");
641
642 struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
643
644 emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
645 emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
646 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
647 break;
648 }
649
650 case nir_op_imul_high:
651 case nir_op_umul_high: {
652 if (brw->gen >= 7)
653 no16("SIMD16 explicit accumulator operands unsupported\n");
654
655 struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
656
657 emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
658 emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
659 break;
660 }
661
662 case nir_op_idiv:
663 case nir_op_udiv:
664 emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
665 instr->dest.write_mask);
666 break;
667
668 case nir_op_uadd_carry: {
669 if (brw->gen >= 7)
670 no16("SIMD16 explicit accumulator operands unsupported\n");
671
672 struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
673 BRW_REGISTER_TYPE_UD);
674
675 emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
676 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
677 break;
678 }
679
680 case nir_op_usub_borrow: {
681 if (brw->gen >= 7)
682 no16("SIMD16 explicit accumulator operands unsupported\n");
683
684 struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
685 BRW_REGISTER_TYPE_UD);
686
687 emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
688 emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
689 break;
690 }
691
692 case nir_op_umod:
693 emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
694 op[1], instr->dest.write_mask);
695 break;
696
697 case nir_op_flt:
698 case nir_op_ilt:
699 case nir_op_ult:
700 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
701 instr->dest.write_mask);
702 break;
703
704 case nir_op_fge:
705 case nir_op_ige:
706 case nir_op_uge:
707 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
708 instr->dest.write_mask);
709 break;
710
711 case nir_op_feq:
712 case nir_op_ieq:
713 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
714 instr->dest.write_mask);
715 break;
716
717 case nir_op_fne:
718 case nir_op_ine:
719 emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
720 instr->dest.write_mask);
721 break;
722
723 case nir_op_ball_fequal2:
724 case nir_op_ball_iequal2:
725 case nir_op_ball_fequal3:
726 case nir_op_ball_iequal3:
727 case nir_op_ball_fequal4:
728 case nir_op_ball_iequal4: {
729 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
730 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
731 emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
732 (1 << num_components) - 1);
733 emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
734 break;
735 }
736
737 case nir_op_bany_fnequal2:
738 case nir_op_bany_inequal2:
739 case nir_op_bany_fnequal3:
740 case nir_op_bany_inequal3:
741 case nir_op_bany_fnequal4:
742 case nir_op_bany_inequal4: {
743 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
744 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
745 temp.type = BRW_REGISTER_TYPE_UD;
746 emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
747 (1 << num_components) - 1);
748 emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
749 break;
750 }
751
752 case nir_op_inot:
753 emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
754 break;
755 case nir_op_ixor:
756 emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
757 break;
758 case nir_op_ior:
759 emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
760 break;
761 case nir_op_iand:
762 emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
763 break;
764
765 case nir_op_fdot2:
766 case nir_op_fdot3:
767 case nir_op_fdot4: {
768 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
769 fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
770 emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
771 emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
772 if (instr->dest.saturate) {
773 fs_inst *inst = emit(MOV(result, result));
774 inst->saturate = true;
775 }
776 break;
777 }
778
779 case nir_op_bany2:
780 case nir_op_bany3:
781 case nir_op_bany4: {
782 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
783 emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
784 break;
785 }
786
787 case nir_op_ball2:
788 case nir_op_ball3:
789 case nir_op_ball4: {
790 unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
791 emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
792 break;
793 }
794
795 case nir_op_fnoise1_1:
796 case nir_op_fnoise1_2:
797 case nir_op_fnoise1_3:
798 case nir_op_fnoise1_4:
799 case nir_op_fnoise2_1:
800 case nir_op_fnoise2_2:
801 case nir_op_fnoise2_3:
802 case nir_op_fnoise2_4:
803 case nir_op_fnoise3_1:
804 case nir_op_fnoise3_2:
805 case nir_op_fnoise3_3:
806 case nir_op_fnoise3_4:
807 case nir_op_fnoise4_1:
808 case nir_op_fnoise4_2:
809 case nir_op_fnoise4_3:
810 case nir_op_fnoise4_4:
811 unreachable("not reached: should be handled by lower_noise");
812
813 case nir_op_vec2:
814 case nir_op_vec3:
815 case nir_op_vec4:
816 unreachable("not reached: should be handled by lower_quadop_vector");
817
818 case nir_op_ldexp:
819 unreachable("not reached: should be handled by ldexp_to_arith()");
820
821 case nir_op_fsqrt:
822 emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
823 instr->dest.write_mask, instr->dest.saturate);
824 break;
825
826 case nir_op_frsq:
827 emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
828 instr->dest.write_mask, instr->dest.saturate);
829 break;
830
831 case nir_op_b2i:
832 emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
833 break;
834 case nir_op_b2f: {
835 emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
836 fs_reg(0x3f800000u)),
837 instr->dest.write_mask);
838 break;
839 }
840
841 case nir_op_f2b:
842 emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
843 instr->dest.write_mask);
844 break;
845 case nir_op_i2b:
846 emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
847 instr->dest.write_mask);
848 break;
849
850 case nir_op_ftrunc: {
851 fs_inst *inst = RNDZ(result, op[0]);
852 inst->saturate = instr->dest.saturate;
853 emit_percomp(inst, instr->dest.write_mask);
854 break;
855 }
856 case nir_op_fceil: {
857 op[0].negate = !op[0].negate;
858 fs_reg temp = fs_reg(this, glsl_type::vec4_type);
859 emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
860 temp.negate = true;
861 fs_inst *inst = MOV(result, temp);
862 inst->saturate = instr->dest.saturate;
863 emit_percomp(inst, instr->dest.write_mask);
864 break;
865 }
866 case nir_op_ffloor: {
867 fs_inst *inst = RNDD(result, op[0]);
868 inst->saturate = instr->dest.saturate;
869 emit_percomp(inst, instr->dest.write_mask);
870 break;
871 }
872 case nir_op_ffract: {
873 fs_inst *inst = FRC(result, op[0]);
874 inst->saturate = instr->dest.saturate;
875 emit_percomp(inst, instr->dest.write_mask);
876 break;
877 }
878 case nir_op_fround_even: {
879 fs_inst *inst = RNDE(result, op[0]);
880 inst->saturate = instr->dest.saturate;
881 emit_percomp(inst, instr->dest.write_mask);
882 break;
883 }
884
885 case nir_op_fmin:
886 case nir_op_imin:
887 case nir_op_umin:
888 if (brw->gen >= 6) {
889 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
890 instr->dest.write_mask, instr->dest.saturate,
891 BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
892 } else {
893 emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
894 instr->dest.write_mask);
895
896 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
897 instr->dest.write_mask, instr->dest.saturate,
898 BRW_PREDICATE_NORMAL);
899 }
900 break;
901
902 case nir_op_fmax:
903 case nir_op_imax:
904 case nir_op_umax:
905 if (brw->gen >= 6) {
906 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
907 instr->dest.write_mask, instr->dest.saturate,
908 BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
909 } else {
910 emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
911 instr->dest.write_mask);
912
913 emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
914 instr->dest.write_mask, instr->dest.saturate,
915 BRW_PREDICATE_NORMAL);
916 }
917 break;
918
919 case nir_op_pack_snorm_2x16:
920 case nir_op_pack_snorm_4x8:
921 case nir_op_pack_unorm_2x16:
922 case nir_op_pack_unorm_4x8:
923 case nir_op_unpack_snorm_2x16:
924 case nir_op_unpack_snorm_4x8:
925 case nir_op_unpack_unorm_2x16:
926 case nir_op_unpack_unorm_4x8:
927 case nir_op_unpack_half_2x16:
928 case nir_op_pack_half_2x16:
929 unreachable("not reached: should be handled by lower_packing_builtins");
930
931 case nir_op_unpack_half_2x16_split_x:
932 emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
933 instr->dest.write_mask, instr->dest.saturate);
934 break;
935 case nir_op_unpack_half_2x16_split_y:
936 emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
937 instr->dest.write_mask, instr->dest.saturate);
938 break;
939
940 case nir_op_fpow:
941 emit_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
942 instr->dest.write_mask, instr->dest.saturate);
943 break;
944
945 case nir_op_bitfield_reverse:
946 emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
947 break;
948
949 case nir_op_bit_count:
950 emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
951 break;
952
953 case nir_op_ufind_msb:
954 case nir_op_ifind_msb: {
955 emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
956 instr->dest.write_mask);
957
958 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
959 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
960 * subtract the result from 31 to convert the MSB count into an LSB count.
961 */
962
963 emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
964 instr->dest.write_mask);
965 fs_reg neg_result(result);
966 neg_result.negate = true;
967 fs_inst *inst = ADD(result, neg_result, fs_reg(31));
968 inst->predicate = BRW_PREDICATE_NORMAL;
969 emit_percomp(inst, instr->dest.write_mask);
970 break;
971 }
972
973 case nir_op_find_lsb:
974 emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
975 break;
976
977 case nir_op_ubitfield_extract:
978 case nir_op_ibitfield_extract:
979 emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
980 break;
981 case nir_op_bfm:
982 emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
983 break;
984 case nir_op_bfi:
985 emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
986 break;
987
988 case nir_op_bitfield_insert:
989 unreachable("not reached: should be handled by "
990 "lower_instructions::bitfield_insert_to_bfm_bfi");
991
992 case nir_op_ishl:
993 emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
994 break;
995 case nir_op_ishr:
996 emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
997 break;
998 case nir_op_ushr:
999 emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
1000 break;
1001
1002 case nir_op_pack_half_2x16_split:
1003 emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
1004 instr->dest.write_mask);
1005 break;
1006
1007 case nir_op_ffma:
1008 emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
1009 break;
1010
1011 case nir_op_flrp:
1012 /* TODO emulate for gen < 6 */
1013 emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
1014 break;
1015
1016 case nir_op_bcsel:
1017 for (unsigned i = 0; i < 4; i++) {
1018 if (!((instr->dest.write_mask >> i) & 1))
1019 continue;
1020
1021 emit(CMP(reg_null_d, offset(op[0], i), fs_reg(0), BRW_CONDITIONAL_NZ));
1022 emit(SEL(offset(result, i), offset(op[1], i), offset(op[2], i)))
1023 ->predicate = BRW_PREDICATE_NORMAL;
1024 }
1025 break;
1026
1027 default:
1028 unreachable("unhandled instruction");
1029 }
1030 }
1031
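/* Resolve a NIR source to an fs_reg. The only SSA values left at this point
 * are load_const results, which get materialized with immediate MOVs;
 * register sources index into nir_globals/nir_locals and pick up their base
 * offset and any reladdr indirect.
 */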
1032 fs_reg
1033 fs_visitor::get_nir_src(nir_src src)
1034 {
1035 if (src.is_ssa) {
1036 assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
1037 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1038 fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
1039 BRW_REGISTER_TYPE_D);
1040
1041 for (unsigned i = 0; i < src.ssa->num_components; ++i)
1042 emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));
1043
1044 return reg;
1045 } else {
1046 fs_reg reg;
1047 if (src.reg.reg->is_global)
1048 reg = nir_globals[src.reg.reg->index];
1049 else
1050 reg = nir_locals[src.reg.reg->index];
1051
1052 /* to avoid floating-point denorm flushing problems, set the type by
1053 * default to D - instructions that need floating point semantics will set
1054 * this to F if they need to
1055 */
1056 reg.type = BRW_REGISTER_TYPE_D;
1057 reg.reg_offset = src.reg.base_offset;
1058 if (src.reg.indirect) {
1059 reg.reladdr = new(mem_ctx) fs_reg();
1060 *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
1061 BRW_REGISTER_TYPE_D);
1062 }
1063
1064 return reg;
1065 }
1066 }
1067
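/* Like get_nir_src(), but also applies the per-source type and abs/negate
 * modifiers, and resolves any non-identity swizzle with per-component MOVs
 * into a fresh GRF.
 */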
1068 fs_reg
1069 fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
1070 {
1071 fs_reg reg = get_nir_src(instr->src[src].src);
1072
1073 reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
1074 reg.abs = instr->src[src].abs;
1075 reg.negate = instr->src[src].negate;
1076
1077 bool needs_swizzle = false;
1078 unsigned num_components = 0;
1079 for (unsigned i = 0; i < 4; i++) {
1080 if (!nir_alu_instr_channel_used(instr, src, i))
1081 continue;
1082
1083 if (instr->src[src].swizzle[i] != i)
1084 needs_swizzle = true;
1085
1086 num_components = i + 1;
1087 }
1088
1089 if (needs_swizzle) {
1090 /* resolve the swizzle through MOVs */
1091 fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components), reg.type);
1092
1093 for (unsigned i = 0; i < 4; i++) {
1094 if (!nir_alu_instr_channel_used(instr, src, i))
1095 continue;
1096
1097 emit(MOV(offset(new_reg, i),
1098 offset(reg, instr->src[src].swizzle[i])));
1099 }
1100
1101 return new_reg;
1102 }
1103
1104 return reg;
1105 }
1106
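/* Resolve a NIR destination (always a register at this point) to an fs_reg,
 * including its base offset and any reladdr indirect.
 */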
1107 fs_reg
1108 fs_visitor::get_nir_dest(nir_dest dest)
1109 {
1110 fs_reg reg;
1111 if (dest.reg.reg->is_global)
1112 reg = nir_globals[dest.reg.reg->index];
1113 else
1114 reg = nir_locals[dest.reg.reg->index];
1115
1116 reg.reg_offset = dest.reg.base_offset;
1117 if (dest.reg.indirect) {
1118 reg.reladdr = new(mem_ctx) fs_reg();
1119 *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
1120 BRW_REGISTER_TYPE_D);
1121 }
1122
1123 return reg;
1124 }
1125
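/* The emit_percomp() helpers below emit one copy of an instruction for each
 * component enabled in the NIR writemask, bumping the destination (and any
 * GRF source) reg_offset so each copy operates on the next component.
 */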
1126 void
1127 fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
1128 {
1129 for (unsigned i = 0; i < 4; i++) {
1130 if (!((wr_mask >> i) & 1))
1131 continue;
1132
1133 fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
1134 new_inst->dst.reg_offset += i;
1135 for (unsigned j = 0; j < new_inst->sources; j++)
1136 if (inst->src[j].file == GRF)
1137 new_inst->src[j].reg_offset += i;
1138
1139 emit(new_inst);
1140 }
1141 }
1142
1143 void
1144 fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1145 unsigned wr_mask, bool saturate,
1146 enum brw_predicate predicate,
1147 enum brw_conditional_mod mod)
1148 {
1149 for (unsigned i = 0; i < 4; i++) {
1150 if (!((wr_mask >> i) & 1))
1151 continue;
1152
1153 fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
1154 new_inst->dst.reg_offset += i;
1155 for (unsigned j = 0; j < new_inst->sources; j++)
1156 if (new_inst->src[j].file == GRF)
1157 new_inst->src[j].reg_offset += i;
1158
1159 new_inst->predicate = predicate;
1160 new_inst->conditional_mod = mod;
1161 new_inst->saturate = saturate;
1162 emit(new_inst);
1163 }
1164 }
1165
1166 void
1167 fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0, fs_reg src1,
1168 unsigned wr_mask, bool saturate,
1169 enum brw_predicate predicate,
1170 enum brw_conditional_mod mod)
1171 {
1172 for (unsigned i = 0; i < 4; i++) {
1173 if (!((wr_mask >> i) & 1))
1174 continue;
1175
1176 fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
1177 new_inst->dst.reg_offset += i;
1178 for (unsigned j = 0; j < new_inst->sources; j++)
1179 if (new_inst->src[j].file == GRF)
1180 new_inst->src[j].reg_offset += i;
1181
1182 new_inst->predicate = predicate;
1183 new_inst->conditional_mod = mod;
1184 new_inst->saturate = saturate;
1185 emit(new_inst);
1186 }
1187 }
1188
1189 void
1190 fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1191 unsigned wr_mask, bool saturate)
1192 {
1193 for (unsigned i = 0; i < 4; i++) {
1194 if (!((wr_mask >> i) & 1))
1195 continue;
1196
1197 fs_reg new_dest = dest;
1198 new_dest.reg_offset += i;
1199 fs_reg new_src0 = src0;
1200 if (src0.file == GRF)
1201 new_src0.reg_offset += i;
1202
1203 fs_inst *new_inst = emit_math(op, new_dest, new_src0);
1204 new_inst->saturate = saturate;
1205 }
1206 }
1207
1208 void
1209 fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
1210 fs_reg src1, unsigned wr_mask,
1211 bool saturate)
1212 {
1213 for (unsigned i = 0; i < 4; i++) {
1214 if (!((wr_mask >> i) & 1))
1215 continue;
1216
1217 fs_reg new_dest = dest;
1218 new_dest.reg_offset += i;
1219 fs_reg new_src0 = src0;
1220 if (src0.file == GRF)
1221 new_src0.reg_offset += i;
1222 fs_reg new_src1 = src1;
1223 if (src1.file == GRF)
1224 new_src1.reg_offset += i;
1225
1226 fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
1227 new_inst->saturate = saturate;
1228 }
1229 }
1230
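/* Reduce num_components consecutive components of src to a single value in
 * dest using the given binary opcode (AND/OR for ball/bany, ADD for dot
 * products), combining components pairwise through temporaries as needed.
 */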
1231 void
1232 fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
1233 unsigned num_components)
1234 {
1235 fs_reg src0 = src;
1236 fs_reg src1 = src;
1237 src1.reg_offset++;
1238
1239 if (num_components == 2) {
1240 emit(op, dest, src0, src1);
1241 return;
1242 }
1243
1244 fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
1245 temp1.type = src.type;
1246 emit(op, temp1, src0, src1);
1247
1248 fs_reg src2 = src;
1249 src2.reg_offset += 2;
1250
1251 if (num_components == 3) {
1252 emit(op, dest, temp1, src2);
1253 return;
1254 }
1255
1256 assert(num_components == 4);
1257
1258 fs_reg src3 = src;
1259 src3.reg_offset += 3;
1260 fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
1261 temp2.type = src.type;
1262
1263 emit(op, temp2, src2, src3);
1264 emit(op, dest, temp1, temp2);
1265 }
1266
1267 void
1268 fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
1269 {
1270 fs_reg dest;
1271 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1272 dest = get_nir_dest(instr->dest);
1273
1274 bool has_indirect = false;
1275
1276 switch (instr->intrinsic) {
1277 case nir_intrinsic_discard: {
1278 /* We track our discarded pixels in f0.1. By predicating on it, we can
1279 * update just the flag bits that aren't yet discarded. By emitting a
1280 * CMP of g0 != g0, all our currently executing channels will get turned
1281 * off.
1282 */
1283 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
1284 BRW_REGISTER_TYPE_UW));
1285 fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
1286 BRW_CONDITIONAL_NZ));
1287 cmp->predicate = BRW_PREDICATE_NORMAL;
1288 cmp->flag_subreg = 1;
1289
1290 if (brw->gen >= 6) {
1291 /* For performance, after a discard, jump to the end of the shader.
1292 * Only jump if all relevant channels have been discarded.
1293 */
1294 fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
1295 discard_jump->flag_subreg = 1;
1296
1297 discard_jump->predicate = (dispatch_width == 8)
1298 ? BRW_PREDICATE_ALIGN1_ANY8H
1299 : BRW_PREDICATE_ALIGN1_ANY16H;
1300 discard_jump->predicate_inverse = true;
1301 }
1302
1303 break;
1304 }
1305
1306 case nir_intrinsic_atomic_counter_inc:
1307 case nir_intrinsic_atomic_counter_dec:
1308 case nir_intrinsic_atomic_counter_read: {
1309 unsigned surf_index = prog_data->binding_table.abo_start +
1310 (unsigned) instr->const_index[0];
1311 fs_reg offset = fs_reg(get_nir_src(instr->src[0]));
1312
1313 switch (instr->intrinsic) {
1314 case nir_intrinsic_atomic_counter_inc:
1315 emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
1316 fs_reg(), fs_reg());
1317 break;
1318 case nir_intrinsic_atomic_counter_dec:
1319 emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
1320 fs_reg(), fs_reg());
1321 break;
1322 case nir_intrinsic_atomic_counter_read:
1323 emit_untyped_surface_read(surf_index, dest, offset);
1324 break;
1325 default:
1326 unreachable("Unreachable");
1327 }
1328 break;
1329 }
1330
1331 case nir_intrinsic_load_front_face:
1332 assert(!"TODO");
1333
1334 case nir_intrinsic_load_sample_mask_in: {
1335 fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
1336 assert(sample_mask_in.file != BAD_FILE);
1337 dest.type = sample_mask_in.type;
1338 emit(MOV(dest, sample_mask_in));
1339 break;
1340 }
1341
1342 case nir_intrinsic_load_sample_pos: {
1343 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
1344 assert(sample_pos.file != BAD_FILE);
1345 dest.type = sample_pos.type;
1346 emit(MOV(dest, sample_pos));
1347 emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
1348 break;
1349 }
1350
1351 case nir_intrinsic_load_sample_id: {
1352 fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
1353 assert(sample_id.file != BAD_FILE);
1354 dest.type = sample_id.type;
1355 emit(MOV(dest, sample_id));
1356 break;
1357 }
1358
1359 case nir_intrinsic_load_uniform_indirect:
1360 has_indirect = true;
1361 case nir_intrinsic_load_uniform: {
1362 unsigned index = 0;
1363 for (int i = 0; i < instr->const_index[1]; i++) {
1364 for (unsigned j = 0; j < instr->num_components; j++) {
1365 fs_reg src = nir_uniforms;
1366 src.reg_offset = instr->const_index[0] + index;
1367 if (has_indirect)
1368 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
1369 src.type = dest.type;
1370 index++;
1371
1372 emit(MOV(dest, src));
1373 dest.reg_offset++;
1374 }
1375 }
1376 break;
1377 }
1378
1379 case nir_intrinsic_load_ubo_indirect:
1380 has_indirect = true;
1381 case nir_intrinsic_load_ubo: {
1382 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
1383 fs_reg surf_index;
1384
1385 if (const_index) {
1386 surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
1387 const_index->u[0]);
1388 } else {
1389 /* The block index is not a constant. Evaluate the index expression
1390 * per-channel and add the base UBO index; the generator will select
1391 * a value from any live channel.
1392 */
1393 surf_index = fs_reg(this, glsl_type::uint_type);
1394 emit(ADD(surf_index, get_nir_src(instr->src[0]),
1395 fs_reg(stage_prog_data->binding_table.ubo_start)))
1396 ->force_writemask_all = true;
1397
1398 /* Assume this may touch any UBO. It would be nice to provide
1399 * a tighter bound, but the array information is already lowered away.
1400 */
1401 brw_mark_surface_used(prog_data,
1402 stage_prog_data->binding_table.ubo_start +
1403 shader_prog->NumUniformBlocks - 1);
1404 }
1405
1406 if (has_indirect) {
1407 /* Turn the byte offset into a dword offset. */
1408 fs_reg base_offset = fs_reg(this, glsl_type::int_type);
1409 emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
1410 BRW_REGISTER_TYPE_D),
1411 fs_reg(2)));
1412
1413 unsigned vec4_offset = instr->const_index[0] / 4;
1414 for (int i = 0; i < instr->num_components; i++)
1415 emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
1416 base_offset, vec4_offset + i));
1417 } else {
1418 fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
1419 packed_consts.type = dest.type;
1420
1421 fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
1422 emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
1423 surf_index, const_offset_reg);
1424
1425 for (unsigned i = 0; i < instr->num_components; i++) {
1426 packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
1427
1428 /* The std140 packing rules don't allow vectors to cross 16-byte
1429 * boundaries, and a reg is 32 bytes.
1430 */
1431 assert(packed_consts.subreg_offset < 32);
1432
1433 emit(MOV(dest, packed_consts));
1434 dest.reg_offset++;
1435 }
1436 }
1437 break;
1438 }
1439
1440 case nir_intrinsic_load_input_indirect:
1441 has_indirect = true;
1442 case nir_intrinsic_load_input: {
1443 unsigned index = 0;
1444 for (int i = 0; i < instr->const_index[1]; i++) {
1445 for (unsigned j = 0; j < instr->num_components; j++) {
1446 fs_reg src = nir_inputs;
1447 src.reg_offset = instr->const_index[0] + index;
1448 if (has_indirect)
1449 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
1450 src.type = dest.type;
1451 index++;
1452
1453 emit(MOV(dest, src));
1454 dest.reg_offset++;
1455 }
1456 }
1457 break;
1458 }
1459
1460 /* Handle ARB_gpu_shader5 interpolation intrinsics
1461 *
1462 * It's worth a quick word of explanation as to why we handle the full
1463 * variable-based interpolation intrinsic rather than a lowered version
1464 * like we do for other inputs. We have to do that because the way
1465 * we set up inputs doesn't allow us to use the already setup inputs for
1466 * interpolation. At the beginning of the shader, we go through all of
1467 * the input variables and do the initial interpolation and put it in
1468 * the nir_inputs array based on its location as determined in
1469 * nir_lower_io. If the input isn't used, dead code cleans up and
1470 * everything works fine. However, when we get to the ARB_gpu_shader5
1471 * interpolation intrinsics, we need to reinterpolate the input
1472 * differently. If we used an intrinsic that just had an index it would
1473 * only give us the offset into the nir_inputs array. However, this is
1474 * useless because that value is post-interpolation and we need
1475 * pre-interpolation. In order to get the actual location of the bits
1476 * we get from the vertex fetching hardware, we need the variable.
1477 */
1478 case nir_intrinsic_interp_var_at_centroid:
1479 case nir_intrinsic_interp_var_at_sample:
1480 case nir_intrinsic_interp_var_at_offset: {
1481 /* In SIMD16 mode, the pixel interpolator returns coords interleaved
1482 * 8 channels at a time, same as the barycentric coords presented in
1483 * the FS payload. This requires a bit of extra work to support.
1484 */
1485 no16("interpolate_at_* not yet supported in SIMD16 mode.");
1486
1487 fs_reg dst_x(GRF, virtual_grf_alloc(2), BRW_REGISTER_TYPE_F);
1488 fs_reg dst_y = offset(dst_x, 1);
1489
1490 /* For most messages, we need one reg of ignored data; the hardware
1491 * requires mlen==1 even when there is no payload. In the per-slot
1492 * offset case, we'll replace this with the proper source data.
1493 */
1494 fs_reg src(this, glsl_type::float_type);
1495 int mlen = 1; /* one reg unless overridden */
1496 fs_inst *inst;
1497
1498 switch (instr->intrinsic) {
1499 case nir_intrinsic_interp_var_at_centroid:
1500 inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_x, src, fs_reg(0u));
1501 break;
1502
1503 case nir_intrinsic_interp_var_at_sample: {
1504 /* XXX: We should probably handle non-constant sample IDs */
1505 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
1506 assert(const_sample);
1507 unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
1508 inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_x, src,
1509 fs_reg(msg_data));
1510 break;
1511 }
1512
1513 case nir_intrinsic_interp_var_at_offset: {
1514 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
1515
1516 if (const_offset) {
1517 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
1518 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
1519
1520 inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
1521 fs_reg(off_x | (off_y << 4)));
1522 } else {
1523 src = fs_reg(this, glsl_type::ivec2_type);
1524 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
1525 BRW_REGISTER_TYPE_F);
1526 for (int i = 0; i < 2; i++) {
1527 fs_reg temp(this, glsl_type::float_type);
1528 emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
1529 fs_reg itemp(this, glsl_type::int_type);
1530 emit(MOV(itemp, temp)); /* float to int */
1531
1532 /* Clamp the upper end of the range to +7/16.
1533 * ARB_gpu_shader5 requires that we support a maximum offset
1534 * of +0.5, which isn't representable in a S0.4 value -- if
1535 * we didn't clamp it, we'd end up with -8/16, which is the
1536 * opposite of what the shader author wanted.
1537 *
1538 * This is legal due to ARB_gpu_shader5's quantization
1539 * rules:
1540 *
1541 * "Not all values of <offset> may be supported; x and y
1542 * offsets may be rounded to fixed-point values with the
1543 * number of fraction bits given by the
1544 * implementation-dependent constant
1545 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
1546 */
1547
1548 emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
1549 ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
1550 }
1551
1552 mlen = 2;
1553 inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_x, src,
1554 fs_reg(0u));
1555 }
1556 break;
1557 }
1558
1559 default:
1560 unreachable("Invalid intrinsic");
1561 }
1562
1563 inst->mlen = mlen;
1564 inst->regs_written = 2; /* 2 floats per slot returned */
1565 inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
1566 INTERP_QUALIFIER_NOPERSPECTIVE;
1567
1568 for (unsigned j = 0; j < instr->num_components; j++) {
1569 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
1570 src.type = dest.type;
1571
1572 emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
1573 dest.reg_offset++;
1574 }
1575 break;
1576 }
1577
1578 case nir_intrinsic_store_output_indirect:
1579 has_indirect = true;
1580 case nir_intrinsic_store_output: {
1581 fs_reg src = get_nir_src(instr->src[0]);
1582 unsigned index = 0;
1583 for (int i = 0; i < instr->const_index[1]; i++) {
1584 for (unsigned j = 0; j < instr->num_components; j++) {
1585 fs_reg new_dest = nir_outputs;
1586 new_dest.reg_offset = instr->const_index[0] + index;
1587 if (has_indirect)
1588 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
1589 new_dest.type = src.type;
1590 index++;
1591 emit(MOV(new_dest, src));
1592 src.reg_offset++;
1593 }
1594 }
1595 break;
1596 }
1597
1598 default:
1599 unreachable("unknown intrinsic");
1600 }
1601 }
1602
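/* Gather the texture instruction's sources into the registers that
 * emit_texture() expects, translate the NIR texture opcode into an
 * ir_texture_opcode, and copy the result into the NIR destination.
 */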
1603 void
1604 fs_visitor::nir_emit_texture(nir_tex_instr *instr)
1605 {
1606 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
1607 unsigned sampler = instr->sampler_index;
1608 fs_reg sampler_reg(sampler);
1609
1610 /* FINISHME: We're failing to recompile our programs when the sampler is
1611 * updated. This only matters for the texture rectangle scale parameters
1612 * (pre-gen6, or gen6+ with GL_CLAMP).
1613 */
1614 int texunit = prog->SamplerUnits[sampler];
1615
1616 int gather_component = instr->component;
1617
1618 bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
1619
1620 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
1621 instr->is_array;
1622
1623 int lod_components = 0, offset_components = 0;
1624
1625 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;
1626
1627 for (unsigned i = 0; i < instr->num_srcs; i++) {
1628 fs_reg src = get_nir_src(instr->src[i].src);
1629 switch (instr->src[i].src_type) {
1630 case nir_tex_src_bias:
1631 lod = retype(src, BRW_REGISTER_TYPE_F);
1632 break;
1633 case nir_tex_src_comparitor:
1634 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
1635 break;
1636 case nir_tex_src_coord:
1637 switch (instr->op) {
1638 case nir_texop_txf:
1639 case nir_texop_txf_ms:
1640 coordinate = retype(src, BRW_REGISTER_TYPE_D);
1641 break;
1642 default:
1643 coordinate = retype(src, BRW_REGISTER_TYPE_F);
1644 break;
1645 }
1646 break;
1647 case nir_tex_src_ddx:
1648 lod = retype(src, BRW_REGISTER_TYPE_F);
1649 lod_components = nir_tex_instr_src_size(instr, i);
1650 break;
1651 case nir_tex_src_ddy:
1652 lod2 = retype(src, BRW_REGISTER_TYPE_F);
1653 break;
1654 case nir_tex_src_lod:
1655 switch (instr->op) {
1656 case nir_texop_txs:
1657 lod = retype(src, BRW_REGISTER_TYPE_UD);
1658 break;
1659 case nir_texop_txf:
1660 lod = retype(src, BRW_REGISTER_TYPE_D);
1661 break;
1662 default:
1663 lod = retype(src, BRW_REGISTER_TYPE_F);
1664 break;
1665 }
1666 break;
1667 case nir_tex_src_ms_index:
1668 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
1669 break;
1670 case nir_tex_src_offset:
1671 offset = retype(src, BRW_REGISTER_TYPE_D);
1672 if (instr->is_array)
1673 offset_components = instr->coord_components - 1;
1674 else
1675 offset_components = instr->coord_components;
1676 break;
1677 case nir_tex_src_projector:
1678 unreachable("should be lowered");
1679
1680 case nir_tex_src_sampler_offset: {
1681 /* Figure out the highest possible sampler index and mark it as used */
1682 uint32_t max_used = sampler + instr->sampler_array_size - 1;
1683 if (instr->op == nir_texop_tg4 && brw->gen < 8) {
1684 max_used += stage_prog_data->binding_table.gather_texture_start;
1685 } else {
1686 max_used += stage_prog_data->binding_table.texture_start;
1687 }
1688 brw_mark_surface_used(prog_data, max_used);
1689
1690 /* Emit code to evaluate the actual indexing expression */
1691 sampler_reg = fs_reg(this, glsl_type::uint_type);
1692 emit(ADD(sampler_reg, src, fs_reg(sampler)))
1693 ->force_writemask_all = true;
1694 break;
1695 }
1696
1697 default:
1698 unreachable("unknown texture source");
1699 }
1700 }
1701
1702 if (instr->op == nir_texop_txf_ms) {
1703 if (brw->gen >= 7 && key->tex.compressed_multisample_layout_mask & (1<<sampler))
1704 mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
1705 else
1706 mcs = fs_reg(0u);
1707 }
1708
1709 for (unsigned i = 0; i < 3; i++) {
1710 if (instr->const_offset[i] != 0) {
1711 assert(offset_components == 0);
1712 offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
1713 break;
1714 }
1715 }
1716
1717 enum glsl_base_type dest_base_type;
1718 switch (instr->dest_type) {
1719 case nir_type_float:
1720 dest_base_type = GLSL_TYPE_FLOAT;
1721 break;
1722 case nir_type_int:
1723 dest_base_type = GLSL_TYPE_INT;
1724 break;
1725 case nir_type_unsigned:
1726 dest_base_type = GLSL_TYPE_UINT;
1727 break;
1728 default:
1729 unreachable("bad type");
1730 }
1731
1732 const glsl_type *dest_type =
1733 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
1734 1);
1735
1736 ir_texture_opcode op;
1737 switch (instr->op) {
1738 case nir_texop_lod: op = ir_lod; break;
1739 case nir_texop_query_levels: op = ir_query_levels; break;
1740 case nir_texop_tex: op = ir_tex; break;
1741 case nir_texop_tg4: op = ir_tg4; break;
1742 case nir_texop_txb: op = ir_txb; break;
1743 case nir_texop_txd: op = ir_txd; break;
1744 case nir_texop_txf: op = ir_txf; break;
1745 case nir_texop_txf_ms: op = ir_txf_ms; break;
1746 case nir_texop_txl: op = ir_txl; break;
1747 case nir_texop_txs: op = ir_txs; break;
1748 default:
1749 unreachable("unknown texture opcode");
1750 }
1751
1752 emit_texture(op, dest_type, coordinate, instr->coord_components,
1753 shadow_comparitor, lod, lod2, lod_components, sample_index,
1754 offset, offset_components, mcs, gather_component,
1755 is_cube_array, is_rect, sampler, sampler_reg, texunit);
1756
1757 fs_reg dest = get_nir_dest(instr->dest);
1758 dest.type = this->result.type;
1759 unsigned num_components = nir_tex_instr_dest_size(instr);
1760 emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
1761 }
1762
1763 void
1764 fs_visitor::nir_emit_jump(nir_jump_instr *instr)
1765 {
1766 switch (instr->type) {
1767 case nir_jump_break:
1768 emit(BRW_OPCODE_BREAK);
1769 break;
1770 case nir_jump_continue:
1771 emit(BRW_OPCODE_CONTINUE);
1772 break;
1773 case nir_jump_return:
1774 default:
1775 unreachable("unknown jump");
1776 }
1777 }