i965/fs_nir: Use the new variable lowering code
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/ir.h"
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "brw_fs.h"

void
fs_visitor::emit_nir_code()
{
   /* first, lower the GLSL IR shader to NIR */
   lower_output_reads(shader->base.ir);
   nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
   nir_validate_shader(nir);

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

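   /* Optimize the NIR, iterating until no pass makes further progress;
    * each pass can expose new opportunities for the others.
    */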
   bool progress;
   do {
      progress = false;
      nir_lower_variables(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_ffma(nir);
      nir_validate_shader(nir);
   } while (progress);

   /* Lower I/O, local registers, samplers, system values, and atomics into
    * forms the backend can consume directly.
    */
   nir_lower_io(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);
   nir_convert_from_ssa(nir);
   nir_validate_shader(nir);
   nir_lower_vec_to_movs(nir);
   nir_validate_shader(nir);

   nir_lower_samplers(nir, shader_prog, shader->base.Program);
   nir_validate_shader(nir);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */

   if (nir->num_inputs > 0) {
      nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_uniforms = fs_reg(UNIFORM, 0);
      nir_setup_uniforms(nir);
   }

   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }

   ralloc_free(nir);
}

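/* Set up the FS inputs: gl_FragCoord and gl_FrontFacing are computed from
 * hardware/payload state directly, while everything else goes through the
 * usual varying interpolation path.
 */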
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   fs_reg varying = nir_inputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->inputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      varying.reg_offset = var->data.driver_location;

      fs_reg reg;
      if (!strcmp(var->name, "gl_FragCoord")) {
         reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                             var->data.origin_upper_left);
         emit_percomp(MOV(varying, reg), 0xF);
      } else if (!strcmp(var->name, "gl_FrontFacing")) {
         reg = *emit_frontfacing_interpolation();
         emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
      } else {
         emit_general_interpolation(varying, var->name, var->type,
                                    (glsl_interp_qualifier) var->data.interpolation,
                                    var->data.location, var->data.centroid,
                                    var->data.sample);
      }
   }
}

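/* Point the FS output slots (color regions, depth, sample mask, and the
 * dual-source blend output) at the right offsets within the nir_outputs
 * array.
 */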
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   fs_reg reg = nir_outputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->outputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      reg.reg_offset = var->data.driver_location;

      if (var->data.index > 0) {
         assert(var->data.location == FRAG_RESULT_DATA0);
         assert(var->data.index == 1);
         this->dual_src_output = reg;
         this->do_dual_src = true;
      } else if (var->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = reg;
            this->output_components[i] = 4;
         }
      } else if (var->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = reg;
      } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(var->data.location >= FRAG_RESULT_DATA0 &&
                var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         int vector_elements =
            var->type->is_array() ? var->type->fields.array->vector_elements
                                  : var->type->vector_elements;

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
            int output = var->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = reg;
            this->outputs[output].reg_offset += vector_elements * i;
            this->output_components[output] = vector_elements;
         }
      }
   }
}

void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;
   param_size[0] = shader->num_uniforms;

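   /* Uniform parameters are shared between the SIMD8 and SIMD16 programs,
    * so only register them once, on the SIMD8 compile.
    */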
   if (dispatch_width != 8)
      return;

   struct hash_entry *entry;
   hash_table_foreach(shader->uniforms, entry) {
      nir_variable *var = (nir_variable *) entry->data;

      /* UBOs and atomics don't take up space in the uniform file */

      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (strncmp(var->name, "gl_", 3) == 0)
         nir_setup_builtin_uniform(var);
      else
         nir_setup_uniform(var);
   }
}

void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name. We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}

void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been set up by ir_to_mesa, but
       * we'll get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *) slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}

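/* Allocate a GRF for every local register declared in the function, then
 * emit its body.
 */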
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }

   nir_emit_cf_list(&impl->body);
}

void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   /* first, put the condition into f0 */
   fs_inst *inst = emit(MOV(reg_null_d,
                            retype(get_nir_src(if_stmt->condition),
                                   BRW_REGISTER_TYPE_UD)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);

   try_replace_with_sel();
}

void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_texture:
      nir_emit_texture(nir_instr_as_texture(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}

static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_bool:
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;

   fs_reg op[3];
   fs_reg dest = get_nir_dest(instr->dest.dest);
   dest.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

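   /* If the instruction is predicated, compute the result into a temporary
    * and emit a predicated MOV into the real destination afterwards (see
    * the end of this function).
    */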
   fs_reg result;
   if (instr->has_predicate) {
      result = fs_reg(GRF, virtual_grf_alloc(4));
      result.type = dest.type;
   } else {
      result = dest;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      op[i] = get_nir_alu_src(instr, i);

   switch (instr->op) {
   case nir_op_fmov:
   case nir_op_i2f:
   case nir_op_u2f: {
      fs_inst *inst = MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
   }
   break;

   case nir_op_imov:
   case nir_op_f2i:
   case nir_op_f2u:
      emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
                   instr->dest.write_mask);

      fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      if (instr->dest.saturate) {
         fs_inst *inst = MOV(result, result);
         inst->saturate = true;
         emit_percomp(inst, instr->dest.write_mask);
      }
      break;
   }

   case nir_op_isign: {
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
                   instr->dest.write_mask);

      emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);

      fs_inst *inst = OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_frcp:
      emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp2:
      emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_flog2:
      emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp:
   case nir_op_flog:
      unreachable("not reached: should be handled by ir_explog_to_explog2");

   case nir_op_fsin:
   case nir_op_fsin_reduced:
      emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fcos:
   case nir_op_fcos_reduced:
      emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_fine:
      emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_coarse:
      emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_fine:
      emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_coarse:
      emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fadd:
   case nir_op_iadd: {
      fs_inst *inst = ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fmul: {
      fs_inst *inst = MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_imul: {
      /* TODO put in the 16-bit constant optimization once we have SSA */

      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
                        instr->dest.write_mask);
      break;

   case nir_op_uadd_carry: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_usub_borrow: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_umod:
      emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
                        op[1], instr->dest.write_mask);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
                   instr->dest.write_mask);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
                   instr->dest.write_mask);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
                   instr->dest.write_mask);
      break;

   case nir_op_fne:
   case nir_op_ine:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      temp.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
      break;
   }

   case nir_op_inot:
      emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
      break;
   case nir_op_ixor:
      emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ior:
      emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_iand:
      emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
      break;

   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
      if (instr->dest.saturate) {
         fs_inst *inst = emit(MOV(result, result));
         inst->saturate = true;
      }
      break;
   }

   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
      break;
   }

   case nir_op_ball2:
   case nir_op_ball3:
   case nir_op_ball4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
      break;
   }

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_quadop_vector");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_frsq:
      emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_b2i:
      emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
      break;
   case nir_op_b2f: {
      emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
                       fs_reg(0x3f800000u)),
                   instr->dest.write_mask);
      break;
   }

   case nir_op_f2b:
      emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_i2b:
      emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;

   case nir_op_ftrunc: {
      fs_inst *inst = RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
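   /* fceil is computed as -floor(-x), via RNDD on the negated source. */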
   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = fs_reg(this, glsl_type::vec4_type);
      emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
      temp.negate = true;
      fs_inst *inst = MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_ffloor: {
      fs_inst *inst = RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_ffract: {
      fs_inst *inst = FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_fround_even: {
      fs_inst *inst = RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_unpack_half_2x16_split_y:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fpow:
      emit_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
                   instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_bitfield_reverse:
      emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_bit_count:
      emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
                   instr->dest.write_mask);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the
       * count from the LSB side. If FBH didn't return an error (0xFFFFFFFF),
       * then subtract the result from 31 to convert the MSB count into an
       * LSB count.
       */

      emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      fs_reg neg_result(result);
      neg_result.negate = true;
      fs_inst *inst = ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_find_lsb:
      emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;
   case nir_op_bfm:
      emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_bfi:
      emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ishr:
      emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ushr:
      emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
      break;

   case nir_op_pack_half_2x16_split:
      emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
                   instr->dest.write_mask);
      break;

   case nir_op_ffma:
      emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;

   case nir_op_flrp:
      /* TODO emulate for gen < 6 */
      emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;

   case nir_op_bcsel:
      emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      emit_percomp(BRW_OPCODE_SEL, result, op[1], op[2],
                   instr->dest.write_mask, false, BRW_PREDICATE_NORMAL);
      break;

   default:
      unreachable("unhandled instruction");
   }

   /* emit a predicated move if there was predication */
   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = MOV(dest, result);
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
   }
}

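/* Resolve a NIR source to an fs_reg. The only SSA values that can reach
 * this point are load_const results (everything else was lowered to
 * registers), so those are materialized as immediates via per-component
 * MOVs into a fresh GRF.
 */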
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   if (src.is_ssa) {
      assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
      nir_load_const_instr *load =
         nir_instr_as_load_const(src.ssa->parent_instr);
      fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
                 BRW_REGISTER_TYPE_D);

      for (unsigned i = 0; i < src.ssa->num_components; ++i)
         emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));

      return reg;
   } else {
      fs_reg reg;
      if (src.reg.reg->is_global)
         reg = nir_globals[src.reg.reg->index];
      else
         reg = nir_locals[src.reg.reg->index];

      /* To avoid floating-point denorm flushing problems, set the type by
       * default to D; instructions that need floating-point semantics will
       * override this with F.
       */
      reg.type = BRW_REGISTER_TYPE_D;
      reg.reg_offset = src.reg.base_offset;
      if (src.reg.indirect) {
         reg.reladdr = new(mem_ctx) fs_reg();
         *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
                               BRW_REGISTER_TYPE_D);
      }

      return reg;
   }
}

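/* Fetch an ALU source, applying the abs/negate modifiers and, when the
 * source uses a non-identity swizzle, resolving it with per-component MOVs
 * into a temporary, since the scalar backend has no native swizzles.
 */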
fs_reg
fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
{
   fs_reg reg = get_nir_src(instr->src[src].src);

   reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
   reg.abs = instr->src[src].abs;
   reg.negate = instr->src[src].negate;

   bool needs_swizzle = false;
   unsigned num_components = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_alu_instr_channel_used(instr, src, i))
         continue;

      if (instr->src[src].swizzle[i] != i)
         needs_swizzle = true;

      num_components = i + 1;
   }

   if (needs_swizzle) {
      /* resolve the swizzle through MOVs */
      fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components),
                              reg.type);

      for (unsigned i = 0; i < 4; i++) {
         if (!nir_alu_instr_channel_used(instr, src, i))
            continue;

         emit(MOV(offset(new_reg, i),
                  offset(reg, instr->src[src].swizzle[i])));
      }

      return new_reg;
   }

   return reg;
}

fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   fs_reg reg;
   if (dest.reg.reg->is_global)
      reg = nir_globals[dest.reg.reg->index];
   else
      reg = nir_locals[dest.reg.reg->index];

   reg.reg_offset = dest.reg.base_offset;
   if (dest.reg.indirect) {
      reg.reladdr = new(mem_ctx) fs_reg();
      *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}

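/* Emit one copy of the given scalar instruction per enabled write-mask
 * channel, bumping the destination (and any GRF source) register offset for
 * each component. For example, a MOV with write mask 0x5 (.xz) becomes two
 * MOVs: one at reg_offset + 0 and one at reg_offset + 2.
 */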
void
fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      emit(new_inst);
   }
}

void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                         unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}

void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                         fs_reg src1, unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}

void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              unsigned wr_mask, bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0);
      new_inst->saturate = saturate;
   }
}

void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              fs_reg src1, unsigned wr_mask,
                              bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;
      fs_reg new_src1 = src1;
      if (src1.file == GRF)
         new_src1.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
      new_inst->saturate = saturate;
   }
}

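/* Fold the first num_components channels of src into a single value in dest
 * by applying op pairwise, tree-style: with four components this computes
 * dest = op(op(c0, c1), op(c2, c3)).
 */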
void
fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
                           unsigned num_components)
{
   fs_reg src0 = src;
   fs_reg src1 = src;
   src1.reg_offset++;

   if (num_components == 2) {
      emit(op, dest, src0, src1);
      return;
   }

   fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
   temp1.type = src.type;
   emit(op, temp1, src0, src1);

   fs_reg src2 = src;
   src2.reg_offset += 2;

   if (num_components == 3) {
      emit(op, dest, temp1, src2);
      return;
   }

   assert(num_components == 4);

   fs_reg src3 = src;
   src3.reg_offset += 3;
   fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
   temp2.type = src.type;

   emit(op, temp2, src2, src3);
   emit(op, dest, temp1, temp2);
}

void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);
   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_discard: {
      /* We track our discarded pixels in f0.1. By predicating on it, we can
       * update just the flag bits that aren't yet discarded. By emitting a
       * CMP of g0 != g0, all our currently executing channels will get
       * turned off.
       */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                               BRW_REGISTER_TYPE_UW));
      fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
                              BRW_CONDITIONAL_NZ));
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (brw->gen >= 6) {
         /* For performance, after a discard, jump to the end of the shader.
          * Only jump if all relevant channels have been discarded.
          */
         fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
         discard_jump->flag_subreg = 1;

         discard_jump->predicate = (dispatch_width == 8)
                                   ? BRW_PREDICATE_ALIGN1_ANY8H
                                   : BRW_PREDICATE_ALIGN1_ANY16H;
         discard_jump->predicate_inverse = true;
      }

      break;
   }

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }

   case nir_intrinsic_load_front_face:
      assert(!"TODO");

   case nir_intrinsic_load_sample_mask_in: {
      assert(brw->gen >= 7);
      fs_reg reg = fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                          BRW_REGISTER_TYPE_D));
      dest.type = reg.type;
      fs_inst *inst = MOV(dest, reg);
      if (instr->has_predicate)
         inst->predicate = BRW_PREDICATE_NORMAL;
      emit(inst);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg *reg = emit_samplepos_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      emit(MOV(offset(dest, 1), offset(*reg, 1)));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg *reg = emit_sampleid_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      break;
   }

   case nir_intrinsic_load_uniform_vec1:
   case nir_intrinsic_load_uniform_vec2:
   case nir_intrinsic_load_uniform_vec3:
   case nir_intrinsic_load_uniform_vec4: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components;
              j++) {
            fs_reg src = nir_uniforms;
            src.reg_offset = instr->const_index[0] + index;
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }

   case nir_intrinsic_load_uniform_vec1_indirect:
   case nir_intrinsic_load_uniform_vec2_indirect:
   case nir_intrinsic_load_uniform_vec3_indirect:
   case nir_intrinsic_load_uniform_vec4_indirect: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components;
              j++) {
            fs_reg src = nir_uniforms;
            src.reg_offset = instr->const_index[0] + index;
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.reladdr->type = BRW_REGISTER_TYPE_D;
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }

   case nir_intrinsic_load_ubo_vec1:
   case nir_intrinsic_load_ubo_vec2:
   case nir_intrinsic_load_ubo_vec3:
   case nir_intrinsic_load_ubo_vec4: {
      fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start +
                                 (unsigned) instr->const_index[0]);
      fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
      packed_consts.type = dest.type;

      fs_reg const_offset_reg = fs_reg((unsigned) instr->const_index[1] & ~15);
      emit(new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
                                packed_consts, surf_index,
                                const_offset_reg));

      for (unsigned i = 0;
           i < nir_intrinsic_infos[instr->intrinsic].dest_components; i++) {
         packed_consts.set_smear(instr->const_index[1] % 16 / 4 + i);

         /* The std140 packing rules don't allow vectors to cross 16-byte
          * boundaries, and a reg is 32 bytes.
          */
         assert(packed_consts.subreg_offset < 32);

         fs_inst *inst = MOV(dest, packed_consts);
         if (instr->has_predicate)
            inst->predicate = BRW_PREDICATE_NORMAL;
         emit(inst);

         dest.reg_offset++;
      }
      break;
   }

   case nir_intrinsic_load_ubo_vec1_indirect:
   case nir_intrinsic_load_ubo_vec2_indirect:
   case nir_intrinsic_load_ubo_vec3_indirect:
   case nir_intrinsic_load_ubo_vec4_indirect: {
      fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start +
                                 instr->const_index[0]);
      /* Turn the byte offset into a dword offset. */
      unsigned base_offset = instr->const_index[1] / 4;
      fs_reg offset = fs_reg(this, glsl_type::int_type);
      emit(SHR(offset, retype(get_nir_src(instr->src[0]),
                              BRW_REGISTER_TYPE_D),
               fs_reg(2)));

      for (unsigned i = 0;
           i < nir_intrinsic_infos[instr->intrinsic].dest_components; i++) {
         exec_list list = VARYING_PULL_CONSTANT_LOAD(dest, surf_index,
                                                     offset,
                                                     base_offset + i);
         fs_inst *last_inst = (fs_inst *) list.get_tail();
         if (instr->has_predicate)
            last_inst->predicate = BRW_PREDICATE_NORMAL;
         emit(list);

         dest.reg_offset++;
      }
      break;
   }

   case nir_intrinsic_load_input_vec1:
   case nir_intrinsic_load_input_vec2:
   case nir_intrinsic_load_input_vec3:
   case nir_intrinsic_load_input_vec4: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components;
              j++) {
            fs_reg src = nir_inputs;
            src.reg_offset = instr->const_index[0] + index;
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }

   case nir_intrinsic_load_input_vec1_indirect:
   case nir_intrinsic_load_input_vec2_indirect:
   case nir_intrinsic_load_input_vec3_indirect:
   case nir_intrinsic_load_input_vec4_indirect: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components;
              j++) {
            fs_reg src = nir_inputs;
            src.reg_offset = instr->const_index[0] + index;
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.reladdr->type = BRW_REGISTER_TYPE_D;
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }

   case nir_intrinsic_store_output_vec1:
   case nir_intrinsic_store_output_vec2:
   case nir_intrinsic_store_output_vec3:
   case nir_intrinsic_store_output_vec4: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].src_components[0];
              j++) {
            fs_reg new_dest = nir_outputs;
            new_dest.reg_offset = instr->const_index[0] + index;
            new_dest.type = src.type;
            index++;
            fs_inst *inst = MOV(new_dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            src.reg_offset++;
         }
      }
      break;
   }

   case nir_intrinsic_store_output_vec1_indirect:
   case nir_intrinsic_store_output_vec2_indirect:
   case nir_intrinsic_store_output_vec3_indirect:
   case nir_intrinsic_store_output_vec4_indirect: {
      fs_reg src = get_nir_src(instr->src[0]);
      fs_reg indirect = get_nir_src(instr->src[1]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].src_components[0];
              j++) {
            fs_reg new_dest = nir_outputs;
            new_dest.reg_offset = instr->const_index[0] + index;
            new_dest.reladdr = new(mem_ctx) fs_reg(indirect);
            new_dest.type = src.type;
            index++;
            fs_inst *inst = MOV(new_dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            src.reg_offset++;
         }
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}

void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   unsigned sampler = instr->sampler_index;

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated. This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0, offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;

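   /* Gather the sources into the registers emit_texture() expects; the
    * register type required for each source depends on both the source slot
    * and the texture opcode.
    */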
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i]);
      switch (instr->src_type[i]) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");
      case nir_tex_src_sampler_index:
         unreachable("not yet supported");
      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms) {
      if (brw->gen >= 7 &&
          key->tex.compressed_multisample_layout_mask & (1 << sampler))
         mcs = emit_mcs_fetch(coordinate, instr->coord_components,
                              fs_reg(sampler));
      else
         mcs = fs_reg(0u);
   }

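   /* A non-zero constant offset overrides any variable offset source; the
    * assert below checks that the two are never combined.
    */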
   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
         break;
      }
   }

   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                offset, offset_components, mcs, gather_component,
                is_cube_array, is_rect, sampler, fs_reg(sampler), texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}

void
fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   /* Bail on SSA constant loads; these are consumed as immediates at their
    * uses (see get_nir_src).
    */
   if (instr->dest.is_ssa)
      return;

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = BRW_REGISTER_TYPE_UD;
   if (instr->array_elems == 0) {
      for (unsigned i = 0; i < instr->num_components; i++) {
         emit(MOV(dest, fs_reg(instr->value.u[i])));
         dest.reg_offset++;
      }
   } else {
      for (unsigned i = 0; i < instr->array_elems; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            emit(MOV(dest, fs_reg(instr->array[i].u[j])));
            dest.reg_offset++;
         }
      }
   }
}

void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}