/* mesa.git: src/mesa/drivers/dri/i965/brw_vec4_nir.cpp */
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_program.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_setup_system_values();

   /* get the main function and emit it */
   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}

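/* Pre-allocate a register for a system value the first time an intrinsic
 * asks for it; values that are never requested stay BAD_FILE.
 */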
void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_instance:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_INSTANCE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_draw_id:
      reg = &nir_system_values[SYSTEM_VALUE_DRAW_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_DRAW_ID,
                                           glsl_type::int_type);
      break;

   default:
      break;
   }
}

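/* nir_foreach_block() callback that walks a block looking for system value
 * intrinsics so their registers can be set up front.
 */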
static bool
setup_system_values_block(nir_block *block, void *void_visitor)
{
   vec4_visitor *v = (vec4_visitor *)void_visitor;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}

void
vec4_visitor::nir_setup_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = dst_reg();
   }

   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(function->impl, setup_system_values_block, this);
   }
}

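/* nir->num_uniforms is in bytes; the vec4 backend counts uniforms in
 * 16-byte (vec4) units.
 */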
void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;

      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(array_elems));
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}

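/* Resolve a NIR register access to a dst_reg, applying the constant base
 * offset and, for indirect access, a reladdr source.
 */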
static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}

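/* SSA definitions get a fresh VGRF, recorded so later uses can find it;
 * non-SSA destinations map back onto the NIR register file.
 */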
dst_reg
vec4_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      dst_reg dst = dst_reg(VGRF, alloc.allocate(1));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}

src_reg
vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(nir_src src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(nir_src src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int, num_components);
}

src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);
   nir_const_value *const_value = nir_src_as_const_value(*offset_src);

   if (const_value) {
      /* The only constant offset we should find is 0. brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(const_value->u32[0] == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}

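/* Emit the MOVs that materialize an immediate vector, sharing one MOV
 * between components that hold the same value.
 */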
void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg = dst_reg(VGRF, alloc.allocate(1));
   reg.type = BRW_REGISTER_TYPE_D;

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if (instr->value.u32[i] == instr->value.u32[j]) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      emit(MOV(reg, brw_imm_d(instr->value.i32[i])));

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

      /* We set EmitNoIndirectInput for VS */
      assert(const_offset);

      src = src_reg(ATTR, instr->const_index[0] + const_offset->u32[0],
                    glsl_type::uvec4_type);

      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      assert(const_offset);

      int varying = instr->const_index[0] + const_offset->u32[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      output_reg[varying] = dst_reg(src);
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }

   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      /* Offset */
      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[2], 1);
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[0];

      /* IvyBridge does not have a native SIMD4x2 untyped write message so
       * untyped writes will use SIMD8 mode. In order to hide this and keep
       * symmetry across typed and untyped messages and across hardware
       * platforms, the current implementation of the untyped messages will
       * transparently convert the SIMD4x2 payload into an equivalent SIMD8
       * payload by transposing it and enabling only channel X on the SEND
       * instruction.
       *
       * The above works well for full vector writes, but not for partial
       * writes where we want to write some channels and not others, like
       * when we have code such as v.xyw = vec3(1,2,4). Because the untyped
       * write messages are quite restrictive with regards to the channel
       * enables we can configure in the message descriptor (not all
       * combinations are allowed) we cannot simply implement these scenarios
       * with a single message while keeping the aforementioned symmetry in
       * the implementation. For now we decided that it is better to keep the
       * symmetry to reduce complexity, so in situations such as the one
       * described we end up emitting two untyped write messages (one for xy
       * and another for w).
       *
       * The code below packs consecutive channels into a single write
       * message, detects gaps in the vector write and if needed, sends a
       * second message with the remaining channels. If in the future we
       * decide that we want to emit a single message at the expense of
       * losing the symmetry in the implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write
       *    SIMD8 message payload. In this mode we can write up to 8 offsets
       *    and dwords to the red channel only (for the two vec4s in the
       *    SIMD4x2 execution) and select which of the 8 channels carry data
       *    to write by setting the appropriate writemask in the dst register
       *    of the SEND instruction. It would require writing a new generator
       *    opcode specifically for IvyBridge since we would need to prepare
       *    a SIMD8 payload that could use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the
       *    writemask on the dst of the SEND instruction to select the
       *    channels we want to write. It would require modifying the current
       *    messages to receive and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      int swizzle[4] = { 0, 0, 0, 0};
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in
          * the vector, so write the channels we accumulated until now, if
          * any. Do the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (offset_reg.file == IMM) {
                     offset_reg.ud += 4 * skipped_channels;
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the
                * channels we need to write and send the write message. This
                * will write num_channels consecutive dwords starting at
                * offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just
                * written now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase skipped count */
            skipped_channels++;
         }
      }

      break;
   }

   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      src_reg surf_index;
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any SSBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered
          * away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[1], 1);
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size */,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));

      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }

   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 16 */
      assert(instr->const_index[0] % 16 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, instr->const_index[0] / 16));
      src.type = dest.type;

      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
      if (const_offset) {
         /* Offsets are in bytes but they should always be multiples of 16 */
         assert(const_offset->u32[0] % 16 == 0);
         src.reg_offset = const_offset->u32[0] / 16;

         emit(MOV(dest, src));
      } else {
         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }

   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      src_reg offset = get_nir_src(instr->src[0], nir_type_int,
                                   instr->num_components);
      const src_reg surface = brw_imm_ud(surf_index);
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      src_reg tmp;

      dest = get_nir_dest(instr->dest);

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         tmp = emit_untyped_atomic(bld, surface, offset,
                                   src_reg(), src_reg(),
                                   1, 1,
                                   BRW_AOP_INC);
         break;
      case nir_intrinsic_atomic_counter_dec:
         tmp = emit_untyped_atomic(bld, surface, offset,
                                   src_reg(), src_reg(),
                                   1, 1,
                                   BRW_AOP_PREDEC);
         break;
      case nir_intrinsic_atomic_counter_read:
         tmp = emit_untyped_read(bld, surface, offset, 1, 1);
         break;
      default:
         unreachable("Unreachable");
      }

      bld.MOV(retype(dest, tmp.type), tmp);
      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }

   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table
          * entry as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                const_block_index->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered
          * away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info.num_ubos - 1);
      }

      src_reg offset;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset = brw_imm_ud(const_offset->u32[0] & ~15);
      } else {
         offset = get_nir_src(instr->src[1], nir_type_int, 1);
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (const_offset) {
         packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4);
      }

      emit(MOV(dest, packed_consts));
      break;
   }

   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

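/* Common path for all SSBO atomics: resolve the surface and offset, gather
 * one data operand (two for CMPWR) and emit the untyped atomic message.
 */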
void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ssbo_start +
                            const_surface->u32[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));

      /* Assume this may touch any SSBO. This is the same as we do for other
       * UBO/SSBO accesses with a non-constant surface.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}

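/* Try to fold an any()/all() vector comparison feeding a bcsel directly
 * into an ANY4H/ALL4H predicate so the intermediate boolean is never
 * materialized. Returns false when the condition is not such a comparison
 * and the caller must emit a regular CMP instead.
 */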
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      op[i] = get_nir_src(cmp_instr->src[i].src,
                          nir_op_infos[cmp_instr->op].input_types[i], 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_conditional_for_nir_comparison(cmp_instr->op)));

   return true;
}

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   dst_reg dst = get_nir_dest(instr->dest.dest,
                              nir_op_infos[instr->op].output_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src,
                          nir_op_infos[instr->op].input_types[i], 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fadd:
      /* fall through */
   case nir_op_iadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one
          * of the operands (src0 through SNB, src1 on IVB and later). The
          * MACH accumulates in the contribution of the upper 16 bits of
          * that operand. If we can determine that one of the args is in the
          * low 16 bits, though, we can just emit a single MUL.
          */
         if (value0 && value0->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      if (!compiler->precise_trig) {
         inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      } else {
         src_reg tmp = src_reg(this, glsl_type::vec4_type);
         inst = emit_math(SHADER_OPCODE_SIN, dst_reg(tmp), op[0]);
         inst = emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      if (!compiler->precise_trig) {
         inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      } else {
         src_reg tmp = src_reg(this, glsl_type::vec4_type);
         inst = emit_math(SHADER_OPCODE_COS, dst_reg(tmp), op[0]);
         inst = emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder. If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different. We can then use a conditional modifier to
       * turn that into a predicate. This leads us to an XOR.l instruction.
       */
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      emit(MOV(dst, negate(op[0])));
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of
       * emit_unpack_half_2x16 uses the source operand in an operation with
       * WRITEMASK_Y while our source operand has only size 1, it would
       * access incorrect data, producing regressions in Piglit. We repeat
       * the swizzle of the first component on the rest of the components to
       * avoid regressions. In the vec4_visitor IR code path this is not
       * needed because the operand has already the correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB
       * count.
       */
      src_reg src(dst);
      emit(CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ));

      inst = emit(ADD(dst, src, brw_imm_d(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->src[0].negate = true;
      break;
   }

   case nir_op_find_lsb:
      emit(FBL(dst, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;

   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
   case nir_op_fsat:
      unreachable("not reached: should be lowered by lower_source_mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}

enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   switch (alu_type) {
   case nir_type_float:
      return glsl_type::vec(components);
   case nir_type_int:
      return glsl_type::ivec(components);
   case nir_type_uint:
      return glsl_type::uvec(components);
   case nir_type_bool:
      return glsl_type::bvec(components);
   default:
      return glsl_type::error_type;
   }
}

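/* Translate a NIR texture instruction into the operands expected by
 * emit_texture(), resolving constant offsets, MCS data for compressed
 * multisample surfaces, and indirect texture/sampler indexing.
 */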
void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparitor;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparitor:
         shadow_comparitor = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         if (const_offset) {
            constant_offset = brw_texture_offset(const_offset->i32, 3);
         } else {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;
      }

      case nir_tex_src_texture_offset: {
         /* The highest texture which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->texture_array_size;
         uint32_t max_used = texture + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   bool is_cube_array =
      instr->op == nir_texop_txs &&
      instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
      instr->is_array;

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparitor,
                lod, lod2, sample_index,
                constant_offset, offset_value,
                mcs, is_cube_array,
                texture, texture_reg, sampler, sampler_reg);
}

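/* An SSA undef only needs a register allocated; its contents are never
 * read with any defined meaning.
 */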
void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(VGRF, alloc.allocate(1));
}

} /* namespace brw */