intel/compiler: Move Gen4/5 rounding to visitor
mesa.git: src/intel/compiler/brw_vec4_nir.cpp
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_eu.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
}

void
vec4_visitor::nir_setup_uniforms()
{
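   /* nir->num_uniforms is counted in bytes here, and the vec4 backend
    * counts uniforms in 16-byte vec4 slots, hence the division by 16.
    */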
   uniforms = nir->num_uniforms / 16;
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));

      if (reg->bit_size == 64)
         nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node type");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      unreachable("VS instruction not yet implemented by NIR->vec4");
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   if (nir_reg->bit_size == 64)
      reg.type = BRW_REGISTER_TYPE_DF;
   reg = offset(reg, 8, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      dst_reg dst =
         dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
      if (dest.ssa.bit_size == 64)
         dst.type = BRW_REGISTER_TYPE_DF;
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(devinfo, type));
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(devinfo, type),
                      num_components);
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int32, num_components);
}

src_reg
vec4_visitor::get_nir_src_imm(const nir_src &src)
{
   assert(nir_src_num_components(src) == 1);
   assert(nir_src_bit_size(src) == 32);
   return nir_src_is_const(src) ? src_reg(brw_imm_d(nir_src_as_int(src))) :
                                  get_nir_src(src, 1);
}

src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);

   if (nir_src_is_const(*offset_src)) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(nir_src_as_uint(*offset_src) == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}

static src_reg
setup_imm_df(const vec4_builder &bld, double v)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8)
      return brw_imm_df(v);

   /* gen7.5 does not support DF immediates directly, but the DIM
    * instruction allows us to set the 64-bit immediate value.
    */
   if (devinfo->is_haswell) {
      const vec4_builder ubld = bld.exec_all();
      const dst_reg dst = bld.vgrf(BRW_REGISTER_TYPE_DF);
      ubld.DIM(dst, brw_imm_df(v));
      return swizzle(src_reg(dst), BRW_SWIZZLE_XXXX);
   }

   /* gen7 does not support DF immediates */
   union {
      double d;
      struct {
         uint32_t i1;
         uint32_t i2;
      };
   } di;

   di.d = v;

   /* Write the low 32 bits of the constant to the X:UD channel and the
    * high 32 bits to the Y:UD channel to build the constant in a VGRF.
    * We have to do this twice (offset 0 and offset 1), since a DF VGRF takes
    * two SIMD8 registers in SIMD4x2 execution.  Finally, return a swizzle
    * of XXXX so any access to the VGRF only reads the constant data in these
    * channels.
    */
   const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
   for (unsigned n = 0; n < 2; n++) {
      const vec4_builder ubld = bld.exec_all().group(4, n);
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_X), brw_imm_ud(di.i1));
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_Y), brw_imm_ud(di.i2));
   }

   return swizzle(src_reg(retype(tmp, BRW_REGISTER_TYPE_DF)), BRW_SWIZZLE_XXXX);
}

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg;

   if (instr->def.bit_size == 64) {
      reg = dst_reg(VGRF, alloc.allocate(2));
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      reg = dst_reg(VGRF, alloc.allocate(1));
      reg.type = BRW_REGISTER_TYPE_D;
   }

   const vec4_builder ibld = vec4_builder(this).at_end();
   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

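      /* Gather every later component that holds the same constant, so one
       * replicated MOV can write all of those channels at once.
       */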
      for (unsigned j = i; j < instr->def.num_components; j++) {
         if ((instr->def.bit_size == 32 &&
              instr->value[i].u32 == instr->value[j].u32) ||
             (instr->def.bit_size == 64 &&
              instr->value[i].f64 == instr->value[j].f64)) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      if (instr->def.bit_size == 64) {
         emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
      } else {
         emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
      }

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}

src_reg
vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
{
   /* SSBO stores are weird in that their index is in src[1] */
   const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;

   src_reg surf_index;
   if (nir_src_is_const(instr->src[src])) {
      unsigned index = prog_data->base.binding_table.ssbo_start +
                       nir_src_as_uint(instr->src[src]);
      surf_index = brw_imm_ud(index);
   } else {
      surf_index = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
      surf_index = emit_uniformize(surf_index);
   }

   return surf_index;
}

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      assert(nir_dest_bit_size(instr->dest) == 32);
      /* We set EmitNoIndirectInput for VS */
      unsigned load_offset = nir_src_as_uint(instr->src[0]);

      dest = get_nir_dest(instr->dest);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      src = src_reg(ATTR, instr->const_index[0] + load_offset,
                    glsl_type::uvec4_type);
      src = retype(src, dest.type);

      /* Swizzle source based on component layout qualifier */
      src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output: {
      assert(nir_src_bit_size(instr->src[0]) == 32);
      unsigned store_offset = nir_src_as_uint(instr->src[1]);
      int varying = instr->const_index[0] + store_offset;
      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      unsigned c = nir_intrinsic_component(instr);
      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = instr->num_components;
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      assert(nir_src_num_components(instr->src[0]) == 1);
      unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
                            nir_src_as_uint(instr->src[0]) : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter: the LOD of the resinfo-style
       * buffer-size query, which is always 0.
       */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);
      break;
   }

   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_src_bit_size(instr->src[0]) == 32);
      assert(nir_intrinsic_write_mask(instr) ==
             (1u << instr->num_components) - 1);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
                                  BRW_REGISTER_TYPE_UD);

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);

      /* IvyBridge does not have a native SIMD4x2 untyped write message, so
       * untyped writes use SIMD8 mode.  In order to hide this and keep
       * symmetry across typed and untyped messages and across hardware
       * platforms, the current implementation of the untyped messages
       * transparently converts the SIMD4x2 payload into an equivalent SIMD8
       * payload by transposing it and enabling only channel X on the SEND
       * instruction.
       *
       * The above works well for full vector writes, but not for partial
       * writes where we want to write some channels and not others, like
       * when we have code such as v.xyw = vec3(1,2,4).  Because the untyped
       * write messages are quite restrictive with regards to the channel
       * enables we can configure in the message descriptor (not all
       * combinations are allowed), we cannot simply implement these
       * scenarios with a single message while keeping the aforementioned
       * symmetry in the implementation.  For now we have decided that it is
       * better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write
       * messages (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write
       * message, detects gaps in the vector write and, if needed, sends a
       * second message with the remaining channels.  If in the future we
       * decide that we want to emit a single message at the expense of
       * losing the symmetry in the implementation, we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write
       *    SIMD8 message payload.  In this mode we can write up to 8 offsets
       *    and dwords to the red channel only (for the two vec4s in the
       *    SIMD4x2 execution) and select which of the 8 channels carry data
       *    to write by setting the appropriate writemask in the dst register
       *    of the SEND instruction.  It would require writing a new
       *    generator opcode specifically for IvyBridge, since we would need
       *    to prepare a SIMD8 payload that could use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the
       *    writemask on the dst of the SEND instruction to select the
       *    channels we want to write.  It would require modifying the
       *    current messages to receive and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                         1 /* dims */, instr->num_components /* size */,
                         BRW_PREDICATE_NONE);
      break;
   }

   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_dest_bit_size(instr->dest) == 32);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
                                  BRW_REGISTER_TYPE_UD);

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size */,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));
      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(brw_aop_for_nir_intrinsic(instr), instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
      unreachable("should be lowered by brw_nir_lower_vs_inputs()");

   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 4 */
      assert(nir_intrinsic_base(instr) % 4 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
      src.type = dest.type;

      /* Uniforms don't actually have to be vec4 aligned.  In the case that
       * it isn't, we have to use a swizzle to shift things around.  They
       * do still have the std140 alignment requirement that vec2's have to
       * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
       *
       * The swizzle also works in the indirect case as the generator adds
       * the swizzle to the offset for us.
       */
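      /* For example, a single 32-bit component stored at byte offset 4 of
       * its vec4 slot yields shift == 1, and the swizzle adjustment below
       * turns XXXX into YYYY so the MOV reads from the Y channel.
       */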
      const int type_size = type_sz(src.type);
      unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
      assert(shift + instr->num_components <= 4);

      if (nir_src_is_const(instr->src[0])) {
         const unsigned load_offset = nir_src_as_uint(instr->src[0]);
         /* Offsets are in bytes but they should always be multiples of 4 */
         assert(load_offset % 4 == 0);

         src.swizzle = brw_swizzle_for_size(instr->num_components);
         dest.writemask = brw_writemask_for_size(instr->num_components);
         unsigned offset = load_offset + shift * type_size;
         src.offset = ROUND_DOWN_TO(offset, 16);
         shift = (offset % 16) / type_size;
         assert(shift + instr->num_components <= 4);
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         emit(MOV(dest, src));
      } else {
         /* Uniform arrays are vec4 aligned, because of std140 alignment
          * rules.
          */
         assert(shift == 0);

         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         /* MOV_INDIRECT is going to stomp the whole thing anyway */
         dest.writemask = WRITEMASK_XYZW;

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }

   case nir_intrinsic_load_ubo: {
      src_reg surf_index;

      prog_data->base.has_ubo_pull = true;

      dest = get_nir_dest(instr->dest);

      if (nir_src_is_const(instr->src[0])) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                nir_src_as_uint(instr->src[0]);
         surf_index = brw_imm_ud(index);
      } else {
         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int32,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);
      }

      src_reg offset_reg;
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
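         /* Pull-constant loads fetch a whole 16-byte-aligned vec4, so round
          * the byte offset down; the remaining sub-vec4 offset is applied
          * through the swizzle adjustment after the load.
          */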
         offset_reg = brw_imm_ud(load_offset & ~15);
      } else {
         offset_reg = src_reg(this, glsl_type::uint_type);
         emit(MOV(dst_reg(offset_reg),
                  get_nir_src(instr->src[1], nir_type_uint32, 1)));
      }

      src_reg packed_consts;
      if (nir_dest_bit_size(instr->dest) == 32) {
         packed_consts = src_reg(this, glsl_type::vec4_type);
         emit_pull_constant_load_reg(dst_reg(packed_consts),
                                     surf_index,
                                     offset_reg,
                                     NULL, NULL /* before_block/inst */);
      } else {
         src_reg temp = src_reg(this, glsl_type::dvec4_type);
         src_reg temp_float = retype(temp, BRW_REGISTER_TYPE_F);

         emit_pull_constant_load_reg(dst_reg(temp_float),
                                     surf_index, offset_reg, NULL, NULL);
         if (offset_reg.file == IMM)
            offset_reg.ud += 16;
         else
            emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(16u)));
         emit_pull_constant_load_reg(dst_reg(byte_offset(temp_float, REG_SIZE)),
                                     surf_index, offset_reg, NULL, NULL);

         packed_consts = src_reg(this, glsl_type::dvec4_type);
         shuffle_64bit_data(dst_reg(packed_consts), temp, false);
      }

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         unsigned type_size = type_sz(dest.type);
         packed_consts.swizzle +=
            BRW_SWIZZLE4(load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size);
      }

      emit(MOV(dest, retype(packed_consts, dest.type)));

      break;
   }

   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_scoped_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp, brw_vec8_grf(0, 0))
         ->size_written = 2 * REG_SIZE;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface = get_nir_ssbo_intrinsic_index(instr);
   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

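/* If the boolean condition feeding this ALU instruction was produced by an
 * any()/all() style vector comparison, re-emit that comparison directly and
 * report the matching ANY4H/ALL4H predicate, so the caller can predicate on
 * the flag register instead of consuming a materialized boolean value.
 */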
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      nir_alu_type type = nir_op_infos[cmp_instr->op].input_types[i];
      unsigned bit_size = nir_src_bit_size(cmp_instr->src[i].src);
      type = (nir_alu_type) (((unsigned) type) | bit_size);
      op[i] = get_nir_src(cmp_instr->src[i].src, type, 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_cmod_for_nir_comparison(cmp_instr->op)));

   return true;
}

static void
emit_find_msb_using_lzd(const vec4_builder &bld,
                        const dst_reg &dst,
                        const src_reg &src,
                        bool is_signed)
{
   vec4_instruction *inst;
   src_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are a few problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *      For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));

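      /* ASR by 31 yields 0 for non-negative values and ~0 for negative
       * ones, so the XOR below leaves non-negative values unchanged and
       * logically inverts negative ones.
       */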
      bld.ASR(dst_reg(temp), src, brw_imm_d(31));
      bld.XOR(dst_reg(temp), temp, src);
   }

   bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB count
    * into an LSB count.  If no bits are set, LZD will return 32.  31-32 = -1,
    * which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
                  brw_imm_d(31));
   inst->src[0].negate = true;
}

void
vec4_visitor::emit_conversion_from_double(dst_reg dst, src_reg src,
                                          bool saturate)
{
   /* BDW PRM vol 15 - workarounds:
    * DF->f format conversion for Align16 has wrong emask calculation when
    * source is immediate.
    */
   if (devinfo->gen == 8 && dst.type == BRW_REGISTER_TYPE_F &&
       src.file == BRW_IMMEDIATE_VALUE) {
      vec4_instruction *inst = emit(MOV(dst, brw_imm_f(src.df)));
      inst->saturate = saturate;
      return;
   }

   enum opcode op;
   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      op = VEC4_OPCODE_DOUBLE_TO_D32;
      break;
   case BRW_REGISTER_TYPE_UD:
      op = VEC4_OPCODE_DOUBLE_TO_U32;
      break;
   case BRW_REGISTER_TYPE_F:
      op = VEC4_OPCODE_DOUBLE_TO_F32;
      break;
   default:
      unreachable("Unknown conversion");
   }

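   /* The conversion opcode leaves a 32-bit result in each 64-bit channel;
    * VEC4_OPCODE_PICK_LOW_32BIT below gathers those low dwords into a
    * packed 32-bit vector.
    */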
   dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
   emit(MOV(temp, src));
   dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
   emit(op, temp2, src_reg(temp));

   emit(VEC4_OPCODE_PICK_LOW_32BIT, retype(temp2, dst.type), src_reg(temp2));
   vec4_instruction *inst = emit(MOV(dst, src_reg(retype(temp2, dst.type))));
   inst->saturate = saturate;
}

void
vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src,
                                        bool saturate)
{
   dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
   src_reg tmp_src = retype(src_reg(this, glsl_type::vec4_type), src.type);
   emit(MOV(dst_reg(tmp_src), src));
   emit(VEC4_OPCODE_TO_DOUBLE, tmp_dst, tmp_src);
   vec4_instruction *inst = emit(MOV(dst, src_reg(tmp_dst)));
   inst->saturate = saturate;
}

/**
 * Try to use an immediate value for a source
 *
 * In cases of flow control, constant propagation is sometimes unable to
 * determine that a register contains a constant value.  To work around this,
 * try to emit a literal as one of the sources.  If \c try_src0_also is set,
 * \c op[0] will also be tried for an immediate value.
 *
 * If \c op[0] is modified, the operands will be exchanged so that \c op[1]
 * will always be the immediate value.
 *
 * \return The index of the source that was modified, 0 or 1, if successful.
 * Otherwise, -1.
 *
 * \param op - Operands to the instruction
 * \param try_src0_also - True if \c op[0] should also be a candidate for
 *                        getting an immediate value.  This should only be set
 *                        for commutative operations.
 */
static int
try_immediate_source(const nir_alu_instr *instr, src_reg *op,
                     bool try_src0_also,
                     ASSERTED const gen_device_info *devinfo)
{
   unsigned idx;

   /* MOV should be the only single-source instruction passed to this
    * function.  Any other unary instruction with a constant source should
    * have been constant-folded away!
    */
   assert(nir_op_infos[instr->op].num_inputs > 1 ||
          instr->op == nir_op_mov);

   if (instr->op != nir_op_mov &&
       nir_src_bit_size(instr->src[1].src) == 32 &&
       nir_src_is_const(instr->src[1].src)) {
      idx = 1;
   } else if (try_src0_also &&
              nir_src_bit_size(instr->src[0].src) == 32 &&
              nir_src_is_const(instr->src[0].src)) {
      idx = 0;
   } else {
      return -1;
   }

   const enum brw_reg_type old_type = op[idx].type;

   switch (old_type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD: {
      int first_comp = -1;
      int d = 0;

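      /* An integer immediate is necessarily a scalar, so this only works if
       * every component the instruction reads holds the same constant.
       */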
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            if (first_comp < 0) {
               first_comp = i;
               d = nir_src_comp_as_int(instr->src[idx].src,
                                       instr->src[idx].swizzle[i]);
            } else if (d != nir_src_comp_as_int(instr->src[idx].src,
                                                instr->src[idx].swizzle[i])) {
               return -1;
            }
         }
      }

      assert(first_comp >= 0);

      if (op[idx].abs)
         d = MAX2(-d, d);

      if (op[idx].negate) {
         /* On Gen8+ a negation source modifier on a logical operation means
          * something different.  Nothing should generate this, so assert that
          * it does not occur.
          */
         assert(devinfo->gen < 8 || (instr->op != nir_op_iand &&
                                     instr->op != nir_op_ior &&
                                     instr->op != nir_op_ixor));
         d = -d;
      }

      op[idx] = retype(src_reg(brw_imm_d(d)), old_type);
      break;
   }

   case BRW_REGISTER_TYPE_F: {
      int first_comp = -1;
      float f[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
      bool is_scalar = true;

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            f[i] = nir_src_comp_as_float(instr->src[idx].src,
                                         instr->src[idx].swizzle[i]);
            if (first_comp < 0) {
               first_comp = i;
            } else if (f[first_comp] != f[i]) {
               is_scalar = false;
            }
         }
      }

      if (is_scalar) {
         if (op[idx].abs)
            f[first_comp] = fabs(f[first_comp]);

         if (op[idx].negate)
            f[first_comp] = -f[first_comp];

         op[idx] = src_reg(brw_imm_f(f[first_comp]));
         assert(op[idx].type == old_type);
      } else {
         uint8_t vf_values[4] = { 0, 0, 0, 0 };

         for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
            if (op[idx].abs)
               f[i] = fabs(f[i]);

            if (op[idx].negate)
               f[i] = -f[i];

            const int vf = brw_float_to_vf(f[i]);
            if (vf == -1)
               return -1;

            vf_values[i] = vf;
         }

         op[idx] = src_reg(brw_imm_vf4(vf_values[0], vf_values[1],
                                       vf_values[2], vf_values[3]));
      }
      break;
   }

   default:
      unreachable("Non-32bit type.");
   }

   /* If the instruction has more than one source, the instruction format only
    * allows source 1 to be an immediate value.  If the immediate value was
    * source 0, then the sources must be exchanged.
    */
   if (idx == 0 && instr->op != nir_op_mov) {
      src_reg tmp = op[0];
      op[0] = op[1];
      op[1] = tmp;
   }

   return idx;
}

void
vec4_visitor::fix_float_operands(src_reg op[3], nir_alu_instr *instr)
{
   bool fixed[3] = { false, false, false };

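   /* Three-source (Align16) instructions cannot take immediate operands, so
    * constant sources must be moved into temporaries.  When two sources hold
    * the same constant, or constants that differ only in sign, a single
    * temporary is shared between them.
    */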
   for (unsigned i = 0; i < 2; i++) {
      if (!nir_src_is_const(instr->src[i].src))
         continue;

      for (unsigned j = i + 1; j < 3; j++) {
         if (fixed[j])
            continue;

         if (!nir_src_is_const(instr->src[j].src))
            continue;

         if (nir_alu_srcs_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];

            fixed[i] = true;
            fixed[j] = true;
         } else if (nir_alu_srcs_negative_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];
            op[j].negate = !op[j].negate;

            fixed[i] = true;
            fixed[j] = true;
         }
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (!fixed[i])
         op[i] = fix_3src_operand(op[i]);
   }
}

static bool
const_src_fits_in_16_bits(const nir_src &src, brw_reg_type type)
{
   assert(nir_src_is_const(src));
   if (type_is_unsigned_int(type)) {
      return nir_src_comp_as_uint(src, 0) <= UINT16_MAX;
   } else {
      const int64_t c = nir_src_comp_as_int(src, 0);
      return c <= INT16_MAX && c >= INT16_MIN;
   }
}

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
                                           nir_dest_bit_size(instr->dest.dest));
   dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = (nir_alu_type)
         (nir_op_infos[instr->op].input_types[i] |
          nir_src_bit_size(instr->src[i].src));
      op[i] = get_nir_src(instr->src[i].src, src_type, 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_mov:
      try_immediate_source(instr, &op[0], true, devinfo);
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f32:
   case nir_op_u2f32:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
      if (nir_src_bit_size(instr->src[0].src) == 64)
         emit_conversion_from_double(dst, op[0], instr->dest.saturate);
      else
         inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_f2f64:
   case nir_op_i2f64:
   case nir_op_u2f64:
      emit_conversion_to_double(dst, op[0], instr->dest.saturate);
      break;

   case nir_op_fsat:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = true;
      break;

   case nir_op_fneg:
   case nir_op_ineg:
      op[0].negate = true;
      inst = emit(MOV(dst, op[0]));
      if (instr->op == nir_op_fneg)
         inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fabs:
   case nir_op_iabs:
      op[0].negate = false;
      op[0].abs = true;
      inst = emit(MOV(dst, op[0]));
      if (instr->op == nir_op_fabs)
         inst->saturate = instr->dest.saturate;
      break;

   case nir_op_iadd:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fadd:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_sat:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = true;
      break;

   case nir_op_fmul:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen < 8) {
         /* For integer multiplication, the MUL uses the low 16 bits of one
          * of the operands (src0 through SNB, src1 on IVB and later), and
          * the MACH accumulates the contribution of the upper 16 bits of
          * that operand.  If we can determine that one of the args is in
          * the low 16 bits, though, we can just emit a single MUL.
          */
         if (nir_src_is_const(instr->src[0].src) &&
             nir_alu_instr_src_read_mask(instr, 0) == 1 &&
             const_src_fits_in_16_bits(instr->src[0].src, op[0].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (nir_src_is_const(instr->src[1].src) &&
                    nir_alu_instr_src_read_mask(instr, 1) == 1 &&
                    const_src_fits_in_16_bits(instr->src[1].src, op[1].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

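            /* Full 32x32 multiply through the accumulator: MUL and MACH
             * together compute the complete product (MACH's high-half
             * destination is discarded here), and the final MOV copies the
             * low 32 bits of the product out of the accumulator.
             */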
            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_carry: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

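      /* ADDC writes the per-channel carry bits into the accumulator; the
       * MOV below copies that carry vector out as the result.
       */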
      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
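      /* Implement ceil(x) as -floor(-x): negate the source, round down with
       * RNDD, and negate the result again.
       */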
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_imin:
   case nir_op_umin:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmin:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imax:
   case nir_op_umax:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmax:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fne32: {
      enum brw_conditional_mod conditional_mod =
         brw_cmod_for_nir_comparison(instr->op);

      if (nir_src_bit_size(instr->src[0].src) < 64) {
         /* If the order of the sources is changed due to an immediate value,
          * then the condition must also be changed.
          */
         if (try_immediate_source(instr, op, true, devinfo) == 0)
            conditional_mod = brw_swap_cmod(conditional_mod);

         emit(CMP(dst, op[0], op[1], conditional_mod));
      } else {
         /* Produce a 32-bit boolean result from the DF comparison by selecting
          * only the low 32-bit in each DF produced.  Do this in a temporary
          * so we can then move from there to the result using align16 again
          * to honor the original writemask.
          */
         dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
         emit(CMP(temp, op[0], op[1], conditional_mod));
         dst_reg result = dst_reg(this, glsl_type::bvec4_type);
         emit(VEC4_OPCODE_PICK_LOW_32BIT, result, src_reg(temp));
         emit(MOV(dst, src_reg(result)));
      }
      break;
   }

   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i32:
   case nir_op_b2f32:
   case nir_op_b2f64:
      if (nir_dest_bit_size(instr->dest.dest) > 32) {
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         emit_conversion_to_double(dst, negate(op[0]), false);
      } else {
         emit(MOV(dst, negate(op[0])));
      }
      break;

   case nir_op_f2b32:
      if (nir_src_bit_size(instr->src[0].src) == 64) {
         /* We use a MOV with conditional_mod to check if the provided value is
          * 0.0.  We want this to flush denormalized numbers to zero, so we set
          * a source modifier on the source operand to trigger this, as source
          * modifiers don't affect the result of the testing against 0.0.
          */
         src_reg value = op[0];
         value.abs = true;
         vec4_instruction *inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         src_reg one = src_reg(this, glsl_type::ivec4_type);
         emit(MOV(dst_reg(one), brw_imm_d(~0)));
         inst = emit(BRW_OPCODE_SEL, dst, one, brw_imm_d(0));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else {
         emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      }
      break;

   case nir_op_i2b32:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
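      /* Pack two 16-bit values: dst.x = (src.y << 16) | (src.x & 0xffff). */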
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }

   case nir_op_pack_64_2x32_split: {
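      /* Assemble a 64-bit value per channel: op[0] supplies the low 32 bits
       * and op[1] the high 32 bits.
       */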
      dst_reg result = dst_reg(this, glsl_type::dvec4_type);
      dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
      emit(MOV(tmp, retype(op[0], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_LOW_32BIT, result, src_reg(tmp));
      emit(MOV(tmp, retype(op[1], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_HIGH_32BIT, result, src_reg(tmp));
      emit(MOV(dst, src_reg(result)));
      break;
   }

   case nir_op_unpack_64_2x32_split_x:
   case nir_op_unpack_64_2x32_split_y: {
      enum opcode oper = (instr->op == nir_op_unpack_64_2x32_split_x) ?
                         VEC4_OPCODE_PICK_LOW_32BIT : VEC4_OPCODE_PICK_HIGH_32BIT;
      dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
      emit(MOV(tmp, op[0]));
      dst_reg tmp2 = dst_reg(this, glsl_type::uvec4_type);
      emit(oper, tmp2, src_reg(tmp));
      emit(MOV(dst, src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and emit_unpack_half_2x16 uses the source
       * operand in an operation with WRITEMASK_Y while our source operand
       * has only one component, it would access incorrect data and produce
       * regressions in Piglit.  We repeat the swizzle of the first component
       * on the remaining components to avoid this.  In the vec4_visitor IR
       * code path this is not needed because the operand already has the
       * correct swizzle.
       */
1710 op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
1711 emit_unpack_half_2x16(dst, op[0]);
1712 break;
1713
1714 case nir_op_pack_half_2x16:
1715 emit_pack_half_2x16(dst, op[0]);
1716 break;
1717
1718 case nir_op_unpack_unorm_4x8:
1719 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1720 emit_unpack_unorm_4x8(dst, op[0]);
1721 break;
1722
1723 case nir_op_pack_unorm_4x8:
1724 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1725 emit_pack_unorm_4x8(dst, op[0]);
1726 break;
1727
1728 case nir_op_unpack_snorm_4x8:
1729 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1730 emit_unpack_snorm_4x8(dst, op[0]);
1731 break;
1732
1733 case nir_op_pack_snorm_4x8:
1734 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1735 emit_pack_snorm_4x8(dst, op[0]);
1736 break;
1737
1738 case nir_op_bitfield_reverse:
1739 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1740 emit(BFREV(dst, op[0]));
1741 break;
1742
1743 case nir_op_bit_count:
1744 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1745 emit(CBIT(dst, op[0]));
1746 break;
1747
1748 case nir_op_ufind_msb:
1749 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1750 emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
1751 break;
1752
1753 case nir_op_ifind_msb: {
1754 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1755 vec4_builder bld = vec4_builder(this).at_end();
1756 src_reg src(dst);
1757
1758 if (devinfo->gen < 7) {
1759 emit_find_msb_using_lzd(bld, dst, op[0], true);
1760 } else {
1761 emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));
1762
1763 /* FBH counts from the MSB side, while GLSL's findMSB() wants the
1764 * count from the LSB side. If FBH didn't return an error
1765 * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1766 * count into an LSB count.
1767 */
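/* For illustration: for an input of 0x00000002, FBH returns 30 (counting
 * from the MSB side), and 31 - 30 = 1, which is the LSB-side index that
 * findMSB() expects.
 */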
1768 bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1769
1770 inst = bld.ADD(dst, src, brw_imm_d(31));
1771 inst->predicate = BRW_PREDICATE_NORMAL;
1772 inst->src[0].negate = true;
1773 }
1774 break;
1775 }
1776
1777 case nir_op_find_lsb: {
1778 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1779 vec4_builder bld = vec4_builder(this).at_end();
1780
1781 if (devinfo->gen < 7) {
1782 dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);
1783
1784 /* (x & -x) generates a value that consists of only the LSB of x.
1785 * For all powers of 2, findMSB(y) == findLSB(y).
1786 */
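/* Worked example: x = 6 = 0b0110, -x = 0b...1010 in two's complement, so
 * x & -x = 0b0010, and findMSB(0b0010) = 1 = findLSB(6).
 */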
1787 src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
1788 src_reg negated_src = src;
1789
1790 /* One must be negated, and the other must be non-negated. It
1791 * doesn't matter which is which.
1792 */
1793 negated_src.negate = true;
1794 src.negate = false;
1795
1796 bld.AND(temp, src, negated_src);
1797 emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
1798 } else {
1799 bld.FBL(dst, op[0]);
1800 }
1801 break;
1802 }
1803
1804 case nir_op_ubitfield_extract:
1805 case nir_op_ibitfield_extract:
1806 unreachable("should have been lowered");
1807 case nir_op_ubfe:
1808 case nir_op_ibfe:
1809 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1810 op[0] = fix_3src_operand(op[0]);
1811 op[1] = fix_3src_operand(op[1]);
1812 op[2] = fix_3src_operand(op[2]);
1813
1814 emit(BFE(dst, op[2], op[1], op[0]));
1815 break;
1816
1817 case nir_op_bfm:
1818 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1819 emit(BFI1(dst, op[0], op[1]));
1820 break;
1821
1822 case nir_op_bfi:
1823 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1824 op[0] = fix_3src_operand(op[0]);
1825 op[1] = fix_3src_operand(op[1]);
1826 op[2] = fix_3src_operand(op[2]);
1827
1828 emit(BFI2(dst, op[0], op[1], op[2]));
1829 break;
1830
1831 case nir_op_bitfield_insert:
1832 unreachable("not reached: should have been lowered");
1833
1834 case nir_op_fsign:
1835 assert(!instr->dest.saturate);
1836 if (op[0].abs) {
1837 /* Straightforward, since the source can be assumed to be either
1838 * non-negative or non-positive depending on the setting of the
1839 * negate flag.
1840 */
1841 inst = emit(MOV(dst, op[0]));
1842 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1843
1844 inst = (op[0].negate)
1845 ? emit(MOV(dst, brw_imm_f(-1.0f)))
1846 : emit(MOV(dst, brw_imm_f(1.0f)));
1847 inst->predicate = BRW_PREDICATE_NORMAL;
1848 } else if (type_sz(op[0].type) < 8) {
1849 /* AND(val, 0x80000000) gives the sign bit.
1850 *
1851 * The predicated OR then ORs 1.0 (0x3f800000) with the sign bit if val
1852 * is not zero.
1853 */
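/* For illustration: val = -2.5f = 0xC0200000; AND 0x80000000 keeps just
 * the sign bit, and the OR then yields 0x80000000 | 0x3f800000 =
 * 0xBF800000 = -1.0f. For positive val the OR yields 0x3F800000 = 1.0f.
 */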
1854 emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
1855
1856 op[0].type = BRW_REGISTER_TYPE_UD;
1857 dst.type = BRW_REGISTER_TYPE_UD;
1858 emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));
1859
1860 inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
1861 inst->predicate = BRW_PREDICATE_NORMAL;
1862 dst.type = BRW_REGISTER_TYPE_F;
1863 } else {
1864 /* For doubles we do the same, but we need to consider:
1865 *
1866 * - We use a MOV with conditional_mod instead of a CMP so that we can
1867 * skip loading a 0.0 immediate. We use a source modifier on the
1868 * source of the MOV so that we flush denormalized values to 0.
1869 * Since we want to compare against 0, this won't alter the result.
1870 * - We need to extract the high 32 bits of each DF, which is where
1871 * the sign is stored.
1872 * - We need to produce a DF result.
1873 */
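/* For illustration: for op[0] = -3.0 (0xC008000000000000) the high dword
 * is 0xC0080000; the AND below leaves 0x80000000, the predicated OR
 * produces 0xBF800000 = -1.0f, and the final conversion yields -1.0 as a
 * DF.
 */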
1874
1875 /* Check for zero */
1876 src_reg value = op[0];
1877 value.abs = true;
1878 inst = emit(MOV(dst_null_df(), value));
1879 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1880
1881 /* AND each high 32-bit channel with 0x80000000u */
1882 dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
1883 emit(VEC4_OPCODE_PICK_HIGH_32BIT, tmp, op[0]);
1884 emit(AND(tmp, src_reg(tmp), brw_imm_ud(0x80000000u)));
1885
1886 /* OR the bit pattern of 1.0 (0x3f800000) into each channel, predicated
1887 * to skip the channels whose value was 0.
1888 */
1889 inst = emit(OR(tmp, src_reg(tmp), brw_imm_ud(0x3f800000u)));
1890 inst->predicate = BRW_PREDICATE_NORMAL;
1891
1892 /* Now convert the result from float to double */
1893 emit_conversion_to_double(dst, retype(src_reg(tmp),
1894 BRW_REGISTER_TYPE_F),
1895 false);
1896 }
1897 break;
1898
1899 case nir_op_ishl:
1900 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1901 try_immediate_source(instr, op, false, devinfo);
1902 emit(SHL(dst, op[0], op[1]));
1903 break;
1904
1905 case nir_op_ishr:
1906 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1907 try_immediate_source(instr, op, false, devinfo);
1908 emit(ASR(dst, op[0], op[1]));
1909 break;
1910
1911 case nir_op_ushr:
1912 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1913 try_immediate_source(instr, op, false, devinfo);
1914 emit(SHR(dst, op[0], op[1]));
1915 break;
1916
1917 case nir_op_ffma:
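/* The 64-bit path below avoids the three-source MAD instruction,
 * presumably because it cannot handle 64-bit operands here, and emits
 * separate MUL + ADD instructions instead.
 */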
1918 if (type_sz(dst.type) == 8) {
1919 dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
1920 emit(MUL(mul_dst, op[1], op[0]));
1921 inst = emit(ADD(dst, src_reg(mul_dst), op[2]));
1922 inst->saturate = instr->dest.saturate;
1923 } else {
1924 fix_float_operands(op, instr);
1925 inst = emit(MAD(dst, op[2], op[1], op[0]));
1926 inst->saturate = instr->dest.saturate;
1927 }
1928 break;
1929
1930 case nir_op_flrp:
1931 fix_float_operands(op, instr);
1932 inst = emit(LRP(dst, op[2], op[1], op[0]));
1933 inst->saturate = instr->dest.saturate;
1934 break;
1935
1936 case nir_op_b32csel:
1937 enum brw_predicate predicate;
1938 if (!optimize_predicate(instr, &predicate)) {
1939 emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
1940 switch (dst.writemask) {
1941 case WRITEMASK_X:
1942 predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
1943 break;
1944 case WRITEMASK_Y:
1945 predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
1946 break;
1947 case WRITEMASK_Z:
1948 predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
1949 break;
1950 case WRITEMASK_W:
1951 predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
1952 break;
1953 default:
1954 predicate = BRW_PREDICATE_NORMAL;
1955 break;
1956 }
1957 }
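/* The SEL below picks op[1] on the channels where the predicate is set
 * and op[2] elsewhere; the replicate predicates above let a
 * single-channel write be driven by that channel's flag bit.
 */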
1958 inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
1959 inst->predicate = predicate;
1960 break;
1961
1962 case nir_op_fdot_replicated2:
1963 try_immediate_source(instr, op, true, devinfo);
1964 inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
1965 inst->saturate = instr->dest.saturate;
1966 break;
1967
1968 case nir_op_fdot_replicated3:
1969 try_immediate_source(instr, op, true, devinfo);
1970 inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
1971 inst->saturate = instr->dest.saturate;
1972 break;
1973
1974 case nir_op_fdot_replicated4:
1975 try_immediate_source(instr, op, true, devinfo);
1976 inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
1977 inst->saturate = instr->dest.saturate;
1978 break;
1979
1980 case nir_op_fdph_replicated:
1981 try_immediate_source(instr, op, false, devinfo);
1982 inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
1983 inst->saturate = instr->dest.saturate;
1984 break;
1985
1986 case nir_op_fdiv:
1987 unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");
1988
1989 case nir_op_fmod:
1990 unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");
1991
1992 case nir_op_fsub:
1993 case nir_op_isub:
1994 unreachable("not reached: should be handled by ir_sub_to_add_neg");
1995
1996 default:
1997 unreachable("Unimplemented ALU operation");
1998 }
1999
2000 /* If we need to do a boolean resolve, replace the result with -(x & 1)
2001 * to sign-extend the low bit to 0/~0.
2002 */
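/* For illustration: x = 3 gives x & 1 = 1 and -(1) = ~0 (true), while
 * x = 2 gives x & 1 = 0 and -(0) = 0 (false).
 */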
2003 if (devinfo->gen <= 5 &&
2004 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
2005 BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
2006 dst_reg masked = dst_reg(this, glsl_type::int_type);
2007 masked.writemask = dst.writemask;
2008 emit(AND(masked, src_reg(dst), brw_imm_d(1)));
2009 src_reg masked_neg = src_reg(masked);
2010 masked_neg.negate = true;
2011 emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
2012 }
2013 }
2014
2015 void
2016 vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
2017 {
2018 switch (instr->type) {
2019 case nir_jump_break:
2020 emit(BRW_OPCODE_BREAK);
2021 break;
2022
2023 case nir_jump_continue:
2024 emit(BRW_OPCODE_CONTINUE);
2025 break;
2026
2027 case nir_jump_return:
2028 /* fall through */
2029 default:
2030 unreachable("unknown jump");
2031 }
2032 }
2033
2034 static enum ir_texture_opcode
2035 ir_texture_opcode_for_nir_texop(nir_texop texop)
2036 {
2037 enum ir_texture_opcode op;
2038
2039 switch (texop) {
2040 case nir_texop_lod: op = ir_lod; break;
2041 case nir_texop_query_levels: op = ir_query_levels; break;
2042 case nir_texop_texture_samples: op = ir_texture_samples; break;
2043 case nir_texop_tex: op = ir_tex; break;
2044 case nir_texop_tg4: op = ir_tg4; break;
2045 case nir_texop_txb: op = ir_txb; break;
2046 case nir_texop_txd: op = ir_txd; break;
2047 case nir_texop_txf: op = ir_txf; break;
2048 case nir_texop_txf_ms: op = ir_txf_ms; break;
2049 case nir_texop_txl: op = ir_txl; break;
2050 case nir_texop_txs: op = ir_txs; break;
2051 case nir_texop_samples_identical: op = ir_samples_identical; break;
2052 default:
2053 unreachable("unknown texture opcode");
2054 }
2055
2056 return op;
2057 }
2058
2059 static const glsl_type *
2060 glsl_type_for_nir_alu_type(nir_alu_type alu_type,
2061 unsigned components)
2062 {
2063 return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
2064 components, 1);
2065 }
2066
2067 void
2068 vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
2069 {
2070 unsigned texture = instr->texture_index;
2071 unsigned sampler = instr->sampler_index;
2072 src_reg texture_reg = brw_imm_ud(texture);
2073 src_reg sampler_reg = brw_imm_ud(sampler);
2074 src_reg coordinate;
2075 const glsl_type *coord_type = NULL;
2076 src_reg shadow_comparator;
2077 src_reg offset_value;
2078 src_reg lod, lod2;
2079 src_reg sample_index;
2080 src_reg mcs;
2081
2082 const glsl_type *dest_type =
2083 glsl_type_for_nir_alu_type(instr->dest_type,
2084 nir_tex_instr_dest_size(instr));
2085 dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);
2086
2087 /* The hardware requires a LOD for buffer textures */
2088 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
2089 lod = brw_imm_d(0);
2090
2091 /* Load the texture operation sources */
2092 uint32_t constant_offset = 0;
2093 for (unsigned i = 0; i < instr->num_srcs; i++) {
2094 switch (instr->src[i].src_type) {
2095 case nir_tex_src_comparator:
2096 shadow_comparator = get_nir_src(instr->src[i].src,
2097 BRW_REGISTER_TYPE_F, 1);
2098 break;
2099
2100 case nir_tex_src_coord: {
2101 unsigned src_size = nir_tex_instr_src_size(instr, i);
2102
2103 switch (instr->op) {
2104 case nir_texop_txf:
2105 case nir_texop_txf_ms:
2106 case nir_texop_samples_identical:
2107 coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
2108 src_size);
2109 coord_type = glsl_type::ivec(src_size);
2110 break;
2111
2112 default:
2113 coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
2114 src_size);
2115 coord_type = glsl_type::vec(src_size);
2116 break;
2117 }
2118 break;
2119 }
2120
2121 case nir_tex_src_ddx:
2122 lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
2123 nir_tex_instr_src_size(instr, i));
2124 break;
2125
2126 case nir_tex_src_ddy:
2127 lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
2128 nir_tex_instr_src_size(instr, i));
2129 break;
2130
2131 case nir_tex_src_lod:
2132 switch (instr->op) {
2133 case nir_texop_txs:
2134 case nir_texop_txf:
2135 lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
2136 break;
2137
2138 default:
2139 lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
2140 break;
2141 }
2142 break;
2143
2144 case nir_tex_src_ms_index: {
2145 sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
2146 break;
2147 }
2148
2149 case nir_tex_src_offset:
2150 if (!brw_texture_offset(instr, i, &constant_offset)) {
2151 offset_value =
2152 get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
2153 }
2154 break;
2155
2156 case nir_tex_src_texture_offset: {
2157 /* Emit code to evaluate the actual indexing expression */
2158 src_reg src = get_nir_src(instr->src[i].src, 1);
2159 src_reg temp(this, glsl_type::uint_type);
2160 emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
2161 texture_reg = emit_uniformize(temp);
2162 break;
2163 }
2164
2165 case nir_tex_src_sampler_offset: {
2166 /* Emit code to evaluate the actual indexing expression */
2167 src_reg src = get_nir_src(instr->src[i].src, 1);
2168 src_reg temp(this, glsl_type::uint_type);
2169 emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
2170 sampler_reg = emit_uniformize(temp);
2171 break;
2172 }
2173
2174 case nir_tex_src_projector:
2175 unreachable("Should be lowered by do_lower_texture_projection");
2176
2177 case nir_tex_src_bias:
2178 unreachable("LOD bias is not valid for vertex shaders.\n");
2179
2180 default:
2181 unreachable("unknown texture source");
2182 }
2183 }
2184
2185 if (instr->op == nir_texop_txf_ms ||
2186 instr->op == nir_texop_samples_identical) {
2187 assert(coord_type != NULL);
2188 if (devinfo->gen >= 7 &&
2189 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
2190 mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
2191 } else {
2192 mcs = brw_imm_ud(0u);
2193 }
2194 }
2195
2196 /* Stuff the channel select bits in the top of the texture offset */
2197 if (instr->op == nir_texop_tg4) {
2198 if (instr->component == 1 &&
2199 (key_tex->gather_channel_quirk_mask & (1 << texture))) {
2200 /* gather4 sampler is broken for green channel on RG32F --
2201 * we must ask for blue instead.
2202 */
2203 constant_offset |= 2 << 16;
2204 } else {
2205 constant_offset |= instr->component << 16;
2206 }
2207 }
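/* For illustration: gathering the blue channel (component 2) results in
 * constant_offset |= 2 << 16, i.e. the channel select occupies bits
 * 17:16 of the texture offset.
 */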
2208
2209 ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);
2210
2211 emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
2212 shadow_comparator,
2213 lod, lod2, sample_index,
2214 constant_offset, offset_value, mcs,
2215 texture, texture_reg, sampler_reg);
2216 }
2217
2218 void
2219 vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
2220 {
2221 nir_ssa_values[instr->def.index] =
2222 dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
2223 }
2224
2225 /* SIMD4x2 64bit data is stored in register space like this:
2226 *
2227 * r0.0:DF x0 y0 z0 w0
2228 * r1.0:DF x1 y1 z1 w1
2229 *
2230 * When we need to write data such as this to memory using 32-bit write
2231 * messages we need to shuffle it in this fashion:
2232 *
2233 * r0.0:DF x0 y0 x1 y1 (to be written at base offset)
2234 * r1.0:DF z0 w0 z1 w1 (to be written at base offset + 16)
2235 *
2236 * We need to do the inverse operation when we read using 32-bit messages,
2237 * which we can do by applying exactly the same shuffling to the 64-bit
2238 * data we read, except that, because the data for each vertex is
2239 * positioned differently, we need to apply different channel enables.
2240 *
2241 * This function takes 64bit data and shuffles it as explained above.
2242 *
2243 * The @for_write parameter specifies whether the shuffling is being done on
2244 * proper SIMD4x2 64-bit data that needs to be shuffled prior to a 32-bit
2245 * write message (for_write = true), or whether we are doing the inverse
2246 * operation: we have just read 64-bit data using 32-bit messages and need to
2247 * shuffle it to produce valid SIMD4x2 64-bit data (for_write = false).
2248 *
2249 * If @block and @ref are non-NULL, then the shuffling is done after @ref,
2250 * otherwise the instructions are emitted normally at the end. The function
2251 * returns the last instruction inserted.
2252 *
2253 * Notice that @src and @dst cannot be the same register.
2254 */
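/* Note: the shuffle effectively swaps reg0.ZW with reg1.XY, so applying
 * it to just-read data {x0 y0 x1 y1 | z0 w0 z1 w1} reconstructs the
 * SIMD4x2 layout {x0 y0 z0 w0 | x1 y1 z1 w1} shown in the diagram above.
 */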
2255 vec4_instruction *
2256 vec4_visitor::shuffle_64bit_data(dst_reg dst, src_reg src, bool for_write,
2257 bblock_t *block, vec4_instruction *ref)
2258 {
2259 assert(type_sz(src.type) == 8);
2260 assert(type_sz(dst.type) == 8);
2261 assert(!regions_overlap(dst, 2 * REG_SIZE, src, 2 * REG_SIZE));
2262 assert(!ref == !block);
2263
2264 const vec4_builder bld = !ref ? vec4_builder(this).at_end() :
2265 vec4_builder(this).at(block, ref->next);
2266
2267 /* Resolve swizzle in src */
2268 vec4_instruction *inst;
2269 if (src.swizzle != BRW_SWIZZLE_XYZW) {
2270 dst_reg data = dst_reg(this, glsl_type::dvec4_type);
2271 inst = bld.MOV(data, src);
2272 src = src_reg(data);
2273 }
2274
2275 /* dst+0.XY = src+0.XY */
2276 inst = bld.group(4, 0).MOV(writemask(dst, WRITEMASK_XY), src);
2277
2278 /* dst+0.ZW = src+1.XY */
2279 inst = bld.group(4, for_write ? 1 : 0)
2280 .MOV(writemask(dst, WRITEMASK_ZW),
2281 swizzle(byte_offset(src, REG_SIZE), BRW_SWIZZLE_XYXY));
2282
2283 /* dst+1.XY = src+0.ZW */
2284 inst = bld.group(4, for_write ? 0 : 1)
2285 .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_XY),
2286 swizzle(src, BRW_SWIZZLE_ZWZW));
2287
2288 /* dst+1.ZW = src+1.ZW */
2289 inst = bld.group(4, 1)
2290 .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_ZW),
2291 byte_offset(src, REG_SIZE));
2292
2293 return inst;
2294 }
2295
2296 }