[mesa.git] / src / intel / compiler / brw_vec4_nir.cpp
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_nir.h"
25 #include "brw_vec4.h"
26 #include "brw_vec4_builder.h"
27 #include "brw_vec4_surface_builder.h"
28 #include "brw_eu.h"
29
30 using namespace brw;
31 using namespace brw::surface_access;
32
33 namespace brw {
34
35 void
36 vec4_visitor::emit_nir_code()
37 {
38 if (nir->num_uniforms > 0)
39 nir_setup_uniforms();
40
41 nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
42 }
43
44 void
45 vec4_visitor::nir_setup_uniforms()
46 {
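   /* Note: nir->num_uniforms is expressed in bytes here, while the vec4
    * backend counts uniforms in 16-byte (vec4) slots, hence the division
    * by 16 below.
    */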
47 uniforms = nir->num_uniforms / 16;
48 }
49
50 void
51 vec4_visitor::nir_emit_impl(nir_function_impl *impl)
52 {
53 nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
54 for (unsigned i = 0; i < impl->reg_alloc; i++) {
55 nir_locals[i] = dst_reg();
56 }
57
58 foreach_list_typed(nir_register, reg, node, &impl->registers) {
59 unsigned array_elems =
60 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
61 const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
62 nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));
63
64 if (reg->bit_size == 64)
65 nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
66 }
67
68 nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);
69
70 nir_emit_cf_list(&impl->body);
71 }
72
73 void
74 vec4_visitor::nir_emit_cf_list(exec_list *list)
75 {
76 exec_list_validate(list);
77 foreach_list_typed(nir_cf_node, node, node, list) {
78 switch (node->type) {
79 case nir_cf_node_if:
80 nir_emit_if(nir_cf_node_as_if(node));
81 break;
82
83 case nir_cf_node_loop:
84 nir_emit_loop(nir_cf_node_as_loop(node));
85 break;
86
87 case nir_cf_node_block:
88 nir_emit_block(nir_cf_node_as_block(node));
89 break;
90
91 default:
92 unreachable("Invalid CFG node block");
93 }
94 }
95 }
96
97 void
98 vec4_visitor::nir_emit_if(nir_if *if_stmt)
99 {
100 /* First, put the condition in f0 */
101 src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
102 vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
103 inst->conditional_mod = BRW_CONDITIONAL_NZ;
104
105 /* We can just predicate based on the X channel, as the condition was
106 * loaded as a single component replicated across all channels. */
107 emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));
108
109 nir_emit_cf_list(&if_stmt->then_list);
110
111 /* note: if the else is empty, dead CF elimination will remove it */
112 emit(BRW_OPCODE_ELSE);
113
114 nir_emit_cf_list(&if_stmt->else_list);
115
116 emit(BRW_OPCODE_ENDIF);
117 }
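/* Illustrative sketch (not actual generator output): for a scalar NIR
 * condition, the sequence emitted above corresponds roughly to
 *
 *    mov.nz.f0.0  null:D  cond.xxxx:D    // set the flag in every channel
 *    (+f0.0.x) if                        // predicate the IF on channel X
 *       ... then_list ...
 *    else
 *       ... else_list ...
 *    endif
 */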
118
119 void
120 vec4_visitor::nir_emit_loop(nir_loop *loop)
121 {
122 emit(BRW_OPCODE_DO);
123
124 nir_emit_cf_list(&loop->body);
125
126 emit(BRW_OPCODE_WHILE);
127 }
128
129 void
130 vec4_visitor::nir_emit_block(nir_block *block)
131 {
132 nir_foreach_instr(instr, block) {
133 nir_emit_instr(instr);
134 }
135 }
136
137 void
138 vec4_visitor::nir_emit_instr(nir_instr *instr)
139 {
140 base_ir = instr;
141
142 switch (instr->type) {
143 case nir_instr_type_load_const:
144 nir_emit_load_const(nir_instr_as_load_const(instr));
145 break;
146
147 case nir_instr_type_intrinsic:
148 nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
149 break;
150
151 case nir_instr_type_alu:
152 nir_emit_alu(nir_instr_as_alu(instr));
153 break;
154
155 case nir_instr_type_jump:
156 nir_emit_jump(nir_instr_as_jump(instr));
157 break;
158
159 case nir_instr_type_tex:
160 nir_emit_texture(nir_instr_as_tex(instr));
161 break;
162
163 case nir_instr_type_ssa_undef:
164 nir_emit_undef(nir_instr_as_ssa_undef(instr));
165 break;
166
167 default:
168 unreachable("VS instruction not yet implemented by NIR->vec4");
169 }
170 }
171
172 static dst_reg
173 dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
174 unsigned base_offset, nir_src *indirect)
175 {
176 dst_reg reg;
177
178 reg = v->nir_locals[nir_reg->index];
179 if (nir_reg->bit_size == 64)
180 reg.type = BRW_REGISTER_TYPE_DF;
181 reg = offset(reg, 8, base_offset);
182 if (indirect) {
183 reg.reladdr =
184 new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
185 BRW_REGISTER_TYPE_D,
186 1));
187 }
188 return reg;
189 }
190
191 dst_reg
192 vec4_visitor::get_nir_dest(const nir_dest &dest)
193 {
194 if (dest.is_ssa) {
195 dst_reg dst =
196 dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
197 if (dest.ssa.bit_size == 64)
198 dst.type = BRW_REGISTER_TYPE_DF;
199 nir_ssa_values[dest.ssa.index] = dst;
200 return dst;
201 } else {
202 return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
203 dest.reg.indirect);
204 }
205 }
206
207 dst_reg
208 vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
209 {
210 return retype(get_nir_dest(dest), type);
211 }
212
213 dst_reg
214 vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
215 {
216 return get_nir_dest(dest, brw_type_for_nir_type(devinfo, type));
217 }
218
219 src_reg
220 vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
221 unsigned num_components)
222 {
223 dst_reg reg;
224
225 if (src.is_ssa) {
226 assert(src.ssa != NULL);
227 reg = nir_ssa_values[src.ssa->index];
228 } else {
230 reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
231 src.reg.indirect);
232 }
233
234 reg = retype(reg, type);
235
236 src_reg reg_as_src = src_reg(reg);
237 reg_as_src.swizzle = brw_swizzle_for_size(num_components);
238 return reg_as_src;
239 }
240
241 src_reg
242 vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
243 unsigned num_components)
244 {
245 return get_nir_src(src, brw_type_for_nir_type(devinfo, type),
246 num_components);
247 }
248
249 src_reg
250 vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
251 {
252 /* if type is not specified, default to signed int */
253 return get_nir_src(src, nir_type_int32, num_components);
254 }
255
256 src_reg
257 vec4_visitor::get_nir_src_imm(const nir_src &src)
258 {
259 assert(nir_src_num_components(src) == 1);
260 assert(nir_src_bit_size(src) == 32);
261 return nir_src_is_const(src) ? src_reg(brw_imm_d(nir_src_as_int(src))) :
262 get_nir_src(src, 1);
263 }
264
265 src_reg
266 vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
267 {
268 nir_src *offset_src = nir_get_io_offset_src(instr);
269
270 if (nir_src_is_const(*offset_src)) {
271 /* The only constant offset we should find is 0. brw_nir.c's
272 * add_const_offset_to_base() will fold other constant offsets
273 * into instr->const_index[0].
274 */
275 assert(nir_src_as_uint(*offset_src) == 0);
276 return src_reg();
277 }
278
279 return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
280 }
281
282 static src_reg
283 setup_imm_df(const vec4_builder &bld, double v)
284 {
285 const gen_device_info *devinfo = bld.shader->devinfo;
286 assert(devinfo->gen >= 7);
287
288 if (devinfo->gen >= 8)
289 return brw_imm_df(v);
290
291 /* gen7.5 does not support DF immediates directly, but the DIM
292 * instruction allows us to set a 64-bit immediate value.
293 */
294 if (devinfo->is_haswell) {
295 const vec4_builder ubld = bld.exec_all();
296 const dst_reg dst = bld.vgrf(BRW_REGISTER_TYPE_DF);
297 ubld.DIM(dst, brw_imm_df(v));
298 return swizzle(src_reg(dst), BRW_SWIZZLE_XXXX);
299 }
300
301 /* gen7 does not support DF immediates */
302 union {
303 double d;
304 struct {
305 uint32_t i1;
306 uint32_t i2;
307 };
308 } di;
309
310 di.d = v;
311
312 /* Write the low 32 bits of the constant to the X:UD channel and the
313 * high 32 bits to the Y:UD channel to build the constant in a VGRF.
314 * We have to do this twice (offset 0 and offset 1), since a DF VGRF takes
315 * two SIMD8 registers in SIMD4x2 execution. Finally, return a swizzle
316 * XXXX so any access to the VGRF only reads the constant data in these
317 * channels.
318 */
319 const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
320 for (unsigned n = 0; n < 2; n++) {
321 const vec4_builder ubld = bld.exec_all().group(4, n);
322 ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_X), brw_imm_ud(di.i1));
323 ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_Y), brw_imm_ud(di.i2));
324 }
325
326 return swizzle(src_reg(retype(tmp, BRW_REGISTER_TYPE_DF)), BRW_SWIZZLE_XXXX);
327 }
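/* Worked example for the gen7 path above (illustrative, assuming the usual
 * little-endian layout): for v = 1.0 the 64-bit pattern is
 * 0x3FF0000000000000, so di.i1 = 0x00000000 (low dword) and
 * di.i2 = 0x3FF00000 (high dword). The loop writes X = di.i1 and Y = di.i2
 * into both SIMD8 halves of the VGRF, and the XXXX swizzle on the DF view
 * then reads 1.0 in every channel.
 */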
328
329 void
330 vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
331 {
332 dst_reg reg;
333
334 if (instr->def.bit_size == 64) {
335 reg = dst_reg(VGRF, alloc.allocate(2));
336 reg.type = BRW_REGISTER_TYPE_DF;
337 } else {
338 reg = dst_reg(VGRF, alloc.allocate(1));
339 reg.type = BRW_REGISTER_TYPE_D;
340 }
341
342 const vec4_builder ibld = vec4_builder(this).at_end();
343 unsigned remaining = brw_writemask_for_size(instr->def.num_components);
344
345 /* @FIXME: consider emitting vector operations to save some MOVs in
346 * cases where the components are representable in 8 bits.
347 * For now, we emit a MOV for each distinct value.
348 */
349 for (unsigned i = 0; i < instr->def.num_components; i++) {
350 unsigned writemask = 1 << i;
351
352 if ((remaining & writemask) == 0)
353 continue;
354
355 for (unsigned j = i; j < instr->def.num_components; j++) {
356 if ((instr->def.bit_size == 32 &&
357 instr->value[i].u32 == instr->value[j].u32) ||
358 (instr->def.bit_size == 64 &&
359 instr->value[i].f64 == instr->value[j].f64)) {
360 writemask |= 1 << j;
361 }
362 }
363
364 reg.writemask = writemask;
365 if (instr->def.bit_size == 64) {
366 emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
367 } else {
368 emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
369 }
370
371 remaining &= ~writemask;
372 }
373
374 /* Set final writemask */
375 reg.writemask = brw_writemask_for_size(instr->def.num_components);
376
377 nir_ssa_values[instr->def.index] = reg;
378 }
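/* Worked example for the coalescing loop above: for a 32-bit constant such
 * as ivec4(1, 2, 1, 2), the i == 0 pass finds the matching value in Z and
 * emits one MOV with writemask .xz; the i == 1 pass then emits a second MOV
 * with writemask .yw. Two MOVs instead of four.
 */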
379
380 src_reg
381 vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
382 {
383 /* SSBO stores are weird in that their index is in src[1] */
384 const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
385
386 src_reg surf_index;
387 if (nir_src_is_const(instr->src[src])) {
388 unsigned index = prog_data->base.binding_table.ssbo_start +
389 nir_src_as_uint(instr->src[src]);
390 surf_index = brw_imm_ud(index);
391 } else {
392 surf_index = src_reg(this, glsl_type::uint_type);
393 emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
394 brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
395 surf_index = emit_uniformize(surf_index);
396 }
397
398 return surf_index;
399 }
400
401 void
402 vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
403 {
404 dst_reg dest;
405 src_reg src;
406
407 switch (instr->intrinsic) {
408
409 case nir_intrinsic_load_input: {
410 assert(nir_dest_bit_size(instr->dest) == 32);
411 /* We set EmitNoIndirectInput for VS */
412 unsigned load_offset = nir_src_as_uint(instr->src[0]);
413
414 dest = get_nir_dest(instr->dest);
415 dest.writemask = brw_writemask_for_size(instr->num_components);
416
417 src = src_reg(ATTR, instr->const_index[0] + load_offset,
418 glsl_type::uvec4_type);
419 src = retype(src, dest.type);
420
421 /* Swizzle source based on component layout qualifier */
422 src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
423 emit(MOV(dest, src));
424 break;
425 }
426
427 case nir_intrinsic_store_output: {
428 assert(nir_src_bit_size(instr->src[0]) == 32);
429 unsigned store_offset = nir_src_as_uint(instr->src[1]);
430 int varying = instr->const_index[0] + store_offset;
431 src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
432 instr->num_components);
433
434 unsigned c = nir_intrinsic_component(instr);
435 output_reg[varying][c] = dst_reg(src);
436 output_num_components[varying][c] = instr->num_components;
437 break;
438 }
439
440 case nir_intrinsic_get_buffer_size: {
441 assert(nir_src_num_components(instr->src[0]) == 1);
442 unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
443 nir_src_as_uint(instr->src[0]) : 0;
444
445 const unsigned index =
446 prog_data->base.binding_table.ssbo_start + ssbo_index;
447 dst_reg result_dst = get_nir_dest(instr->dest);
448 vec4_instruction *inst = new(mem_ctx)
449 vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);
450
451 inst->base_mrf = 2;
452 inst->mlen = 1; /* always at least one */
453 inst->src[1] = brw_imm_ud(index);
454
455 /* MRF for the first parameter */
456 src_reg lod = brw_imm_d(0);
457 int param_base = inst->base_mrf;
458 int writemask = WRITEMASK_X;
459 emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));
460
461 emit(inst);
462 break;
463 }
464
465 case nir_intrinsic_store_ssbo: {
466 assert(devinfo->gen >= 7);
467
468 /* brw_nir_lower_mem_access_bit_sizes takes care of this */
469 assert(nir_src_bit_size(instr->src[0]) == 32);
470 assert(nir_intrinsic_write_mask(instr) ==
471 (1u << instr->num_components) - 1);
472
473 src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
474 src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
475 BRW_REGISTER_TYPE_UD);
476
477 /* Value */
478 src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);
479
480 /* IvyBridge does not have a native SIMD4x2 untyped write message, so untyped
481 * writes will use SIMD8 mode. In order to hide this and keep symmetry across
482 * typed and untyped messages and across hardware platforms, the
483 * current implementation of the untyped messages will transparently convert
484 * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
485 * and enabling only channel X on the SEND instruction.
486 *
487 * The above works well for full vector writes, but not for partial writes
488 * where we want to write some channels and not others, like when we have
489 * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
490 * quite restrictive with regards to the channel enables we can configure in
491 * the message descriptor (not all combinations are allowed) we cannot simply
492 * implement these scenarios with a single message while keeping the
493 * aforementioned symmetry in the implementation. For now we have decided that
494 * it is better to keep the symmetry to reduce complexity, so in situations
495 * such as the one described we end up emitting two untyped write messages
496 * (one for xy and another for w).
497 *
498 * The code below packs consecutive channels into a single write message,
499 * detects gaps in the vector write and if needed, sends a second message
500 * with the remaining channels. If in the future we decide that we want to
501 * emit a single message at the expense of losing the symmetry in the
502 * implementation we can:
503 *
504 * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
505 * message payload. In this mode we can write up to 8 offsets and dwords
506 * to the red channel only (for the two vec4s in the SIMD4x2 execution)
507 * and select which of the 8 channels carry data to write by setting the
508 * appropriate writemask in the dst register of the SEND instruction.
509 * It would require writing a new generator opcode specifically for
510 * IvyBridge since we would need to prepare a SIMD8 payload that could
511 * use any channel, not just X.
512 *
513 * 2) For Haswell+: Simply send a single write message but set the writemask
514 * on the dst of the SEND instruction to select the channels we want to
515 * write. It would require modifying the current messages to receive
516 * and honor the writemask provided.
517 */
518 const vec4_builder bld = vec4_builder(this).at_end()
519 .annotate(current_annotation, base_ir);
520
521 emit_untyped_write(bld, surf_index, offset_reg, val_reg,
522 1 /* dims */, instr->num_components /* size */,
523 BRW_PREDICATE_NONE);
524 break;
525 }
526
527 case nir_intrinsic_load_ssbo: {
528 assert(devinfo->gen >= 7);
529
530 /* brw_nir_lower_mem_access_bit_sizes takes care of this */
531 assert(nir_dest_bit_size(instr->dest) == 32);
532
533 src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
534 src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
535 BRW_REGISTER_TYPE_UD);
536
537 /* Read the vector */
538 const vec4_builder bld = vec4_builder(this).at_end()
539 .annotate(current_annotation, base_ir);
540
541 src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
542 1 /* dims */, 4 /* size */,
543 BRW_PREDICATE_NONE);
544 dst_reg dest = get_nir_dest(instr->dest);
545 read_result.type = dest.type;
546 read_result.swizzle = brw_swizzle_for_size(instr->num_components);
547 emit(MOV(dest, read_result));
548 break;
549 }
550
551 case nir_intrinsic_ssbo_atomic_add:
552 case nir_intrinsic_ssbo_atomic_imin:
553 case nir_intrinsic_ssbo_atomic_umin:
554 case nir_intrinsic_ssbo_atomic_imax:
555 case nir_intrinsic_ssbo_atomic_umax:
556 case nir_intrinsic_ssbo_atomic_and:
557 case nir_intrinsic_ssbo_atomic_or:
558 case nir_intrinsic_ssbo_atomic_xor:
559 case nir_intrinsic_ssbo_atomic_exchange:
560 case nir_intrinsic_ssbo_atomic_comp_swap:
561 nir_emit_ssbo_atomic(brw_aop_for_nir_intrinsic(instr), instr);
562 break;
563
564 case nir_intrinsic_load_vertex_id:
565 unreachable("should be lowered by lower_vertex_id()");
566
567 case nir_intrinsic_load_vertex_id_zero_base:
568 case nir_intrinsic_load_base_vertex:
569 case nir_intrinsic_load_instance_id:
570 case nir_intrinsic_load_base_instance:
571 case nir_intrinsic_load_draw_id:
572 case nir_intrinsic_load_invocation_id:
573 unreachable("should be lowered by brw_nir_lower_vs_inputs()");
574
575 case nir_intrinsic_load_uniform: {
576 /* Offsets are in bytes but they should always be multiples of 4 */
577 assert(nir_intrinsic_base(instr) % 4 == 0);
578
579 dest = get_nir_dest(instr->dest);
580
581 src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
582 src.type = dest.type;
583
584 /* Uniforms don't actually have to be vec4 aligned. In the case that
585 * it isn't, we have to use a swizzle to shift things around. They
586 * do still have the std140 alignment requirement that vec2's have to
587 * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
588 *
589 * The swizzle also works in the indirect case as the generator adds
590 * the swizzle to the offset for us.
591 */
592 const int type_size = type_sz(src.type);
593 unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
594 assert(shift + instr->num_components <= 4);
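
      /* Worked example: a float uniform with nir_intrinsic_base == 4 and one
       * component gives type_size == 4 and shift == 1; with a constant
       * src[0] offset of 0 this yields offset == 4, src.offset == 0 and a
       * final YYYY swizzle, i.e. the MOV below reads channel Y of uniform
       * vec4 0.
       */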
595
596 if (nir_src_is_const(instr->src[0])) {
597 const unsigned load_offset = nir_src_as_uint(instr->src[0]);
598 /* Offsets are in bytes but they should always be multiples of 4 */
599 assert(load_offset % 4 == 0);
600
601 src.swizzle = brw_swizzle_for_size(instr->num_components);
602 dest.writemask = brw_writemask_for_size(instr->num_components);
603 unsigned offset = load_offset + shift * type_size;
604 src.offset = ROUND_DOWN_TO(offset, 16);
605 shift = (offset % 16) / type_size;
606 assert(shift + instr->num_components <= 4);
607 src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
608
609 emit(MOV(dest, src));
610 } else {
611 /* Uniform arrays are vec4 aligned, because of std140 alignment
612 * rules.
613 */
614 assert(shift == 0);
615
616 src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);
617
618 /* MOV_INDIRECT is going to stomp the whole thing anyway */
619 dest.writemask = WRITEMASK_XYZW;
620
621 emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
622 indirect, brw_imm_ud(instr->const_index[1]));
623 }
624 break;
625 }
626
627 case nir_intrinsic_load_ubo: {
628 src_reg surf_index;
629
630 prog_data->base.has_ubo_pull = true;
631
632 dest = get_nir_dest(instr->dest);
633
634 if (nir_src_is_const(instr->src[0])) {
635 /* The block index is a constant, so just emit the binding table entry
636 * as an immediate.
637 */
638 const unsigned index = prog_data->base.binding_table.ubo_start +
639 nir_src_as_uint(instr->src[0]);
640 surf_index = brw_imm_ud(index);
641 } else {
642 /* The block index is not a constant. Evaluate the index expression
643 * per-channel and add the base UBO index; we have to select a value
644 * from any live channel.
645 */
646 surf_index = src_reg(this, glsl_type::uint_type);
647 emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int32,
648 instr->num_components),
649 brw_imm_ud(prog_data->base.binding_table.ubo_start)));
650 surf_index = emit_uniformize(surf_index);
651 }
652
653 src_reg offset_reg;
654 if (nir_src_is_const(instr->src[1])) {
655 unsigned load_offset = nir_src_as_uint(instr->src[1]);
656 offset_reg = brw_imm_ud(load_offset & ~15);
657 } else {
658 offset_reg = src_reg(this, glsl_type::uint_type);
659 emit(MOV(dst_reg(offset_reg),
660 get_nir_src(instr->src[1], nir_type_uint32, 1)));
661 }
662
663 src_reg packed_consts;
664 if (nir_dest_bit_size(instr->dest) == 32) {
665 packed_consts = src_reg(this, glsl_type::vec4_type);
666 emit_pull_constant_load_reg(dst_reg(packed_consts),
667 surf_index,
668 offset_reg,
669 NULL, NULL /* before_block/inst */);
670 } else {
671 src_reg temp = src_reg(this, glsl_type::dvec4_type);
672 src_reg temp_float = retype(temp, BRW_REGISTER_TYPE_F);
673
674 emit_pull_constant_load_reg(dst_reg(temp_float),
675 surf_index, offset_reg, NULL, NULL);
676 if (offset_reg.file == IMM)
677 offset_reg.ud += 16;
678 else
679 emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(16u)));
680 emit_pull_constant_load_reg(dst_reg(byte_offset(temp_float, REG_SIZE)),
681 surf_index, offset_reg, NULL, NULL);
682
683 packed_consts = src_reg(this, glsl_type::dvec4_type);
684 shuffle_64bit_data(dst_reg(packed_consts), temp, false);
685 }
686
687 packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
688 if (nir_src_is_const(instr->src[1])) {
689 unsigned load_offset = nir_src_as_uint(instr->src[1]);
690 unsigned type_size = type_sz(dest.type);
691 packed_consts.swizzle +=
692 BRW_SWIZZLE4(load_offset % 16 / type_size,
693 load_offset % 16 / type_size,
694 load_offset % 16 / type_size,
695 load_offset % 16 / type_size);
696 }
697
698 emit(MOV(dest, retype(packed_consts, dest.type)));
699
700 break;
701 }
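/* Note on the 64-bit path above: a dvec4 spans 32 bytes, so two 16-byte
 * pull-constant loads are issued (the second at offset + 16), and
 * shuffle_64bit_data then rearranges the fetched 32-bit pairs into the
 * layout that 64-bit instructions expect.
 */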
702
703 case nir_intrinsic_memory_barrier: {
704 const vec4_builder bld =
705 vec4_builder(this).at_end().annotate(current_annotation, base_ir);
706 const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
707 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp, brw_vec8_grf(0, 0))
708 ->size_written = 2 * REG_SIZE;
709 break;
710 }
711
712 case nir_intrinsic_shader_clock: {
713 /* We cannot do anything if there is an event, so ignore it for now */
714 const src_reg shader_clock = get_timestamp();
715 const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);
716
717 dest = get_nir_dest(instr->dest, type);
718 emit(MOV(dest, shader_clock));
719 break;
720 }
721
722 default:
723 unreachable("Unknown intrinsic");
724 }
725 }
726
727 void
728 vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
729 {
730 dst_reg dest;
731 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
732 dest = get_nir_dest(instr->dest);
733
734 src_reg surface = get_nir_ssbo_intrinsic_index(instr);
735 src_reg offset = get_nir_src(instr->src[1], 1);
736 src_reg data1;
737 if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
738 data1 = get_nir_src(instr->src[2], 1);
739 src_reg data2;
740 if (op == BRW_AOP_CMPWR)
741 data2 = get_nir_src(instr->src[3], 1);
742
743 /* Emit the actual atomic operation */
744 const vec4_builder bld =
745 vec4_builder(this).at_end().annotate(current_annotation, base_ir);
746
747 src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
748 data1, data2,
749 1 /* dims */, 1 /* rsize */,
750 op,
751 BRW_PREDICATE_NONE);
752 dest.type = atomic_result.type;
753 bld.MOV(dest, atomic_result);
754 }
755
756 static unsigned
757 brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
758 {
759 return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
760 }
761
762 bool
763 vec4_visitor::optimize_predicate(nir_alu_instr *instr,
764 enum brw_predicate *predicate)
765 {
766 if (!instr->src[0].src.is_ssa ||
767 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
768 return false;
769
770 nir_alu_instr *cmp_instr =
771 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
772
773 switch (cmp_instr->op) {
774 case nir_op_b32any_fnequal2:
775 case nir_op_b32any_inequal2:
776 case nir_op_b32any_fnequal3:
777 case nir_op_b32any_inequal3:
778 case nir_op_b32any_fnequal4:
779 case nir_op_b32any_inequal4:
780 *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
781 break;
782 case nir_op_b32all_fequal2:
783 case nir_op_b32all_iequal2:
784 case nir_op_b32all_fequal3:
785 case nir_op_b32all_iequal3:
786 case nir_op_b32all_fequal4:
787 case nir_op_b32all_iequal4:
788 *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
789 break;
790 default:
791 return false;
792 }
793
794 unsigned size_swizzle =
795 brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);
796
797 src_reg op[2];
798 assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
799 for (unsigned i = 0; i < 2; i++) {
800 nir_alu_type type = nir_op_infos[cmp_instr->op].input_types[i];
801 unsigned bit_size = nir_src_bit_size(cmp_instr->src[i].src);
802 type = (nir_alu_type) (((unsigned) type) | bit_size);
803 op[i] = get_nir_src(cmp_instr->src[i].src, type, 4);
804 unsigned base_swizzle =
805 brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
806 op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
807 op[i].abs = cmp_instr->src[i].abs;
808 op[i].negate = cmp_instr->src[i].negate;
809 }
810
811 emit(CMP(dst_null_d(), op[0], op[1],
812 brw_cmod_for_nir_comparison(cmp_instr->op)));
813
814 return true;
815 }
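/* Usage sketch: when a boolean source comes from a vector comparison such as
 * any(notEqual(a, b)), a caller can use the predicate returned here (ANY4H
 * or ALL4H) together with the CMP emitted above to consume the flag result
 * directly, instead of materializing the intermediate boolean value.
 */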
816
817 static void
818 emit_find_msb_using_lzd(const vec4_builder &bld,
819 const dst_reg &dst,
820 const src_reg &src,
821 bool is_signed)
822 {
823 vec4_instruction *inst;
824 src_reg temp = src;
825
826 if (is_signed) {
827 /* LZD of an absolute value source almost always does the right
828 * thing. There are two problem values:
829 *
830 * * 0x80000000. Since abs(0x80000000) == 0x80000000, LZD returns
831 * 0. However, findMSB(int(0x80000000)) == 30.
832 *
833 * * 0xffffffff. Since abs(0xffffffff) == 1, LZD returns
834 * 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
835 *
836 * For a value of zero or negative one, -1 will be returned.
837 *
838 * * Negative powers of two. LZD(abs(-(1<<x))) returns x, but
839 * findMSB(-(1<<x)) should return x-1.
840 *
841 * For all negative number cases, including 0x80000000 and
842 * 0xffffffff, the correct value is obtained from LZD if the logical-not
843 * of the value is used instead of negating the (already negative) value.
844 * A conditional logical-not can be achieved in two instructions.
845 */
846 temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));
847
848 bld.ASR(dst_reg(temp), src, brw_imm_d(31));
849 bld.XOR(dst_reg(temp), temp, src);
850 }
851
852 bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
853 retype(temp, BRW_REGISTER_TYPE_UD));
854
855 /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
856 * from the LSB side. Subtract the result from 31 to convert the MSB count
857 * into an LSB count. If no bits are set, LZD will return 32. 31-32 = -1,
858 * which is exactly what findMSB() is supposed to return.
859 */
860 inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
861 brw_imm_d(31));
862 inst->src[0].negate = true;
863 }
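/* Worked example for emit_find_msb_using_lzd with is_signed: findMSB(-8).
 * src = 0xFFFFFFF8, so the ASR by 31 yields 0xFFFFFFFF and the XOR produces
 * 0x00000007, the logical-not of the source. LZD(7) = 29, and the negated
 * ADD computes 31 - 29 = 2, matching GLSL findMSB(-8).
 */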
864
865 void
866 vec4_visitor::emit_conversion_from_double(dst_reg dst, src_reg src,
867 bool saturate)
868 {
869 /* BDW PRM vol 15 - workarounds:
870 * DF->f format conversion for Align16 has wrong emask calculation when
871 * source is immediate.
872 */
873 if (devinfo->gen == 8 && dst.type == BRW_REGISTER_TYPE_F &&
874 src.file == BRW_IMMEDIATE_VALUE) {
875 vec4_instruction *inst = emit(MOV(dst, brw_imm_f(src.df)));
876 inst->saturate = saturate;
877 return;
878 }
879
880 enum opcode op;
881 switch (dst.type) {
882 case BRW_REGISTER_TYPE_D:
883 op = VEC4_OPCODE_DOUBLE_TO_D32;
884 break;
885 case BRW_REGISTER_TYPE_UD:
886 op = VEC4_OPCODE_DOUBLE_TO_U32;
887 break;
888 case BRW_REGISTER_TYPE_F:
889 op = VEC4_OPCODE_DOUBLE_TO_F32;
890 break;
891 default:
892 unreachable("Unknown conversion");
893 }
894
895 dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
896 emit(MOV(temp, src));
897 dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
898 emit(op, temp2, src_reg(temp));
899
900 emit(VEC4_OPCODE_PICK_LOW_32BIT, retype(temp2, dst.type), src_reg(temp2));
901 vec4_instruction *inst = emit(MOV(dst, src_reg(retype(temp2, dst.type))));
902 inst->saturate = saturate;
903 }
904
905 void
906 vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src,
907 bool saturate)
908 {
909 dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
910 src_reg tmp_src = retype(src_reg(this, glsl_type::vec4_type), src.type);
911 emit(MOV(dst_reg(tmp_src), src));
912 emit(VEC4_OPCODE_TO_DOUBLE, tmp_dst, tmp_src);
913 vec4_instruction *inst = emit(MOV(dst, src_reg(tmp_dst)));
914 inst->saturate = saturate;
915 }
916
917 /**
918 * Try to use an immediate value for a source
919 *
920 * In cases of flow control, constant propagation is sometimes unable to
921 * determine that a register contains a constant value. To work around this,
922 * try to emit a literal as one of the sources. If \c try_src0_also is set,
923 * \c op[0] will also be tried for an immediate value.
924 *
925 * If \c op[0] is modified, the operands will be exchanged so that \c op[1]
926 * will always be the immediate value.
927 *
928 * \return The index of the source that was modified, 0 or 1, if successful.
929 * Otherwise, -1.
930 *
931 * \param op - Operands to the instruction
932 * \param try_src0_also - True if \c op[0] should also be a candidate for
933 * getting an immediate value. This should only be set
934 * for commutative operations.
935 */
936 static int
937 try_immediate_source(const nir_alu_instr *instr, src_reg *op,
938 bool try_src0_also,
939 ASSERTED const gen_device_info *devinfo)
940 {
941 unsigned idx;
942
943 /* MOV should be the only single-source instruction passed to this
944 * function. Any other unary instruction with a constant source should
945 * have been constant-folded away!
946 */
947 assert(nir_op_infos[instr->op].num_inputs > 1 ||
948 instr->op == nir_op_mov);
949
950 if (instr->op != nir_op_mov &&
951 nir_src_bit_size(instr->src[1].src) == 32 &&
952 nir_src_is_const(instr->src[1].src)) {
953 idx = 1;
954 } else if (try_src0_also &&
955 nir_src_bit_size(instr->src[0].src) == 32 &&
956 nir_src_is_const(instr->src[0].src)) {
957 idx = 0;
958 } else {
959 return -1;
960 }
961
962 const enum brw_reg_type old_type = op[idx].type;
963
964 switch (old_type) {
965 case BRW_REGISTER_TYPE_D:
966 case BRW_REGISTER_TYPE_UD: {
967 int first_comp = -1;
968 int d = 0;
969
970 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
971 if (nir_alu_instr_channel_used(instr, idx, i)) {
972 if (first_comp < 0) {
973 first_comp = i;
974 d = nir_src_comp_as_int(instr->src[idx].src,
975 instr->src[idx].swizzle[i]);
976 } else if (d != nir_src_comp_as_int(instr->src[idx].src,
977 instr->src[idx].swizzle[i])) {
978 return -1;
979 }
980 }
981 }
982
983 assert(first_comp >= 0);
984
985 if (op[idx].abs)
986 d = MAX2(-d, d);
987
988 if (op[idx].negate) {
989 /* On Gen8+ a negation source modifier on a logical operation means
990 * something different. Nothing should generate this, so assert that
991 * it does not occur.
992 */
993 assert(devinfo->gen < 8 || (instr->op != nir_op_iand &&
994 instr->op != nir_op_ior &&
995 instr->op != nir_op_ixor));
996 d = -d;
997 }
998
999 op[idx] = retype(src_reg(brw_imm_d(d)), old_type);
1000 break;
1001 }
1002
1003 case BRW_REGISTER_TYPE_F: {
1004 int first_comp = -1;
1005 float f[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
1006 bool is_scalar = true;
1007
1008 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
1009 if (nir_alu_instr_channel_used(instr, idx, i)) {
1010 f[i] = nir_src_comp_as_float(instr->src[idx].src,
1011 instr->src[idx].swizzle[i]);
1012 if (first_comp < 0) {
1013 first_comp = i;
1014 } else if (f[first_comp] != f[i]) {
1015 is_scalar = false;
1016 }
1017 }
1018 }
1019
1020 if (is_scalar) {
1021 if (op[idx].abs)
1022 f[first_comp] = fabs(f[first_comp]);
1023
1024 if (op[idx].negate)
1025 f[first_comp] = -f[first_comp];
1026
1027 op[idx] = src_reg(brw_imm_f(f[first_comp]));
1028 assert(op[idx].type == old_type);
1029 } else {
1030 uint8_t vf_values[4] = { 0, 0, 0, 0 };
1031
1032 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
1033 if (op[idx].abs)
1034 f[i] = fabs(f[i]);
1035
1036 if (op[idx].negate)
1037 f[i] = -f[i];
1038
1039 const int vf = brw_float_to_vf(f[i]);
1040 if (vf == -1)
1041 return -1;
1042
1043 vf_values[i] = vf;
1044 }
1045
1046 op[idx] = src_reg(brw_imm_vf4(vf_values[0], vf_values[1],
1047 vf_values[2], vf_values[3]));
1048 }
1049 break;
1050 }
1051
1052 default:
1053 unreachable("Non-32bit type.");
1054 }
1055
1056 /* If the instruction has more than one source, the instruction format only
1057 * allows source 1 to be an immediate value. If the immediate value was
1058 * source 0, then the sources must be exchanged.
1059 */
1060 if (idx == 0 && instr->op != nir_op_mov) {
1061 src_reg tmp = op[0];
1062 op[0] = op[1];
1063 op[1] = tmp;
1064 }
1065
1066 return idx;
1067 }
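/* Usage sketch for try_immediate_source: for an ALU op such as
 * fadd(ssa_1, 2.0), op[1] becomes brw_imm_f(2.0). If the constant had been
 * in src0 of a commutative comparison, the operands are swapped so the
 * immediate lands in src1, and the caller must then swap the conditional
 * modifier as well (see the brw_swap_cmod call in nir_emit_alu).
 */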
1068
1069 void
1070 vec4_visitor::fix_float_operands(src_reg op[3], nir_alu_instr *instr)
1071 {
1072 bool fixed[3] = { false, false, false };
1073
1074 for (unsigned i = 0; i < 2; i++) {
1075 if (!nir_src_is_const(instr->src[i].src))
1076 continue;
1077
1078 for (unsigned j = i + 1; j < 3; j++) {
1079 if (fixed[j])
1080 continue;
1081
1082 if (!nir_src_is_const(instr->src[j].src))
1083 continue;
1084
1085 if (nir_alu_srcs_equal(instr, instr, i, j)) {
1086 if (!fixed[i])
1087 op[i] = fix_3src_operand(op[i]);
1088
1089 op[j] = op[i];
1090
1091 fixed[i] = true;
1092 fixed[j] = true;
1093 } else if (nir_alu_srcs_negative_equal(instr, instr, i, j)) {
1094 if (!fixed[i])
1095 op[i] = fix_3src_operand(op[i]);
1096
1097 op[j] = op[i];
1098 op[j].negate = !op[j].negate;
1099
1100 fixed[i] = true;
1101 fixed[j] = true;
1102 }
1103 }
1104 }
1105
1106 for (unsigned i = 0; i < 3; i++) {
1107 if (!fixed[i])
1108 op[i] = fix_3src_operand(op[i]);
1109 }
1110 }
1111
1112 static bool
1113 const_src_fits_in_16_bits(const nir_src &src, brw_reg_type type)
1114 {
1115 assert(nir_src_is_const(src));
1116 if (type_is_unsigned_int(type)) {
1117 return nir_src_comp_as_uint(src, 0) <= UINT16_MAX;
1118 } else {
1119 const int64_t c = nir_src_comp_as_int(src, 0);
1120 return c <= INT16_MAX && c >= INT16_MIN;
1121 }
1122 }
1123
1124 void
1125 vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
1126 {
1127 vec4_instruction *inst;
1128
1129 nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
1130 nir_dest_bit_size(instr->dest.dest));
1131 dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
1132 dst.writemask = instr->dest.write_mask;
1133
1134 src_reg op[4];
1135 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1136 nir_alu_type src_type = (nir_alu_type)
1137 (nir_op_infos[instr->op].input_types[i] |
1138 nir_src_bit_size(instr->src[i].src));
1139 op[i] = get_nir_src(instr->src[i].src, src_type, 4);
1140 op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
1141 op[i].abs = instr->src[i].abs;
1142 op[i].negate = instr->src[i].negate;
1143 }
1144
1145 switch (instr->op) {
1146 case nir_op_mov:
1147 try_immediate_source(instr, &op[0], true, devinfo);
1148 inst = emit(MOV(dst, op[0]));
1149 inst->saturate = instr->dest.saturate;
1150 break;
1151
1152 case nir_op_vec2:
1153 case nir_op_vec3:
1154 case nir_op_vec4:
1155 unreachable("not reached: should be handled by lower_vec_to_movs()");
1156
1157 case nir_op_i2f32:
1158 case nir_op_u2f32:
1159 inst = emit(MOV(dst, op[0]));
1160 inst->saturate = instr->dest.saturate;
1161 break;
1162
1163 case nir_op_f2f32:
1164 case nir_op_f2i32:
1165 case nir_op_f2u32:
1166 if (nir_src_bit_size(instr->src[0].src) == 64)
1167 emit_conversion_from_double(dst, op[0], instr->dest.saturate);
1168 else
1169 inst = emit(MOV(dst, op[0]));
1170 break;
1171
1172 case nir_op_f2f64:
1173 case nir_op_i2f64:
1174 case nir_op_u2f64:
1175 emit_conversion_to_double(dst, op[0], instr->dest.saturate);
1176 break;
1177
1178 case nir_op_fsat:
1179 inst = emit(MOV(dst, op[0]));
1180 inst->saturate = true;
1181 break;
1182
1183 case nir_op_fneg:
1184 case nir_op_ineg:
1185 op[0].negate = true;
1186 inst = emit(MOV(dst, op[0]));
1187 if (instr->op == nir_op_fneg)
1188 inst->saturate = instr->dest.saturate;
1189 break;
1190
1191 case nir_op_fabs:
1192 case nir_op_iabs:
1193 op[0].negate = false;
1194 op[0].abs = true;
1195 inst = emit(MOV(dst, op[0]));
1196 if (instr->op == nir_op_fabs)
1197 inst->saturate = instr->dest.saturate;
1198 break;
1199
1200 case nir_op_iadd:
1201 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1202 /* fall through */
1203 case nir_op_fadd:
1204 try_immediate_source(instr, op, true, devinfo);
1205 inst = emit(ADD(dst, op[0], op[1]));
1206 inst->saturate = instr->dest.saturate;
1207 break;
1208
1209 case nir_op_uadd_sat:
1210 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1211 inst = emit(ADD(dst, op[0], op[1]));
1212 inst->saturate = true;
1213 break;
1214
1215 case nir_op_fmul:
1216 try_immediate_source(instr, op, true, devinfo);
1217 inst = emit(MUL(dst, op[0], op[1]));
1218 inst->saturate = instr->dest.saturate;
1219 break;
1220
1221 case nir_op_imul: {
1222 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1223 if (devinfo->gen < 8) {
1224 /* For integer multiplication, the MUL uses the low 16 bits of one of
1225 * the operands (src0 through SNB, src1 on IVB and later). The MACH
1226 * accumulates in the contribution of the upper 16 bits of that
1227 * operand. If we can determine that one of the args is in the low
1228 * 16 bits, though, we can just emit a single MUL.
1229 */
1230 if (nir_src_is_const(instr->src[0].src) &&
1231 nir_alu_instr_src_read_mask(instr, 0) == 1 &&
1232 const_src_fits_in_16_bits(instr->src[0].src, op[0].type)) {
1233 if (devinfo->gen < 7)
1234 emit(MUL(dst, op[0], op[1]));
1235 else
1236 emit(MUL(dst, op[1], op[0]));
1237 } else if (nir_src_is_const(instr->src[1].src) &&
1238 nir_alu_instr_src_read_mask(instr, 1) == 1 &&
1239 const_src_fits_in_16_bits(instr->src[1].src, op[1].type)) {
1240 if (devinfo->gen < 7)
1241 emit(MUL(dst, op[1], op[0]));
1242 else
1243 emit(MUL(dst, op[0], op[1]));
1244 } else {
1245 struct brw_reg acc = retype(brw_acc_reg(8), dst.type);
1246
1247 emit(MUL(acc, op[0], op[1]));
1248 emit(MACH(dst_null_d(), op[0], op[1]));
1249 emit(MOV(dst, src_reg(acc)));
1250 }
1251 } else {
1252 emit(MUL(dst, op[0], op[1]));
1253 }
1254 break;
1255 }
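   /* Example for the pre-gen8 multiply above: a constant operand such as
    * 100 fits in 16 bits, so a single MUL suffices; a constant such as
    * 0x20000 does not, so the MUL + MACH + MOV sequence is needed to
    * assemble the full 32-bit product.
    */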
1256
1257 case nir_op_imul_high:
1258 case nir_op_umul_high: {
1259 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1260 struct brw_reg acc = retype(brw_acc_reg(8), dst.type);
1261
1262 if (devinfo->gen >= 8)
1263 emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
1264 else
1265 emit(MUL(acc, op[0], op[1]));
1266
1267 emit(MACH(dst, op[0], op[1]));
1268 break;
1269 }
1270
1271 case nir_op_frcp:
1272 inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
1273 inst->saturate = instr->dest.saturate;
1274 break;
1275
1276 case nir_op_fexp2:
1277 inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
1278 inst->saturate = instr->dest.saturate;
1279 break;
1280
1281 case nir_op_flog2:
1282 inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
1283 inst->saturate = instr->dest.saturate;
1284 break;
1285
1286 case nir_op_fsin:
1287 inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
1288 inst->saturate = instr->dest.saturate;
1289 break;
1290
1291 case nir_op_fcos:
1292 inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
1293 inst->saturate = instr->dest.saturate;
1294 break;
1295
1296 case nir_op_idiv:
1297 case nir_op_udiv:
1298 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1299 emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
1300 break;
1301
1302 case nir_op_umod:
1303 case nir_op_irem:
1304 /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
1305 * appears that our hardware just does the right thing for signed
1306 * remainder.
1307 */
1308 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1309 emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
1310 break;
1311
1312 case nir_op_imod: {
1313 /* Get a regular C-style remainder. If a % b == 0, set the predicate. */
1314 inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
1315
1316 /* Math instructions don't support conditional mod */
1317 inst = emit(MOV(dst_null_d(), src_reg(dst)));
1318 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1319
1320 /* Now, we need to determine if signs of the sources are different.
1321 * When we XOR the sources, the top bit is 0 if they are the same and 1
1322 * if they are different. We can then use a conditional modifier to
1323 * turn that into a predicate. This leads us to an XOR.l instruction.
1324 *
1325 * Technically, according to the PRM, you're not allowed to use .l on a
1326 * XOR instruction. However, empirical experiments and Curro's reading
1327 * of the simulator source both indicate that it's safe.
1328 */
1329 src_reg tmp = src_reg(this, glsl_type::ivec4_type);
1330 inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
1331 inst->predicate = BRW_PREDICATE_NORMAL;
1332 inst->conditional_mod = BRW_CONDITIONAL_L;
1333
1334 /* If the result of the initial remainder operation is non-zero and the
1335 * two sources have different signs, add in a copy of op[1] to get the
1336 * final integer modulus value.
1337 */
1338 inst = emit(ADD(dst, src_reg(dst), op[1]));
1339 inst->predicate = BRW_PREDICATE_NORMAL;
1340 break;
1341 }
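   /* Worked example for the imod sequence above: imod(-7, 3). The
    * INT_REMAINDER gives the C-style remainder -1 (non-zero, so the flag is
    * set), the XOR of the operands has its sign bit set because the signs
    * differ, and the predicated ADD yields -1 + 3 = 2, the GLSL modulo
    * result.
    */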
1342
1343 case nir_op_ldexp:
1344 unreachable("not reached: should be handled by ldexp_to_arith()");
1345
1346 case nir_op_fsqrt:
1347 inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
1348 inst->saturate = instr->dest.saturate;
1349 break;
1350
1351 case nir_op_frsq:
1352 inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
1353 inst->saturate = instr->dest.saturate;
1354 break;
1355
1356 case nir_op_fpow:
1357 inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
1358 inst->saturate = instr->dest.saturate;
1359 break;
1360
1361 case nir_op_uadd_carry: {
1362 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1363 struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
1364
1365 emit(ADDC(dst_null_ud(), op[0], op[1]));
1366 emit(MOV(dst, src_reg(acc)));
1367 break;
1368 }
1369
1370 case nir_op_usub_borrow: {
1371 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1372 struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
1373
1374 emit(SUBB(dst_null_ud(), op[0], op[1]));
1375 emit(MOV(dst, src_reg(acc)));
1376 break;
1377 }
1378
1379 case nir_op_ftrunc:
1380 inst = emit(RNDZ(dst, op[0]));
1381 inst->saturate = instr->dest.saturate;
1382 break;
1383
1384 case nir_op_fceil: {
1385 src_reg tmp = src_reg(this, glsl_type::float_type);
1386 tmp.swizzle =
1387 brw_swizzle_for_size(instr->src[0].src.is_ssa ?
1388 instr->src[0].src.ssa->num_components :
1389 instr->src[0].src.reg.reg->num_components);
1390
1391 op[0].negate = !op[0].negate;
1392 emit(RNDD(dst_reg(tmp), op[0]));
1393 tmp.negate = true;
1394 inst = emit(MOV(dst, tmp));
1395 inst->saturate = instr->dest.saturate;
1396 break;
1397 }
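   /* The fceil sequence above computes ceil(x) as -floor(-x); e.g.
    * ceil(1.3) = -RNDD(-1.3) = -(-2.0) = 2.0.
    */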
1398
1399 case nir_op_ffloor:
1400 inst = emit(RNDD(dst, op[0]));
1401 inst->saturate = instr->dest.saturate;
1402 break;
1403
1404 case nir_op_ffract:
1405 inst = emit(FRC(dst, op[0]));
1406 inst->saturate = instr->dest.saturate;
1407 break;
1408
1409 case nir_op_fround_even:
1410 inst = emit(RNDE(dst, op[0]));
1411 inst->saturate = instr->dest.saturate;
1412 break;
1413
1414 case nir_op_fquantize2f16: {
1415 /* See also vec4_visitor::emit_pack_half_2x16() */
1416 src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
1417 src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
1418 src_reg zero = src_reg(this, glsl_type::vec4_type);
1419
1420 /* Check for denormal */
1421 src_reg abs_src0 = op[0];
1422 abs_src0.abs = true;
1423 emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1424 BRW_CONDITIONAL_L));
1425 /* Get the appropriately signed zero */
1426 emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
1427 retype(op[0], BRW_REGISTER_TYPE_UD),
1428 brw_imm_ud(0x80000000)));
1429 /* Do the actual F32 -> F16 -> F32 conversion */
1430 emit(F32TO16(dst_reg(tmp16), op[0]));
1431 emit(F16TO32(dst_reg(tmp32), tmp16));
1432 /* Select that or zero based on normal status */
1433 inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
1434 inst->predicate = BRW_PREDICATE_NORMAL;
1435 inst->saturate = instr->dest.saturate;
1436 break;
1437 }
1438
1439 case nir_op_imin:
1440 case nir_op_umin:
1441 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1442 /* fall through */
1443 case nir_op_fmin:
1444 try_immediate_source(instr, op, true, devinfo);
1445 inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
1446 inst->saturate = instr->dest.saturate;
1447 break;
1448
1449 case nir_op_imax:
1450 case nir_op_umax:
1451 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1452 /* fall through */
1453 case nir_op_fmax:
1454 try_immediate_source(instr, op, true, devinfo);
1455 inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
1456 inst->saturate = instr->dest.saturate;
1457 break;
1458
1459 case nir_op_fddx:
1460 case nir_op_fddx_coarse:
1461 case nir_op_fddx_fine:
1462 case nir_op_fddy:
1463 case nir_op_fddy_coarse:
1464 case nir_op_fddy_fine:
1465 unreachable("derivatives are not valid in vertex shaders");
1466
1467 case nir_op_ilt32:
1468 case nir_op_ult32:
1469 case nir_op_ige32:
1470 case nir_op_uge32:
1471 case nir_op_ieq32:
1472 case nir_op_ine32:
1473 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1474 /* Fallthrough */
1475 case nir_op_flt32:
1476 case nir_op_fge32:
1477 case nir_op_feq32:
1478 case nir_op_fne32: {
1479 enum brw_conditional_mod conditional_mod =
1480 brw_cmod_for_nir_comparison(instr->op);
1481
1482 if (nir_src_bit_size(instr->src[0].src) < 64) {
1483 /* If the order of the sources is changed due to an immediate value,
1484 * then the condition must also be changed.
1485 */
1486 if (try_immediate_source(instr, op, true, devinfo) == 0)
1487 conditional_mod = brw_swap_cmod(conditional_mod);
1488
1489 emit(CMP(dst, op[0], op[1], conditional_mod));
1490 } else {
1491 /* Produce a 32-bit boolean result from the DF comparison by selecting
1492 * only the low 32 bits of each DF produced. Do this in a temporary
1493 * so we can then move from there to the result using align16 again
1494 * to honor the original writemask.
1495 */
1496 dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
1497 emit(CMP(temp, op[0], op[1], conditional_mod));
1498 dst_reg result = dst_reg(this, glsl_type::bvec4_type);
1499 emit(VEC4_OPCODE_PICK_LOW_32BIT, result, src_reg(temp));
1500 emit(MOV(dst, src_reg(result)));
1501 }
1502 break;
1503 }
1504
1505 case nir_op_b32all_iequal2:
1506 case nir_op_b32all_iequal3:
1507 case nir_op_b32all_iequal4:
1508 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1509 /* Fallthrough */
1510 case nir_op_b32all_fequal2:
1511 case nir_op_b32all_fequal3:
1512 case nir_op_b32all_fequal4: {
1513 unsigned swiz =
1514 brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);
1515
1516 emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
1517 brw_cmod_for_nir_comparison(instr->op)));
1518 emit(MOV(dst, brw_imm_d(0)));
1519 inst = emit(MOV(dst, brw_imm_d(~0)));
1520 inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
1521 break;
1522 }
1523
1524 case nir_op_b32any_inequal2:
1525 case nir_op_b32any_inequal3:
1526 case nir_op_b32any_inequal4:
1527 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1528 /* Fallthrough */
1529 case nir_op_b32any_fnequal2:
1530 case nir_op_b32any_fnequal3:
1531 case nir_op_b32any_fnequal4: {
1532 unsigned swiz =
1533 brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);
1534
1535 emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
1536 brw_cmod_for_nir_comparison(instr->op)));
1537
1538 emit(MOV(dst, brw_imm_d(0)));
1539 inst = emit(MOV(dst, brw_imm_d(~0)));
1540 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
1541 break;
1542 }
1543
1544 case nir_op_inot:
1545 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1546 if (devinfo->gen >= 8) {
1547 op[0] = resolve_source_modifiers(op[0]);
1548 }
1549 emit(NOT(dst, op[0]));
1550 break;
1551
1552 case nir_op_ixor:
1553 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1554 if (devinfo->gen >= 8) {
1555 op[0] = resolve_source_modifiers(op[0]);
1556 op[1] = resolve_source_modifiers(op[1]);
1557 }
1558 try_immediate_source(instr, op, true, devinfo);
1559 emit(XOR(dst, op[0], op[1]));
1560 break;
1561
1562 case nir_op_ior:
1563 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1564 if (devinfo->gen >= 8) {
1565 op[0] = resolve_source_modifiers(op[0]);
1566 op[1] = resolve_source_modifiers(op[1]);
1567 }
1568 try_immediate_source(instr, op, true, devinfo);
1569 emit(OR(dst, op[0], op[1]));
1570 break;
1571
1572 case nir_op_iand:
1573 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1574 if (devinfo->gen >= 8) {
1575 op[0] = resolve_source_modifiers(op[0]);
1576 op[1] = resolve_source_modifiers(op[1]);
1577 }
1578 try_immediate_source(instr, op, true, devinfo);
1579 emit(AND(dst, op[0], op[1]));
1580 break;
1581
1582 case nir_op_b2i32:
1583 case nir_op_b2f32:
1584 case nir_op_b2f64:
1585 if (nir_dest_bit_size(instr->dest.dest) > 32) {
1586 assert(dst.type == BRW_REGISTER_TYPE_DF);
1587 emit_conversion_to_double(dst, negate(op[0]), false);
1588 } else {
1589 emit(MOV(dst, negate(op[0])));
1590 }
1591 break;
1592
1593 case nir_op_f2b32:
1594 if (nir_src_bit_size(instr->src[0].src) == 64) {
1595 /* We use a MOV with conditional_mod to check if the provided value is
1596 * 0.0. We want this to flush denormalized numbers to zero, so we set a
1597 * source modifier on the source operand to trigger this, as source
1598 * modifiers don't affect the result of the testing against 0.0.
1599 */
1600 src_reg value = op[0];
1601 value.abs = true;
1602 vec4_instruction *inst = emit(MOV(dst_null_df(), value));
1603 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1604
1605 src_reg one = src_reg(this, glsl_type::ivec4_type);
1606 emit(MOV(dst_reg(one), brw_imm_d(~0)));
1607 inst = emit(BRW_OPCODE_SEL, dst, one, brw_imm_d(0));
1608 inst->predicate = BRW_PREDICATE_NORMAL;
1609 } else {
1610 emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
1611 }
1612 break;
1613
1614 case nir_op_i2b32:
1615 emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
1616 break;
1617
1618 case nir_op_fnoise1_1:
1619 case nir_op_fnoise1_2:
1620 case nir_op_fnoise1_3:
1621 case nir_op_fnoise1_4:
1622 case nir_op_fnoise2_1:
1623 case nir_op_fnoise2_2:
1624 case nir_op_fnoise2_3:
1625 case nir_op_fnoise2_4:
1626 case nir_op_fnoise3_1:
1627 case nir_op_fnoise3_2:
1628 case nir_op_fnoise3_3:
1629 case nir_op_fnoise3_4:
1630 case nir_op_fnoise4_1:
1631 case nir_op_fnoise4_2:
1632 case nir_op_fnoise4_3:
1633 case nir_op_fnoise4_4:
1634 unreachable("not reached: should be handled by lower_noise");
1635
1636 case nir_op_unpack_half_2x16_split_x:
1637 case nir_op_unpack_half_2x16_split_y:
1638 case nir_op_pack_half_2x16_split:
1639 unreachable("not reached: should not occur in vertex shader");
1640
1641 case nir_op_unpack_snorm_2x16:
1642 case nir_op_unpack_unorm_2x16:
1643 case nir_op_pack_snorm_2x16:
1644 case nir_op_pack_unorm_2x16:
1645 unreachable("not reached: should be handled by lower_packing_builtins");
1646
1647 case nir_op_pack_uvec4_to_uint:
1648 unreachable("not reached");
1649
1650 case nir_op_pack_uvec2_to_uint: {
1651 dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
1652 tmp1.writemask = WRITEMASK_X;
1653 op[0].swizzle = BRW_SWIZZLE_YYYY;
1654 emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));
1655
1656 dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
1657 tmp2.writemask = WRITEMASK_X;
1658 op[0].swizzle = BRW_SWIZZLE_XXXX;
1659 emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));
1660
1661 emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
1662 break;
1663 }
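   /* Example for pack_uvec2_to_uint above: for src = uvec2(0x1234, 0xABCD),
    * tmp1 = 0xABCD << 16 and tmp2 = 0x1234 & 0xffff, so the OR produces
    * 0xABCD1234, with the Y component in the high 16 bits.
    */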
1664
1665 case nir_op_pack_64_2x32_split: {
1666 dst_reg result = dst_reg(this, glsl_type::dvec4_type);
1667 dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
1668 emit(MOV(tmp, retype(op[0], BRW_REGISTER_TYPE_UD)));
1669 emit(VEC4_OPCODE_SET_LOW_32BIT, result, src_reg(tmp));
1670 emit(MOV(tmp, retype(op[1], BRW_REGISTER_TYPE_UD)));
1671 emit(VEC4_OPCODE_SET_HIGH_32BIT, result, src_reg(tmp));
1672 emit(MOV(dst, src_reg(result)));
1673 break;
1674 }
1675
1676 case nir_op_unpack_64_2x32_split_x:
1677 case nir_op_unpack_64_2x32_split_y: {
1678 enum opcode oper = (instr->op == nir_op_unpack_64_2x32_split_x) ?
1679 VEC4_OPCODE_PICK_LOW_32BIT : VEC4_OPCODE_PICK_HIGH_32BIT;
1680 dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
1681 emit(MOV(tmp, op[0]));
1682 dst_reg tmp2 = dst_reg(this, glsl_type::uvec4_type);
1683 emit(oper, tmp2, src_reg(tmp));
1684 emit(MOV(dst, src_reg(tmp2)));
1685 break;
1686 }
1687
1688 case nir_op_unpack_half_2x16:
1689 /* NIR does not guarantee a correct swizzle beyond the boundaries of a
1690 * vector, and emit_unpack_half_2x16 uses its source operand in an
1691 * operation with WRITEMASK_Y even though our source has only one
1692 * component, so it could read incorrect data, producing regressions in
1693 * Piglit. We therefore repeat the swizzle of the first component on the
1694 * remaining components to avoid this. In the vec4_visitor IR code path
1695 * this is not needed because the operand already has the correct swizzle.
1696 */
1697 op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
1698 emit_unpack_half_2x16(dst, op[0]);
1699 break;
1700
1701 case nir_op_pack_half_2x16:
1702 emit_pack_half_2x16(dst, op[0]);
1703 break;
1704
1705 case nir_op_unpack_unorm_4x8:
1706 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1707 emit_unpack_unorm_4x8(dst, op[0]);
1708 break;
1709
1710 case nir_op_pack_unorm_4x8:
1711 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1712 emit_pack_unorm_4x8(dst, op[0]);
1713 break;
1714
1715 case nir_op_unpack_snorm_4x8:
1716 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1717 emit_unpack_snorm_4x8(dst, op[0]);
1718 break;
1719
1720 case nir_op_pack_snorm_4x8:
1721 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1722 emit_pack_snorm_4x8(dst, op[0]);
1723 break;
1724
1725 case nir_op_bitfield_reverse:
1726 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1727 emit(BFREV(dst, op[0]));
1728 break;
1729
1730 case nir_op_bit_count:
1731 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1732 emit(CBIT(dst, op[0]));
1733 break;
1734
1735 case nir_op_ufind_msb:
1736 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1737 emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
1738 break;
1739
1740 case nir_op_ifind_msb: {
1741 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1742 vec4_builder bld = vec4_builder(this).at_end();
1743 src_reg src(dst);
1744
1745 if (devinfo->gen < 7) {
1746 emit_find_msb_using_lzd(bld, dst, op[0], true);
1747 } else {
1748 emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));
1749
1750 /* FBH counts from the MSB side, while GLSL's findMSB() wants the
1751 * count from the LSB side. If FBH didn't return an error
1752 * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1753 * count into an LSB count.
1754 */
         bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);

         inst = bld.ADD(dst, src, brw_imm_d(31));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->src[0].negate = true;
      }
      break;
   }

   case nir_op_find_lsb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      vec4_builder bld = vec4_builder(this).at_end();

      if (devinfo->gen < 7) {
         dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);

         /* (x & -x) generates a value that consists of only the LSB of x.
          * For all powers of 2, findMSB(y) == findLSB(y).
          */
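         /* e.g. x = 0b0110100: -x = ...1111001100 in two's complement, so
          * (x & -x) = 0b0000100, and both findMSB and findLSB of that are 2.
          */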
         src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
         src_reg negated_src = src;

         /* One must be negated, and the other must be non-negated. It
          * doesn't matter which is which.
          */
         negated_src.negate = true;
         src.negate = false;

         bld.AND(temp, src, negated_src);
         emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
      } else {
         bld.FBL(dst, op[0]);
      }
      break;
   }

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

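      /* The hardware BFE operand order is (bits, offset, value), the reverse
       * of NIR's (value, offset, bits) sources, hence op[2], op[1], op[0]
       * below.
       */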
      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_fsign:
      assert(!instr->dest.saturate);
      if (op[0].abs) {
         /* Straightforward since the source can be assumed to be either
          * strictly >= 0 or strictly <= 0 depending on the setting of the
          * negate flag.
          */
         inst = emit(MOV(dst, op[0]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         inst = (op[0].negate)
            ? emit(MOV(dst, brw_imm_f(-1.0f)))
            : emit(MOV(dst, brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is
          * not zero.
          */
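         /* For example, val = -2.5f (0xc0200000): the AND keeps 0x80000000,
          * and the predicated OR then produces 0xbf800000, i.e. -1.0f.
          */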
         emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

         op[0].type = BRW_REGISTER_TYPE_UD;
         dst.type = BRW_REGISTER_TYPE_UD;
         emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

         inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         dst.type = BRW_REGISTER_TYPE_F;
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - We use a MOV with conditional_mod instead of a CMP so that we
          *   can skip loading a 0.0 immediate. We use a source modifier on
          *   the source of the MOV so that we flush denormalized values to
          *   0. Since we want to compare against 0, this won't alter the
          *   result.
          * - We need to extract the high 32 bits of each DF, where the sign
          *   is stored.
          * - We need to produce a DF result.
          */

         /* Check for zero */
         src_reg value = op[0];
         value.abs = true;
         inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         /* AND each high 32-bit channel with 0x80000000u */
         dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
         emit(VEC4_OPCODE_PICK_HIGH_32BIT, tmp, op[0]);
         emit(AND(tmp, src_reg(tmp), brw_imm_ud(0x80000000u)));

         /* OR 1.0 (0x3f800000) into each channel, predicated to skip the
          * cases where the channel's value was 0.
          */
         inst = emit(OR(tmp, src_reg(tmp), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;

         /* Now convert the result from float to double */
         emit_conversion_to_double(dst, retype(src_reg(tmp),
                                               BRW_REGISTER_TYPE_F),
                                   false);
      }
      break;

   case nir_op_ishl:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      if (type_sz(dst.type) == 8) {
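         /* A 64-bit multiply-add has to be split here: the working
          * assumption behind this path is that the 3-source MAD cannot be
          * used with DF operands, so emit a separate MUL and ADD instead.
          */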
         dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
         emit(MUL(mul_dst, op[1], op[0]));
         inst = emit(ADD(dst, src_reg(mul_dst), op[2]));
         inst->saturate = instr->dest.saturate;
      } else {
         fix_float_operands(op, instr);
         inst = emit(MAD(dst, op[2], op[1], op[0]));
         inst->saturate = instr->dest.saturate;
      }
      break;

   case nir_op_flrp:
      fix_float_operands(op, instr);
      inst = emit(LRP(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b32csel:
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
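         /* A single-channel writemask can use the matching ALIGN16 replicate
          * predicate, so the SEL below is driven by that one channel's flag
          * bit; any other mask falls back to the normal per-channel
          * predicate.
          */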
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;

   case nir_op_fdot_replicated2:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      try_immediate_source(instr, op, false, devinfo);
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0.
    */
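   /* e.g. x = 1: (1 & 1) == 1 and -(1) == 0xffffffff, i.e. ~0, while x = 0
    * stays 0.
    */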
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}

static enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

static const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
                                  components, 1);
}

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparator;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         shadow_comparator = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset:
         if (!brw_texture_offset(instr, i, &constant_offset)) {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;

      case nir_tex_src_texture_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }
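   /* e.g. gathering the W component (instr->component == 3) sets bits 17:16
    * of the constant offset to 3, which is where the sampler expects the
    * gather channel select.
    */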

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparator,
                lod, lod2, sample_index,
                constant_offset, offset_value, mcs,
                texture, texture_reg, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] =
      dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
}

/* SIMD4x2 64-bit data is stored in register space like this:
 *
 * r0.0:DF  x0 y0 z0 w0
 * r1.0:DF  x1 y1 z1 w1
 *
 * When we need to write data such as this to memory using 32-bit write
 * messages we need to shuffle it in this fashion:
 *
 * r0.0:DF  x0 y0 x1 y1 (to be written at base offset)
 * r1.0:DF  z0 w0 z1 w1 (to be written at base offset + 16)
 *
 * We need to do the inverse operation when we read using 32-bit messages,
 * which we can do by applying the same exact shuffling on the 64-bit data
 * read, except that, because the data for each vertex is positioned
 * differently, we need to apply different channel enables.
 *
 * This function takes 64-bit data and shuffles it as explained above.
 *
 * The @for_write parameter is used to specify if the shuffling is being done
 * for proper SIMD4x2 64-bit data that needs to be shuffled prior to a 32-bit
 * write message (for_write = true), or instead we are doing the inverse
 * operation and we have just read 64-bit data using 32-bit messages that we
 * need to shuffle to create valid SIMD4x2 64-bit data (for_write = false).
 *
 * If @block and @ref are non-NULL, then the shuffling is done after @ref,
 * otherwise the instructions are emitted normally at the end. The function
 * returns the last instruction inserted.
 *
 * Notice that @src and @dst cannot be the same register.
 */
vec4_instruction *
vec4_visitor::shuffle_64bit_data(dst_reg dst, src_reg src, bool for_write,
                                 bblock_t *block, vec4_instruction *ref)
{
   assert(type_sz(src.type) == 8);
   assert(type_sz(dst.type) == 8);
   assert(!regions_overlap(dst, 2 * REG_SIZE, src, 2 * REG_SIZE));
   assert(!ref == !block);

   const vec4_builder bld = !ref ? vec4_builder(this).at_end() :
                            vec4_builder(this).at(block, ref->next);

   /* Resolve swizzle in src */
   vec4_instruction *inst;
   if (src.swizzle != BRW_SWIZZLE_XYZW) {
      dst_reg data = dst_reg(this, glsl_type::dvec4_type);
      inst = bld.MOV(data, src);
      src = src_reg(data);
   }

   /* dst+0.XY = src+0.XY */
   inst = bld.group(4, 0).MOV(writemask(dst, WRITEMASK_XY), src);

   /* dst+0.ZW = src+1.XY */
   inst = bld.group(4, for_write ? 1 : 0)
              .MOV(writemask(dst, WRITEMASK_ZW),
                   swizzle(byte_offset(src, REG_SIZE), BRW_SWIZZLE_XYXY));

   /* dst+1.XY = src+0.ZW */
   inst = bld.group(4, for_write ? 0 : 1)
              .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_XY),
                   swizzle(src, BRW_SWIZZLE_ZWZW));

   /* dst+1.ZW = src+1.ZW */
   inst = bld.group(4, 1)
              .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_ZW),
                   byte_offset(src, REG_SIZE));
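
   /* Net effect: with for_write = true, {x0 y0 z0 w0 | x1 y1 z1 w1} becomes
    * {x0 y0 x1 y1 | z0 w0 z1 w1}; for_write = false applies the inverse,
    * which only swaps the group assignments of the two middle MOVs.
    */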
   return inst;
}

}