intel/fs: Remove unused emission of load_simd_width_intel
[mesa.git] src/intel/compiler/brw_vec4_nir.cpp
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_eu.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
}

void
vec4_visitor::nir_setup_uniforms()
{
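   /* nir->num_uniforms is in bytes (see the byte-based uniform offsets in
    * nir_intrinsic_load_uniform below), and each vec4 uniform slot holds
    * 16 bytes, hence the division.
    */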
   uniforms = nir->num_uniforms / 16;
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));

      if (reg->bit_size == 64)
         nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      unreachable("VS instruction not yet implemented by NIR->vec4");
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   if (nir_reg->bit_size == 64)
      reg.type = BRW_REGISTER_TYPE_DF;
   reg = offset(reg, 8, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      dst_reg dst =
         dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
      if (dest.ssa.bit_size == 64)
         dst.type = BRW_REGISTER_TYPE_DF;
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(devinfo, type));
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(devinfo, type),
                      num_components);
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int32, num_components);
}

src_reg
vec4_visitor::get_nir_src_imm(const nir_src &src)
{
   assert(nir_src_num_components(src) == 1);
   assert(nir_src_bit_size(src) == 32);
   return nir_src_is_const(src) ? src_reg(brw_imm_d(nir_src_as_int(src))) :
                                  get_nir_src(src, 1);
}

src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);

   if (nir_src_is_const(*offset_src)) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(nir_src_as_uint(*offset_src) == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}

static src_reg
setup_imm_df(const vec4_builder &bld, double v)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8)
      return brw_imm_df(v);

   /* gen7.5 does not support DF immediates directly, but the DIM
    * instruction allows us to set the 64-bit immediate value.
    */
   if (devinfo->is_haswell) {
      const vec4_builder ubld = bld.exec_all();
      const dst_reg dst = bld.vgrf(BRW_REGISTER_TYPE_DF);
      ubld.DIM(dst, brw_imm_df(v));
      return swizzle(src_reg(dst), BRW_SWIZZLE_XXXX);
   }

   /* gen7 does not support DF immediates */
   union {
      double d;
      struct {
         uint32_t i1;
         uint32_t i2;
      };
   } di;

   di.d = v;

   /* Write the low 32 bits of the constant to the X:UD channel and the
    * high 32 bits to the Y:UD channel to build the constant in a VGRF.
    * We have to do this twice (offset 0 and offset 1), since a DF VGRF takes
    * two SIMD8 registers in SIMD4x2 execution.  Finally, return a swizzle
    * of XXXX so any access to the VGRF only reads the constant data in these
    * channels.
    */
   const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
   for (unsigned n = 0; n < 2; n++) {
      const vec4_builder ubld = bld.exec_all().group(4, n);
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_X), brw_imm_ud(di.i1));
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_Y), brw_imm_ud(di.i2));
   }

   return swizzle(src_reg(retype(tmp, BRW_REGISTER_TYPE_DF)), BRW_SWIZZLE_XXXX);
}

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg;

   if (instr->def.bit_size == 64) {
      reg = dst_reg(VGRF, alloc.allocate(2));
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      reg = dst_reg(VGRF, alloc.allocate(1));
      reg.type = BRW_REGISTER_TYPE_D;
   }

   const vec4_builder ibld = vec4_builder(this).at_end();
   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
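   /* e.g., for vec4(1.0, 1.0, 2.0, 2.0) the loop below emits two MOVs:
    * one with writemask .xy and another with writemask .zw.
    */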
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if ((instr->def.bit_size == 32 &&
              instr->value[i].u32 == instr->value[j].u32) ||
             (instr->def.bit_size == 64 &&
              instr->value[i].f64 == instr->value[j].f64)) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      if (instr->def.bit_size == 64) {
         emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
      } else {
         emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
      }

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}

src_reg
vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
{
   /* SSBO stores are weird in that their index is in src[1] */
   const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;

   src_reg surf_index;
   if (nir_src_is_const(instr->src[src])) {
      unsigned index = prog_data->base.binding_table.ssbo_start +
                       nir_src_as_uint(instr->src[src]);
      surf_index = brw_imm_ud(index);
   } else {
      surf_index = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
      surf_index = emit_uniformize(surf_index);
   }

   return surf_index;
}

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      assert(nir_dest_bit_size(instr->dest) == 32);
      /* We set EmitNoIndirectInput for VS */
      unsigned load_offset = nir_src_as_uint(instr->src[0]);

      dest = get_nir_dest(instr->dest);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      src = src_reg(ATTR, instr->const_index[0] + load_offset,
                    glsl_type::uvec4_type);
      src = retype(src, dest.type);

      /* Swizzle source based on component layout qualifier */
      src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output: {
      assert(nir_src_bit_size(instr->src[0]) == 32);
      unsigned store_offset = nir_src_as_uint(instr->src[1]);
      int varying = instr->const_index[0] + store_offset;
      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      unsigned c = nir_intrinsic_component(instr);
      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = instr->num_components;
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      assert(nir_src_num_components(instr->src[0]) == 1);
      unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
                            nir_src_as_uint(instr->src[0]) : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);
      break;
   }

   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_src_bit_size(instr->src[0]) == 32);
      assert(nir_intrinsic_write_mask(instr) ==
             (1u << instr->num_components) - 1);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
                                  BRW_REGISTER_TYPE_UD);

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);

      /* IvyBridge does not have a native SIMD4x2 untyped write message so
       * untyped writes will use SIMD8 mode.  In order to hide this and keep
       * symmetry across typed and untyped messages and across hardware
       * platforms, the current implementation of the untyped messages will
       * transparently convert the SIMD4x2 payload into an equivalent SIMD8
       * payload by transposing it and enabling only channel X on the SEND
       * instruction.
       *
       * The above works well for full vector writes, but not for partial
       * writes where we want to write some channels and not others, like
       * when we have code such as v.xyw = vec3(1,2,4).  Because the untyped
       * write messages are quite restrictive with regards to the channel
       * enables we can configure in the message descriptor (not all
       * combinations are allowed) we cannot simply implement these scenarios
       * with a single message while keeping the aforementioned symmetry in
       * the implementation.  For now we have decided that it is better to
       * keep the symmetry to reduce complexity, so in situations such as the
       * one described we end up emitting two untyped write messages (one
       * for xy and another for w).
       *
       * The code below packs consecutive channels into a single write
       * message, detects gaps in the vector write and if needed, sends a
       * second message with the remaining channels.  If in the future we
       * decide that we want to emit a single message at the expense of
       * losing the symmetry in the implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write
       *    SIMD8 message payload.  In this mode we can write up to 8 offsets
       *    and dwords to the red channel only (for the two vec4s in the
       *    SIMD4x2 execution) and select which of the 8 channels carry data
       *    to write by setting the appropriate writemask in the dst register
       *    of the SEND instruction.  It would require writing a new
       *    generator opcode specifically for IvyBridge since we would need
       *    to prepare a SIMD8 payload that could use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the
       *    writemask on the dst of the SEND instruction to select the
       *    channels we want to write.  It would require modifying the
       *    current messages to receive and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                         1 /* dims */, instr->num_components /* size */,
                         BRW_PREDICATE_NONE);
      break;
   }

   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_dest_bit_size(instr->dest) == 32);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
                                  BRW_REGISTER_TYPE_UD);

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size */,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));
      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(brw_aop_for_nir_intrinsic(instr), instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
      unreachable("should be lowered by brw_nir_lower_vs_inputs()");

   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 4 */
      assert(nir_intrinsic_base(instr) % 4 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
      src.type = dest.type;

      /* Uniforms don't actually have to be vec4 aligned.  In the case that
       * it isn't, we have to use a swizzle to shift things around.  They
       * do still have the std140 alignment requirement that vec2's have to
       * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
       *
       * The swizzle also works in the indirect case as the generator adds
       * the swizzle to the offset for us.
       */
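      /* e.g., a 32-bit vec2 uniform at base byte offset 8 gives
       * shift = (8 % 16) / 4 = 2, so the swizzle below reads the uniform's
       * data from the ZW channels of its 16-byte slot.
       */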
      const int type_size = type_sz(src.type);
      unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
      assert(shift + instr->num_components <= 4);

      if (nir_src_is_const(instr->src[0])) {
         const unsigned load_offset = nir_src_as_uint(instr->src[0]);
         /* Offsets are in bytes but they should always be multiples of 4 */
         assert(load_offset % 4 == 0);

         src.swizzle = brw_swizzle_for_size(instr->num_components);
         dest.writemask = brw_writemask_for_size(instr->num_components);
         unsigned offset = load_offset + shift * type_size;
         src.offset = ROUND_DOWN_TO(offset, 16);
         shift = (offset % 16) / type_size;
         assert(shift + instr->num_components <= 4);
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         emit(MOV(dest, src));
      } else {
         /* Uniform arrays are vec4 aligned, because of std140 alignment
          * rules.
          */
         assert(shift == 0);

         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         /* MOV_INDIRECT is going to stomp the whole thing anyway */
         dest.writemask = WRITEMASK_XYZW;

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }

   case nir_intrinsic_load_ubo: {
      src_reg surf_index;

      prog_data->base.has_ubo_pull = true;

      dest = get_nir_dest(instr->dest);

      if (nir_src_is_const(instr->src[0])) {
         /* The block index is a constant, so just emit the binding table
          * entry as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                nir_src_as_uint(instr->src[0]);
         surf_index = brw_imm_ud(index);
      } else {
         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index),
                  get_nir_src(instr->src[0], nir_type_int32,
                              instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);
      }

      src_reg offset_reg;
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         offset_reg = brw_imm_ud(load_offset & ~15);
      } else {
         offset_reg = src_reg(this, glsl_type::uint_type);
         emit(MOV(dst_reg(offset_reg),
                  get_nir_src(instr->src[1], nir_type_uint32, 1)));
      }

      src_reg packed_consts;
      if (nir_dest_bit_size(instr->dest) == 32) {
         packed_consts = src_reg(this, glsl_type::vec4_type);
         emit_pull_constant_load_reg(dst_reg(packed_consts),
                                     surf_index,
                                     offset_reg,
                                     NULL, NULL /* before_block/inst */);
      } else {
         src_reg temp = src_reg(this, glsl_type::dvec4_type);
         src_reg temp_float = retype(temp, BRW_REGISTER_TYPE_F);

         emit_pull_constant_load_reg(dst_reg(temp_float),
                                     surf_index, offset_reg, NULL, NULL);
         if (offset_reg.file == IMM)
            offset_reg.ud += 16;
         else
            emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(16u)));
         emit_pull_constant_load_reg(dst_reg(byte_offset(temp_float, REG_SIZE)),
                                     surf_index, offset_reg, NULL, NULL);

         packed_consts = src_reg(this, glsl_type::dvec4_type);
         shuffle_64bit_data(dst_reg(packed_consts), temp, false);
      }

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         unsigned type_size = type_sz(dest.type);
         packed_consts.swizzle +=
            BRW_SWIZZLE4(load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size);
      }

      emit(MOV(dest, retype(packed_consts, dest.type)));

      break;
   }

   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_scoped_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
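      /* Issue a fence message through the data cache dataport; tmp receives
       * the message writeback and is not read again.
       */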
      vec4_instruction *fence =
         bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp, brw_vec8_grf(0, 0));
      fence->sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface = get_nir_ssbo_intrinsic_index(instr);
   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      nir_alu_type type = nir_op_infos[cmp_instr->op].input_types[i];
      unsigned bit_size = nir_src_bit_size(cmp_instr->src[i].src);
      type = (nir_alu_type) (((unsigned) type) | bit_size);
      op[i] = get_nir_src(cmp_instr->src[i].src, type, 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_cmod_for_nir_comparison(cmp_instr->op)));

   return true;
}

static void
emit_find_msb_using_lzd(const vec4_builder &bld,
                        const dst_reg &dst,
                        const src_reg &src,
                        bool is_signed)
{
   vec4_instruction *inst;
   src_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *    For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
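      /* e.g., src = -8 (0xfffffff8): the ASR by 31 yields ~0, the XOR then
       * yields 7, LZD(7) = 29, and the final 31 - 29 = 2 = findMSB(-8).
       */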
      temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));

      bld.ASR(dst_reg(temp), src, brw_imm_d(31));
      bld.XOR(dst_reg(temp), temp, src);
   }

   bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB count
    * into an LSB count.  If no bits are set, LZD will return 32.  31-32 = -1,
    * which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
                  brw_imm_d(31));
   inst->src[0].negate = true;
}

void
vec4_visitor::emit_conversion_from_double(dst_reg dst, src_reg src)
{
   /* BDW PRM vol 15 - workarounds:
    * DF->f format conversion for Align16 has wrong emask calculation when
    * source is immediate.
    */
   if (devinfo->gen == 8 && dst.type == BRW_REGISTER_TYPE_F &&
       src.file == BRW_IMMEDIATE_VALUE) {
      emit(MOV(dst, brw_imm_f(src.df)));
      return;
   }

   enum opcode op;
   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      op = VEC4_OPCODE_DOUBLE_TO_D32;
      break;
   case BRW_REGISTER_TYPE_UD:
      op = VEC4_OPCODE_DOUBLE_TO_U32;
      break;
   case BRW_REGISTER_TYPE_F:
      op = VEC4_OPCODE_DOUBLE_TO_F32;
      break;
   default:
      unreachable("Unknown conversion");
   }

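   /* Run the conversion in double-sized temporaries, then pick the low
    * 32 bits of each 64-bit channel and move them to the destination so
    * the original writemask is honored.
    */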
   dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
   emit(MOV(temp, src));
   dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
   emit(op, temp2, src_reg(temp));

   emit(VEC4_OPCODE_PICK_LOW_32BIT, retype(temp2, dst.type), src_reg(temp2));
   emit(MOV(dst, src_reg(retype(temp2, dst.type))));
}

void
vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src)
{
   dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
   src_reg tmp_src = retype(src_reg(this, glsl_type::vec4_type), src.type);
   emit(MOV(dst_reg(tmp_src), src));
   emit(VEC4_OPCODE_TO_DOUBLE, tmp_dst, tmp_src);
   emit(MOV(dst, src_reg(tmp_dst)));
}

/**
 * Try to use an immediate value for a source
 *
 * In cases of flow control, constant propagation is sometimes unable to
 * determine that a register contains a constant value.  To work around this,
 * try to emit a literal as one of the sources.  If \c try_src0_also is set,
 * \c op[0] will also be tried for an immediate value.
 *
 * If \c op[0] is modified, the operands will be exchanged so that \c op[1]
 * will always be the immediate value.
 *
 * \return The index of the source that was modified, 0 or 1, if successful.
 * Otherwise, -1.
 *
 * \param op - Operands to the instruction
 * \param try_src0_also - True if \c op[0] should also be a candidate for
 *                        getting an immediate value.  This should only be
 *                        set for commutative operations.
 */
static int
try_immediate_source(const nir_alu_instr *instr, src_reg *op,
                     bool try_src0_also,
                     ASSERTED const gen_device_info *devinfo)
{
   unsigned idx;

   /* MOV should be the only single-source instruction passed to this
    * function.  Any other unary instruction with a constant source should
    * have been constant-folded away!
    */
   assert(nir_op_infos[instr->op].num_inputs > 1 ||
          instr->op == nir_op_mov);

   if (instr->op != nir_op_mov &&
       nir_src_bit_size(instr->src[1].src) == 32 &&
       nir_src_is_const(instr->src[1].src)) {
      idx = 1;
   } else if (try_src0_also &&
              nir_src_bit_size(instr->src[0].src) == 32 &&
              nir_src_is_const(instr->src[0].src)) {
      idx = 0;
   } else {
      return -1;
   }

   const enum brw_reg_type old_type = op[idx].type;

   switch (old_type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD: {
      int first_comp = -1;
      int d = 0;

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            if (first_comp < 0) {
               first_comp = i;
               d = nir_src_comp_as_int(instr->src[idx].src,
                                       instr->src[idx].swizzle[i]);
            } else if (d != nir_src_comp_as_int(instr->src[idx].src,
                                                instr->src[idx].swizzle[i])) {
               return -1;
            }
         }
      }

      assert(first_comp >= 0);

      if (op[idx].abs)
         d = MAX2(-d, d);

      if (op[idx].negate) {
         /* On Gen8+ a negation source modifier on a logical operation means
          * something different.  Nothing should generate this, so assert
          * that it does not occur.
          */
         assert(devinfo->gen < 8 || (instr->op != nir_op_iand &&
                                     instr->op != nir_op_ior &&
                                     instr->op != nir_op_ixor));
         d = -d;
      }

      op[idx] = retype(src_reg(brw_imm_d(d)), old_type);
      break;
   }

   case BRW_REGISTER_TYPE_F: {
      int first_comp = -1;
      float f[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
      bool is_scalar = true;

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            f[i] = nir_src_comp_as_float(instr->src[idx].src,
                                         instr->src[idx].swizzle[i]);
            if (first_comp < 0) {
               first_comp = i;
            } else if (f[first_comp] != f[i]) {
               is_scalar = false;
            }
         }
      }

      if (is_scalar) {
         if (op[idx].abs)
            f[first_comp] = fabs(f[first_comp]);

         if (op[idx].negate)
            f[first_comp] = -f[first_comp];

         op[idx] = src_reg(brw_imm_f(f[first_comp]));
         assert(op[idx].type == old_type);
      } else {
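         /* Not a uniform scalar: try to encode the four components as a
          * packed vector-float (VF) immediate instead.
          */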
         uint8_t vf_values[4] = { 0, 0, 0, 0 };

         for (unsigned i = 0; i < ARRAY_SIZE(vf_values); i++) {
            if (op[idx].abs)
               f[i] = fabs(f[i]);

            if (op[idx].negate)
               f[i] = -f[i];

            const int vf = brw_float_to_vf(f[i]);
            if (vf == -1)
               return -1;

            vf_values[i] = vf;
         }

         op[idx] = src_reg(brw_imm_vf4(vf_values[0], vf_values[1],
                                       vf_values[2], vf_values[3]));
      }
      break;
   }

   default:
      unreachable("Non-32bit type.");
   }

   /* If the instruction has more than one source, the instruction format only
    * allows source 1 to be an immediate value.  If the immediate value was
    * source 0, then the sources must be exchanged.
    */
   if (idx == 0 && instr->op != nir_op_mov) {
      src_reg tmp = op[0];
      op[0] = op[1];
      op[1] = tmp;
   }

   return idx;
}

void
vec4_visitor::fix_float_operands(src_reg op[3], nir_alu_instr *instr)
{
   bool fixed[3] = { false, false, false };

   for (unsigned i = 0; i < 2; i++) {
      if (!nir_src_is_const(instr->src[i].src))
         continue;

      for (unsigned j = i + 1; j < 3; j++) {
         if (fixed[j])
            continue;

         if (!nir_src_is_const(instr->src[j].src))
            continue;

         if (nir_alu_srcs_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];

            fixed[i] = true;
            fixed[j] = true;
         } else if (nir_alu_srcs_negative_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];
            op[j].negate = !op[j].negate;

            fixed[i] = true;
            fixed[j] = true;
         }
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (!fixed[i])
         op[i] = fix_3src_operand(op[i]);
   }
}

static bool
const_src_fits_in_16_bits(const nir_src &src, brw_reg_type type)
{
   assert(nir_src_is_const(src));
   if (type_is_unsigned_int(type)) {
      return nir_src_comp_as_uint(src, 0) <= UINT16_MAX;
   } else {
      const int64_t c = nir_src_comp_as_int(src, 0);
      return c <= INT16_MAX && c >= INT16_MIN;
   }
}

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
                                           nir_dest_bit_size(instr->dest.dest));
   dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
   dst.writemask = instr->dest.write_mask;

   assert(!instr->dest.saturate);

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* We don't lower to source modifiers, so they shouldn't exist. */
      assert(!instr->src[i].abs);
      assert(!instr->src[i].negate);

      nir_alu_type src_type = (nir_alu_type)
         (nir_op_infos[instr->op].input_types[i] |
          nir_src_bit_size(instr->src[i].src));
      op[i] = get_nir_src(instr->src[i].src, src_type, 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
   }

   switch (instr->op) {
   case nir_op_mov:
      try_immediate_source(instr, &op[0], true, devinfo);
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f32:
   case nir_op_u2f32:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
      if (nir_src_bit_size(instr->src[0].src) == 64)
         emit_conversion_from_double(dst, op[0]);
      else
         inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_f2f64:
   case nir_op_i2f64:
   case nir_op_u2f64:
      emit_conversion_to_double(dst, op[0]);
      break;

   case nir_op_fsat:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = true;
      break;

   case nir_op_fneg:
   case nir_op_ineg:
      op[0].negate = true;
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fabs:
   case nir_op_iabs:
      op[0].negate = false;
      op[0].abs = true;
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_iadd:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fadd:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(ADD(dst, op[0], op[1]));
      break;

   case nir_op_uadd_sat:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = true;
      break;

   case nir_op_fmul:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(MUL(dst, op[0], op[1]));
      break;

   case nir_op_imul: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen < 8) {
         /* For integer multiplication, the MUL uses the low 16 bits of one
          * of the operands (src0 through SNB, src1 on IVB and later).  The
          * MACH accumulates the contribution of the upper 16 bits of that
          * operand.  If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (nir_src_is_const(instr->src[0].src) &&
             nir_alu_instr_src_read_mask(instr, 0) == 1 &&
             const_src_fits_in_16_bits(instr->src[0].src, op[0].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (nir_src_is_const(instr->src[1].src) &&
                    nir_alu_instr_src_read_mask(instr, 1) == 1 &&
                    const_src_fits_in_16_bits(instr->src[1].src, op[1].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
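            /* Neither source fits in 16 bits, so use the full MUL/MACH
             * sequence: the pair computes the complete product through the
             * accumulator (MACH's destination is discarded here), and the
             * final MOV copies the low 32 bits out of the accumulator.
             */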
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on
       * an XOR instruction.  However, empirical experiments and Curro's
       * reading of the simulator source both indicate that it's safe.
       */
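      /* e.g., imod(7, -3): the remainder is 1 and the signs differ, so the
       * predicated ADD below folds in -3, giving -2, which matches NIR's
       * floor-based definition of imod.
       */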
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      break;

   case nir_op_uadd_carry: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

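      /* ADDC writes the per-channel carry-out to the accumulator; the MOV
       * then copies those bits out as the result.
       */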
      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

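      /* ceil(x) = -floor(-x): negate the source, round down, and negate the
       * result again via the temporary's negate flag.
       */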
      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      break;

   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_imin:
   case nir_op_umin:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmin:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      break;

   case nir_op_imax:
   case nir_op_umax:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmax:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fne32: {
      enum brw_conditional_mod conditional_mod =
         brw_cmod_for_nir_comparison(instr->op);

      if (nir_src_bit_size(instr->src[0].src) < 64) {
         /* If the order of the sources is changed due to an immediate value,
          * then the condition must also be changed.
          */
         if (try_immediate_source(instr, op, true, devinfo) == 0)
            conditional_mod = brw_swap_cmod(conditional_mod);

         emit(CMP(dst, op[0], op[1], conditional_mod));
      } else {
         /* Produce a 32-bit boolean result from the DF comparison by
          * selecting only the low 32 bits of each DF produced.  Do this in a
          * temporary so we can then move from there to the result using
          * align16 again to honor the original writemask.
          */
         dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
         emit(CMP(temp, op[0], op[1], conditional_mod));
         dst_reg result = dst_reg(this, glsl_type::bvec4_type);
         emit(VEC4_OPCODE_PICK_LOW_32BIT, result, src_reg(temp));
         emit(MOV(dst, src_reg(result)));
      }
      break;
   }

   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i32:
   case nir_op_b2f32:
   case nir_op_b2f64:
      if (nir_dest_bit_size(instr->dest.dest) > 32) {
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         emit_conversion_to_double(dst, negate(op[0]));
      } else {
         emit(MOV(dst, negate(op[0])));
      }
      break;

   case nir_op_f2b32:
      if (nir_src_bit_size(instr->src[0].src) == 64) {
         /* We use a MOV with conditional_mod to check if the provided value
          * is 0.0.  We want this to flush denormalized numbers to zero, so
          * we set a source modifier on the source operand to trigger this,
          * as source modifiers don't affect the result of testing against
          * 0.0.
          */
         src_reg value = op[0];
         value.abs = true;
         vec4_instruction *inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         src_reg one = src_reg(this, glsl_type::ivec4_type);
         emit(MOV(dst_reg(one), brw_imm_d(~0)));
         inst = emit(BRW_OPCODE_SEL, dst, one, brw_imm_d(0));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else {
         emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      }
      break;

   case nir_op_i2b32:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
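      /* Pack the result as (src.y << 16) | (src.x & 0xffff) into the X
       * channel, assembled in two temporaries and OR'd together.
       */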
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }

   case nir_op_pack_64_2x32_split: {
      dst_reg result = dst_reg(this, glsl_type::dvec4_type);
      dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
      emit(MOV(tmp, retype(op[0], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_LOW_32BIT, result, src_reg(tmp));
      emit(MOV(tmp, retype(op[1], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_HIGH_32BIT, result, src_reg(tmp));
      emit(MOV(dst, src_reg(result)));
      break;
   }

   case nir_op_unpack_64_2x32_split_x:
   case nir_op_unpack_64_2x32_split_y: {
      enum opcode oper = (instr->op == nir_op_unpack_64_2x32_split_x) ?
                         VEC4_OPCODE_PICK_LOW_32BIT : VEC4_OPCODE_PICK_HIGH_32BIT;
      dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
      emit(MOV(tmp, op[0]));
      dst_reg tmp2 = dst_reg(this, glsl_type::uvec4_type);
      emit(oper, tmp2, src_reg(tmp));
      emit(MOV(dst, src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of
       * emit_unpack_half_2x16 uses the source operand in an operation with
       * WRITEMASK_Y while our source operand has only size 1, it would
       * access incorrect data, producing regressions in Piglit.  We repeat
       * the swizzle of the first component on the rest of the components to
       * avoid regressions.  In the vec4_visitor IR code path this is not
       * needed because the operand already has the correct swizzle.
       */
1667 op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
1668 emit_unpack_half_2x16(dst, op[0]);
1669 break;
1670
1671 case nir_op_pack_half_2x16:
1672 emit_pack_half_2x16(dst, op[0]);
1673 break;
1674
1675 case nir_op_unpack_unorm_4x8:
1676 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1677 emit_unpack_unorm_4x8(dst, op[0]);
1678 break;
1679
1680 case nir_op_pack_unorm_4x8:
1681 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1682 emit_pack_unorm_4x8(dst, op[0]);
1683 break;
1684
1685 case nir_op_unpack_snorm_4x8:
1686 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1687 emit_unpack_snorm_4x8(dst, op[0]);
1688 break;
1689
1690 case nir_op_pack_snorm_4x8:
1691 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1692 emit_pack_snorm_4x8(dst, op[0]);
1693 break;
1694
1695 case nir_op_bitfield_reverse:
1696 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1697 emit(BFREV(dst, op[0]));
1698 break;
1699
1700 case nir_op_bit_count:
1701 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1702 emit(CBIT(dst, op[0]));
1703 break;
1704
1705 case nir_op_ufind_msb:
1706 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1707 emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
1708 break;
1709
1710 case nir_op_ifind_msb: {
1711 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1712 vec4_builder bld = vec4_builder(this).at_end();
1713 src_reg src(dst);
1714
1715 if (devinfo->gen < 7) {
1716 emit_find_msb_using_lzd(bld, dst, op[0], true);
1717 } else {
1718 emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));
1719
1720 /* FBH counts from the MSB side, while GLSL's findMSB() wants the
1721 * count from the LSB side. If FBH didn't return an error
1722 * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1723 * count into an LSB count.
1724 */
1725 bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1726
1727 inst = bld.ADD(dst, src, brw_imm_d(31));
1728 inst->predicate = BRW_PREDICATE_NORMAL;
1729 inst->src[0].negate = true;
1730 }
1731 break;
1732 }
1733
1734 case nir_op_find_lsb: {
1735 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1736 vec4_builder bld = vec4_builder(this).at_end();
1737
1738 if (devinfo->gen < 7) {
1739 dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);
1740
1741 /* (x & -x) generates a value that consists of only the LSB of x.
1742 * For all powers of 2, findMSB(y) == findLSB(y).
1743 */
1744 src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
1745 src_reg negated_src = src;
1746
1747 /* One must be negated, and the other must be non-negated. It
1748 * doesn't matter which is which.
1749 */
1750 negated_src.negate = true;
1751 src.negate = false;
1752
1753 bld.AND(temp, src, negated_src);
1754 emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
1755 } else {
1756 bld.FBL(dst, op[0]);
1757 }
1758 break;
1759 }
1760
1761 case nir_op_ubitfield_extract:
1762 case nir_op_ibitfield_extract:
1763 unreachable("should have been lowered");
1764 case nir_op_ubfe:
1765 case nir_op_ibfe:
1766 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1767 op[0] = fix_3src_operand(op[0]);
1768 op[1] = fix_3src_operand(op[1]);
1769 op[2] = fix_3src_operand(op[2]);
1770
      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

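      /* op[0] is the mask produced by a previous BFI1 (nir_op_bfm); BFI2
       * merges the insert bits (op[1]) into the base value (op[2]) under
       * that mask.
       */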
      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_fsign:
      if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is
          * not zero.
          */
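         /* For example, val == -2.5f (0xc0200000) ANDs to 0x80000000 and
          * then ORs to 0xbf800000, which is -1.0f; val == 0.0f fails the
          * CMP, so the OR is skipped and the result stays 0.0f.
          */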
         emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

         op[0].type = BRW_REGISTER_TYPE_UD;
         dst.type = BRW_REGISTER_TYPE_UD;
         emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

         inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         dst.type = BRW_REGISTER_TYPE_F;
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - We use a MOV with conditional_mod instead of a CMP so that we
          *   can skip loading a 0.0 immediate.  We use a source modifier on
          *   the source of the MOV so that we flush denormalized values to
          *   0.  Since we want to compare against 0, this won't alter the
          *   result.
          * - We need to extract the high 32 bits of each DF, which is where
          *   the sign is stored.
          * - We need to produce a DF result.
          */

         /* Check for zero */
         src_reg value = op[0];
         value.abs = true;
         inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         /* AND each high 32-bit channel with 0x80000000u */
         dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
         emit(VEC4_OPCODE_PICK_HIGH_32BIT, tmp, op[0]);
         emit(AND(tmp, src_reg(tmp), brw_imm_ud(0x80000000u)));

         /* OR the bits of 1.0 (0x3f800000) into each channel, predicated to
          * skip the channels whose value was 0.
          */
         inst = emit(OR(tmp, src_reg(tmp), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;

         /* Now convert the result from float to double */
         emit_conversion_to_double(dst, retype(src_reg(tmp),
                                               BRW_REGISTER_TYPE_F));
      }
      break;

   case nir_op_ishl:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      if (type_sz(dst.type) == 8) {
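         /* MAD is not used for DF operands here, apparently because this
          * path cannot handle a 64-bit MAD; expand fma(a, b, c) into a MUL
          * and an ADD through a temporary instead.
          */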
         dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
         emit(MUL(mul_dst, op[1], op[0]));
         inst = emit(ADD(dst, src_reg(mul_dst), op[2]));
      } else {
         fix_float_operands(op, instr);
         inst = emit(MAD(dst, op[2], op[1], op[0]));
      }
      break;

   case nir_op_flrp:
      fix_float_operands(op, instr);
      inst = emit(LRP(dst, op[2], op[1], op[0]));
      break;

   case nir_op_b32csel:
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
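         /* In align16 mode the SEL below can predicate either on each
          * channel's own flag bit or on a single flag channel replicated to
          * all components.  If exactly one channel is written, test that
          * channel's flag; otherwise fall back to the per-channel predicate.
          */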
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;

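   /* The fdot_replicatedN opcodes map directly to the hardware dot-product
    * instructions, which broadcast the scalar result to all enabled
    * channels of the destination.
    */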
   case nir_op_fdot_replicated2:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      break;

   case nir_op_fdot_replicated3:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      break;

   case nir_op_fdot_replicated4:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      break;

   case nir_op_fdph_replicated:
      try_immediate_source(instr, op, false, devinfo);
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      break;

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
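   /* Masking with 1 keeps only the defined low bit of the result, and the
    * negate then maps 1 to ~0 (0xffffffff) and leaves 0 as 0.
    */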
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}

static enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

static const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
                                  components, 1);
}

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparator;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         shadow_comparator = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset:
         if (!brw_texture_offset(instr, i, &constant_offset)) {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;

      case nir_tex_src_texture_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
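         /* The surface index in the sampler message must be dynamically
          * uniform across the SIMD channels, so reduce the per-channel
          * result to a single scalar value.
          */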
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

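   /* ld2dms and samples_identical need the MCS word for the texel, so on
    * Gen7+ fetch it first when the surface uses a compressed multisample
    * layout; otherwise pass 0, which reads as an uncompressed surface.
    */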
   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparator,
                lod, lod2, sample_index,
                constant_offset, offset_value, mcs,
                texture, texture_reg, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] =
      dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
}

/* SIMD4x2 64-bit data is stored in register space like this:
 *
 * r0.0:DF  x0 y0 z0 w0
 * r1.0:DF  x1 y1 z1 w1
 *
 * When we need to write data such as this to memory using 32-bit write
 * messages we need to shuffle it in this fashion:
 *
 * r0.0:DF  x0 y0 x1 y1 (to be written at base offset)
 * r1.0:DF  z0 w0 z1 w1 (to be written at base offset + 16)
 *
 * We need to do the inverse operation when we read using 32-bit messages,
 * which we can do by applying the exact same shuffling to the 64-bit data
 * we read, except that, because the data for each vertex is positioned
 * differently, we need to apply different channel enables.
 *
 * This function takes 64-bit data and shuffles it as explained above.
 *
 * The @for_write parameter is used to specify if the shuffling is being done
 * for proper SIMD4x2 64-bit data that needs to be shuffled prior to a 32-bit
 * write message (for_write = true), or if instead we are doing the inverse
 * operation and have just read 64-bit data using 32-bit messages that we
 * need to shuffle into valid SIMD4x2 64-bit data (for_write = false).
 *
 * If @block and @ref are non-NULL, then the shuffling is done after @ref,
 * otherwise the instructions are emitted normally at the end.  The function
 * returns the last instruction inserted.
 *
 * Notice that @src and @dst cannot be the same register.
 */
vec4_instruction *
vec4_visitor::shuffle_64bit_data(dst_reg dst, src_reg src, bool for_write,
                                 bblock_t *block, vec4_instruction *ref)
{
   assert(type_sz(src.type) == 8);
   assert(type_sz(dst.type) == 8);
   assert(!regions_overlap(dst, 2 * REG_SIZE, src, 2 * REG_SIZE));
   assert(!ref == !block);

   const vec4_builder bld = !ref ? vec4_builder(this).at_end() :
                                   vec4_builder(this).at(block, ref->next);

   /* Resolve swizzle in src */
   vec4_instruction *inst;
   if (src.swizzle != BRW_SWIZZLE_XYZW) {
      dst_reg data = dst_reg(this, glsl_type::dvec4_type);
      inst = bld.MOV(data, src);
      src = src_reg(data);
   }

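   /* Each MOV below runs as a 4-wide group; the group index selects which
    * vertex's channel enables apply to the write, which is how the same
    * shuffle works in both directions: for_write flips the enables on the
    * two cross-vertex moves.
    */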
   /* dst+0.XY = src+0.XY */
   inst = bld.group(4, 0).MOV(writemask(dst, WRITEMASK_XY), src);

   /* dst+0.ZW = src+1.XY */
   inst = bld.group(4, for_write ? 1 : 0)
             .MOV(writemask(dst, WRITEMASK_ZW),
                  swizzle(byte_offset(src, REG_SIZE), BRW_SWIZZLE_XYXY));

   /* dst+1.XY = src+0.ZW */
   inst = bld.group(4, for_write ? 0 : 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_XY),
                  swizzle(src, BRW_SWIZZLE_ZWZW));

   /* dst+1.ZW = src+1.ZW */
   inst = bld.group(4, 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_ZW),
                  byte_offset(src, REG_SIZE));

   return inst;
}

}