nv50/ir/nir: fix global_atomic_comp_swap
[mesa.git] / src / gallium / drivers / nouveau / codegen / nv50_ir_from_nir.cpp
1 /*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <kherbst@redhat.com>
23 */
24
25 #include "compiler/nir/nir.h"
26
27 #include "util/u_debug.h"
28
29 #include "codegen/nv50_ir.h"
30 #include "codegen/nv50_ir_from_common.h"
31 #include "codegen/nv50_ir_lowering_helper.h"
32 #include "codegen/nv50_ir_util.h"
33 #include "tgsi/tgsi_from_mesa.h"
34
35 #if __cplusplus >= 201103L
36 #include <unordered_map>
37 #else
38 #include <tr1/unordered_map>
39 #endif
40 #include <cstring>
41 #include <list>
42 #include <vector>
43
44 namespace {
45
46 #if __cplusplus >= 201103L
47 using std::hash;
48 using std::unordered_map;
49 #else
50 using std::tr1::hash;
51 using std::tr1::unordered_map;
52 #endif
53
54 using namespace nv50_ir;
55
56 int
57 type_size(const struct glsl_type *type, bool bindless)
58 {
59 return glsl_count_attribute_slots(type, false);
60 }
61
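// size/align callback for function_temp variables: booleans are stored as
// 32-bit values, other types use their natural size, and every variable is
// aligned to 16 bytes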
62 static void
63 function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
64 {
65 assert(glsl_type_is_vector_or_scalar(type));
66
67 unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
68 unsigned length = glsl_get_vector_elements(type);
69
70 *size = comp_size * length;
71 *align = 0x10;
72 }
73
74 class Converter : public ConverterCommon
75 {
76 public:
77 Converter(Program *, nir_shader *, nv50_ir_prog_info *);
78
79 bool run();
80 private:
81 typedef std::vector<LValue*> LValues;
82 typedef unordered_map<unsigned, LValues> NirDefMap;
83 typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
84 typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
85
86 CacheMode convert(enum gl_access_qualifier);
87 TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
88 LValues& convert(nir_alu_dest *);
89 BasicBlock* convert(nir_block *);
90 LValues& convert(nir_dest *);
91 SVSemantic convert(nir_intrinsic_op);
92 Value* convert(nir_load_const_instr*, uint8_t);
93 LValues& convert(nir_register *);
94 LValues& convert(nir_ssa_def *);
95
96 Value* getSrc(nir_alu_src *, uint8_t component = 0);
97 Value* getSrc(nir_register *, uint8_t);
98 Value* getSrc(nir_src *, uint8_t, bool indirect = false);
99 Value* getSrc(nir_ssa_def *, uint8_t);
100
101 // The returned value is the constant part of the given source (either the
102 // nir_src or the selected source component of an intrinsic). Even though
103 // this is mostly an optimization that lets us skip indirects in a few
104 // cases, sometimes we require immediate values or have to set some fields
105 // on instructions (e.g. tex) so that codegen can consume those.
106 // If the found value does not have a constant part, it is returned through
107 // the Value parameter instead.
108 uint32_t getIndirect(nir_src *, uint8_t, Value *&);
109 // isScalar indicates that the addressing is scalar; vec4 addressing is
110 // assumed otherwise
111 uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
112 bool isScalar = false);
113
114 uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
115
116 void setInterpolate(nv50_ir_varying *,
117 uint8_t,
118 bool centroid,
119 unsigned semantics);
120
121 Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
122 uint8_t c, Value *indirect0 = NULL,
123 Value *indirect1 = NULL, bool patch = false);
124 void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
125 Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
126 Value *indirect1 = NULL);
127
128 bool isFloatType(nir_alu_type);
129 bool isSignedType(nir_alu_type);
130 bool isResultFloat(nir_op);
131 bool isResultSigned(nir_op);
132
133 DataType getDType(nir_alu_instr *);
134 DataType getDType(nir_intrinsic_instr *);
135 DataType getDType(nir_intrinsic_instr *, bool isSigned);
136 DataType getDType(nir_op, uint8_t);
137
138 DataFile getFile(nir_intrinsic_op);
139
140 std::vector<DataType> getSTypes(nir_alu_instr *);
141 DataType getSType(nir_src &, bool isFloat, bool isSigned);
142
143 operation getOperation(nir_intrinsic_op);
144 operation getOperation(nir_op);
145 operation getOperation(nir_texop);
146 operation preOperationNeeded(nir_op);
147
148 int getSubOp(nir_intrinsic_op);
149 int getSubOp(nir_op);
150
151 CondCode getCondCode(nir_op);
152
153 bool assignSlots();
154 bool parseNIR();
155
156 bool visit(nir_alu_instr *);
157 bool visit(nir_block *);
158 bool visit(nir_cf_node *);
159 bool visit(nir_function *);
160 bool visit(nir_if *);
161 bool visit(nir_instr *);
162 bool visit(nir_intrinsic_instr *);
163 bool visit(nir_jump_instr *);
164 bool visit(nir_load_const_instr*);
165 bool visit(nir_loop *);
166 bool visit(nir_ssa_undef_instr *);
167 bool visit(nir_tex_instr *);
168
169 // tex stuff
170 Value* applyProjection(Value *src, Value *proj);
171 unsigned int getNIRArgCount(TexInstruction::Target&);
172
173 nir_shader *nir;
174
175 NirDefMap ssaDefs;
176 NirDefMap regDefs;
177 ImmediateMap immediates;
178 NirBlockMap blocks;
179 unsigned int curLoopDepth;
180 unsigned int curIfDepth;
181
182 BasicBlock *exit;
183 Value *zero;
184 Instruction *immInsertPos;
185
186 int clipVertexOutput;
187
188 union {
189 struct {
190 Value *position;
191 } fp;
192 };
193 };
194
195 Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
196 : ConverterCommon(prog, info),
197 nir(nir),
198 curLoopDepth(0),
199 curIfDepth(0),
200 clipVertexOutput(-1)
201 {
202 zero = mkImm((uint32_t)0);
203 }
204
205 BasicBlock *
206 Converter::convert(nir_block *block)
207 {
208 NirBlockMap::iterator it = blocks.find(block->index);
209 if (it != blocks.end())
210 return it->second;
211
212 BasicBlock *bb = new BasicBlock(func);
213 blocks[block->index] = bb;
214 return bb;
215 }
216
217 bool
218 Converter::isFloatType(nir_alu_type type)
219 {
220 return nir_alu_type_get_base_type(type) == nir_type_float;
221 }
222
223 bool
224 Converter::isSignedType(nir_alu_type type)
225 {
226 return nir_alu_type_get_base_type(type) == nir_type_int;
227 }
228
229 bool
230 Converter::isResultFloat(nir_op op)
231 {
232 const nir_op_info &info = nir_op_infos[op];
233 if (info.output_type != nir_type_invalid)
234 return isFloatType(info.output_type);
235
236 ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
237 assert(false);
238 return true;
239 }
240
241 bool
242 Converter::isResultSigned(nir_op op)
243 {
244 switch (op) {
245 // there is no umul and we get wrong results if we treat all muls as signed
246 case nir_op_imul:
247 case nir_op_inot:
248 return false;
249 default:
250 const nir_op_info &info = nir_op_infos[op];
251 if (info.output_type != nir_type_invalid)
252 return isSignedType(info.output_type);
253 ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
254 assert(false);
255 return true;
256 }
257 }
258
259 DataType
260 Converter::getDType(nir_alu_instr *insn)
261 {
262 if (insn->dest.dest.is_ssa)
263 return getDType(insn->op, insn->dest.dest.ssa.bit_size);
264 else
265 return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
266 }
267
268 DataType
269 Converter::getDType(nir_intrinsic_instr *insn)
270 {
271 bool isSigned;
272 switch (insn->intrinsic) {
273 case nir_intrinsic_shared_atomic_imax:
274 case nir_intrinsic_shared_atomic_imin:
275 case nir_intrinsic_ssbo_atomic_imax:
276 case nir_intrinsic_ssbo_atomic_imin:
277 isSigned = true;
278 break;
279 default:
280 isSigned = false;
281 break;
282 }
283
284 return getDType(insn, isSigned);
285 }
286
287 DataType
288 Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
289 {
290 if (insn->dest.is_ssa)
291 return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
292 else
293 return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
294 }
295
296 DataType
297 Converter::getDType(nir_op op, uint8_t bitSize)
298 {
299 DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
300 if (ty == TYPE_NONE) {
301 ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
302 assert(false);
303 }
304 return ty;
305 }
306
307 std::vector<DataType>
308 Converter::getSTypes(nir_alu_instr *insn)
309 {
310 const nir_op_info &info = nir_op_infos[insn->op];
311 std::vector<DataType> res(info.num_inputs);
312
313 for (uint8_t i = 0; i < info.num_inputs; ++i) {
314 if (info.input_types[i] != nir_type_invalid) {
315 res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
316 } else {
317 ERROR("getSType not implemented for %s idx %u\n", info.name, i);
318 assert(false);
319 res[i] = TYPE_NONE;
320 break;
321 }
322 }
323
324 return res;
325 }
326
327 DataType
328 Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
329 {
330 uint8_t bitSize;
331 if (src.is_ssa)
332 bitSize = src.ssa->bit_size;
333 else
334 bitSize = src.reg.reg->bit_size;
335
336 DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
337 if (ty == TYPE_NONE) {
338 const char *str;
339 if (isFloat)
340 str = "float";
341 else if (isSigned)
342 str = "int";
343 else
344 str = "uint";
345 ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
346 assert(false);
347 }
348 return ty;
349 }
350
351 DataFile
352 Converter::getFile(nir_intrinsic_op op)
353 {
354 switch (op) {
355 case nir_intrinsic_load_global:
356 case nir_intrinsic_store_global:
357 return FILE_MEMORY_GLOBAL;
358 case nir_intrinsic_load_scratch:
359 case nir_intrinsic_store_scratch:
360 return FILE_MEMORY_LOCAL;
361 case nir_intrinsic_load_shared:
362 case nir_intrinsic_store_shared:
363 return FILE_MEMORY_SHARED;
364 case nir_intrinsic_load_kernel_input:
365 return FILE_SHADER_INPUT;
366 default:
367 ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
368 assert(false);
369 }
370 return FILE_NULL;
371 }
372
373 operation
374 Converter::getOperation(nir_op op)
375 {
376 switch (op) {
377 // basic ops with float and int variants
378 case nir_op_fabs:
379 case nir_op_iabs:
380 return OP_ABS;
381 case nir_op_fadd:
382 case nir_op_iadd:
383 return OP_ADD;
384 case nir_op_iand:
385 return OP_AND;
386 case nir_op_ifind_msb:
387 case nir_op_ufind_msb:
388 return OP_BFIND;
389 case nir_op_fceil:
390 return OP_CEIL;
391 case nir_op_fcos:
392 return OP_COS;
393 case nir_op_f2f32:
394 case nir_op_f2f64:
395 case nir_op_f2i32:
396 case nir_op_f2i64:
397 case nir_op_f2u32:
398 case nir_op_f2u64:
399 case nir_op_i2f32:
400 case nir_op_i2f64:
401 case nir_op_i2i32:
402 case nir_op_i2i64:
403 case nir_op_u2f32:
404 case nir_op_u2f64:
405 case nir_op_u2u32:
406 case nir_op_u2u64:
407 return OP_CVT;
408 case nir_op_fddx:
409 case nir_op_fddx_coarse:
410 case nir_op_fddx_fine:
411 return OP_DFDX;
412 case nir_op_fddy:
413 case nir_op_fddy_coarse:
414 case nir_op_fddy_fine:
415 return OP_DFDY;
416 case nir_op_fdiv:
417 case nir_op_idiv:
418 case nir_op_udiv:
419 return OP_DIV;
420 case nir_op_fexp2:
421 return OP_EX2;
422 case nir_op_ffloor:
423 return OP_FLOOR;
424 case nir_op_ffma:
425 return OP_FMA;
426 case nir_op_flog2:
427 return OP_LG2;
428 case nir_op_fmax:
429 case nir_op_imax:
430 case nir_op_umax:
431 return OP_MAX;
432 case nir_op_pack_64_2x32_split:
433 return OP_MERGE;
434 case nir_op_fmin:
435 case nir_op_imin:
436 case nir_op_umin:
437 return OP_MIN;
438 case nir_op_fmod:
439 case nir_op_imod:
440 case nir_op_umod:
441 case nir_op_frem:
442 case nir_op_irem:
443 return OP_MOD;
444 case nir_op_fmul:
445 case nir_op_imul:
446 case nir_op_imul_high:
447 case nir_op_umul_high:
448 return OP_MUL;
449 case nir_op_fneg:
450 case nir_op_ineg:
451 return OP_NEG;
452 case nir_op_inot:
453 return OP_NOT;
454 case nir_op_ior:
455 return OP_OR;
456 case nir_op_fpow:
457 return OP_POW;
458 case nir_op_frcp:
459 return OP_RCP;
460 case nir_op_frsq:
461 return OP_RSQ;
462 case nir_op_fsat:
463 return OP_SAT;
464 case nir_op_feq32:
465 case nir_op_ieq32:
466 case nir_op_fge32:
467 case nir_op_ige32:
468 case nir_op_uge32:
469 case nir_op_flt32:
470 case nir_op_ilt32:
471 case nir_op_ult32:
472 case nir_op_fne32:
473 case nir_op_ine32:
474 return OP_SET;
475 case nir_op_ishl:
476 return OP_SHL;
477 case nir_op_ishr:
478 case nir_op_ushr:
479 return OP_SHR;
480 case nir_op_fsin:
481 return OP_SIN;
482 case nir_op_fsqrt:
483 return OP_SQRT;
484 case nir_op_ftrunc:
485 return OP_TRUNC;
486 case nir_op_ixor:
487 return OP_XOR;
488 default:
489 ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
490 assert(false);
491 return OP_NOP;
492 }
493 }
494
495 operation
496 Converter::getOperation(nir_texop op)
497 {
498 switch (op) {
499 case nir_texop_tex:
500 return OP_TEX;
501 case nir_texop_lod:
502 return OP_TXLQ;
503 case nir_texop_txb:
504 return OP_TXB;
505 case nir_texop_txd:
506 return OP_TXD;
507 case nir_texop_txf:
508 case nir_texop_txf_ms:
509 return OP_TXF;
510 case nir_texop_tg4:
511 return OP_TXG;
512 case nir_texop_txl:
513 return OP_TXL;
514 case nir_texop_query_levels:
515 case nir_texop_texture_samples:
516 case nir_texop_txs:
517 return OP_TXQ;
518 default:
519 ERROR("couldn't get operation for nir_texop %u\n", op);
520 assert(false);
521 return OP_NOP;
522 }
523 }
524
525 operation
526 Converter::getOperation(nir_intrinsic_op op)
527 {
528 switch (op) {
529 case nir_intrinsic_emit_vertex:
530 return OP_EMIT;
531 case nir_intrinsic_end_primitive:
532 return OP_RESTART;
533 case nir_intrinsic_bindless_image_atomic_add:
534 case nir_intrinsic_image_atomic_add:
535 case nir_intrinsic_bindless_image_atomic_and:
536 case nir_intrinsic_image_atomic_and:
537 case nir_intrinsic_bindless_image_atomic_comp_swap:
538 case nir_intrinsic_image_atomic_comp_swap:
539 case nir_intrinsic_bindless_image_atomic_exchange:
540 case nir_intrinsic_image_atomic_exchange:
541 case nir_intrinsic_bindless_image_atomic_imax:
542 case nir_intrinsic_image_atomic_imax:
543 case nir_intrinsic_bindless_image_atomic_umax:
544 case nir_intrinsic_image_atomic_umax:
545 case nir_intrinsic_bindless_image_atomic_imin:
546 case nir_intrinsic_image_atomic_imin:
547 case nir_intrinsic_bindless_image_atomic_umin:
548 case nir_intrinsic_image_atomic_umin:
549 case nir_intrinsic_bindless_image_atomic_or:
550 case nir_intrinsic_image_atomic_or:
551 case nir_intrinsic_bindless_image_atomic_xor:
552 case nir_intrinsic_image_atomic_xor:
553 case nir_intrinsic_bindless_image_atomic_inc_wrap:
554 case nir_intrinsic_image_atomic_inc_wrap:
555 case nir_intrinsic_bindless_image_atomic_dec_wrap:
556 case nir_intrinsic_image_atomic_dec_wrap:
557 return OP_SUREDP;
558 case nir_intrinsic_bindless_image_load:
559 case nir_intrinsic_image_load:
560 return OP_SULDP;
561 case nir_intrinsic_bindless_image_samples:
562 case nir_intrinsic_image_samples:
563 case nir_intrinsic_bindless_image_size:
564 case nir_intrinsic_image_size:
565 return OP_SUQ;
566 case nir_intrinsic_bindless_image_store:
567 case nir_intrinsic_image_store:
568 return OP_SUSTP;
569 default:
570 ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
571 assert(false);
572 return OP_NOP;
573 }
574 }
575
576 operation
577 Converter::preOperationNeeded(nir_op op)
578 {
579 switch (op) {
580 case nir_op_fcos:
581 case nir_op_fsin:
582 return OP_PRESIN;
583 default:
584 return OP_NOP;
585 }
586 }
587
588 int
589 Converter::getSubOp(nir_op op)
590 {
591 switch (op) {
592 case nir_op_imul_high:
593 case nir_op_umul_high:
594 return NV50_IR_SUBOP_MUL_HIGH;
595 case nir_op_ishl:
596 case nir_op_ishr:
597 case nir_op_ushr:
598 return NV50_IR_SUBOP_SHIFT_WRAP;
599 default:
600 return 0;
601 }
602 }
603
604 int
605 Converter::getSubOp(nir_intrinsic_op op)
606 {
607 switch (op) {
608 case nir_intrinsic_bindless_image_atomic_add:
609 case nir_intrinsic_global_atomic_add:
610 case nir_intrinsic_image_atomic_add:
611 case nir_intrinsic_shared_atomic_add:
612 case nir_intrinsic_ssbo_atomic_add:
613 return NV50_IR_SUBOP_ATOM_ADD;
614 case nir_intrinsic_bindless_image_atomic_and:
615 case nir_intrinsic_global_atomic_and:
616 case nir_intrinsic_image_atomic_and:
617 case nir_intrinsic_shared_atomic_and:
618 case nir_intrinsic_ssbo_atomic_and:
619 return NV50_IR_SUBOP_ATOM_AND;
620 case nir_intrinsic_bindless_image_atomic_comp_swap:
621 case nir_intrinsic_global_atomic_comp_swap:
622 case nir_intrinsic_image_atomic_comp_swap:
623 case nir_intrinsic_shared_atomic_comp_swap:
624 case nir_intrinsic_ssbo_atomic_comp_swap:
625 return NV50_IR_SUBOP_ATOM_CAS;
626 case nir_intrinsic_bindless_image_atomic_exchange:
627 case nir_intrinsic_global_atomic_exchange:
628 case nir_intrinsic_image_atomic_exchange:
629 case nir_intrinsic_shared_atomic_exchange:
630 case nir_intrinsic_ssbo_atomic_exchange:
631 return NV50_IR_SUBOP_ATOM_EXCH;
632 case nir_intrinsic_bindless_image_atomic_or:
633 case nir_intrinsic_global_atomic_or:
634 case nir_intrinsic_image_atomic_or:
635 case nir_intrinsic_shared_atomic_or:
636 case nir_intrinsic_ssbo_atomic_or:
637 return NV50_IR_SUBOP_ATOM_OR;
638 case nir_intrinsic_bindless_image_atomic_imax:
639 case nir_intrinsic_bindless_image_atomic_umax:
640 case nir_intrinsic_global_atomic_imax:
641 case nir_intrinsic_global_atomic_umax:
642 case nir_intrinsic_image_atomic_imax:
643 case nir_intrinsic_image_atomic_umax:
644 case nir_intrinsic_shared_atomic_imax:
645 case nir_intrinsic_shared_atomic_umax:
646 case nir_intrinsic_ssbo_atomic_imax:
647 case nir_intrinsic_ssbo_atomic_umax:
648 return NV50_IR_SUBOP_ATOM_MAX;
649 case nir_intrinsic_bindless_image_atomic_imin:
650 case nir_intrinsic_bindless_image_atomic_umin:
651 case nir_intrinsic_global_atomic_imin:
652 case nir_intrinsic_global_atomic_umin:
653 case nir_intrinsic_image_atomic_imin:
654 case nir_intrinsic_image_atomic_umin:
655 case nir_intrinsic_shared_atomic_imin:
656 case nir_intrinsic_shared_atomic_umin:
657 case nir_intrinsic_ssbo_atomic_imin:
658 case nir_intrinsic_ssbo_atomic_umin:
659 return NV50_IR_SUBOP_ATOM_MIN;
660 case nir_intrinsic_bindless_image_atomic_xor:
661 case nir_intrinsic_global_atomic_xor:
662 case nir_intrinsic_image_atomic_xor:
663 case nir_intrinsic_shared_atomic_xor:
664 case nir_intrinsic_ssbo_atomic_xor:
665 return NV50_IR_SUBOP_ATOM_XOR;
666 case nir_intrinsic_bindless_image_atomic_inc_wrap:
667 case nir_intrinsic_image_atomic_inc_wrap:
668 return NV50_IR_SUBOP_ATOM_INC;
669 case nir_intrinsic_bindless_image_atomic_dec_wrap:
670 case nir_intrinsic_image_atomic_dec_wrap:
671 return NV50_IR_SUBOP_ATOM_DEC;
672
673 case nir_intrinsic_group_memory_barrier:
674 case nir_intrinsic_memory_barrier:
675 case nir_intrinsic_memory_barrier_buffer:
676 case nir_intrinsic_memory_barrier_image:
677 return NV50_IR_SUBOP_MEMBAR(M, GL);
678 case nir_intrinsic_memory_barrier_shared:
679 return NV50_IR_SUBOP_MEMBAR(M, CTA);
680
681 case nir_intrinsic_vote_all:
682 return NV50_IR_SUBOP_VOTE_ALL;
683 case nir_intrinsic_vote_any:
684 return NV50_IR_SUBOP_VOTE_ANY;
685 case nir_intrinsic_vote_ieq:
686 return NV50_IR_SUBOP_VOTE_UNI;
687 default:
688 return 0;
689 }
690 }
691
692 CondCode
693 Converter::getCondCode(nir_op op)
694 {
695 switch (op) {
696 case nir_op_feq32:
697 case nir_op_ieq32:
698 return CC_EQ;
699 case nir_op_fge32:
700 case nir_op_ige32:
701 case nir_op_uge32:
702 return CC_GE;
703 case nir_op_flt32:
704 case nir_op_ilt32:
705 case nir_op_ult32:
706 return CC_LT;
707 case nir_op_fne32:
708 return CC_NEU;
709 case nir_op_ine32:
710 return CC_NE;
711 default:
712 ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
713 assert(false);
714 return CC_FL;
715 }
716 }
717
718 Converter::LValues&
719 Converter::convert(nir_alu_dest *dest)
720 {
721 return convert(&dest->dest);
722 }
723
724 Converter::LValues&
725 Converter::convert(nir_dest *dest)
726 {
727 if (dest->is_ssa)
728 return convert(&dest->ssa);
729 if (dest->reg.indirect) {
730 ERROR("no support for indirects.\n");
731 assert(false);
732 }
733 return convert(dest->reg.reg);
734 }
735
736 Converter::LValues&
737 Converter::convert(nir_register *reg)
738 {
739 assert(!reg->num_array_elems);
740
741 NirDefMap::iterator it = regDefs.find(reg->index);
742 if (it != regDefs.end())
743 return it->second;
744
745 LValues newDef(reg->num_components);
746 for (uint8_t i = 0; i < reg->num_components; i++)
747 newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
748 return regDefs[reg->index] = newDef;
749 }
750
751 Converter::LValues&
752 Converter::convert(nir_ssa_def *def)
753 {
754 NirDefMap::iterator it = ssaDefs.find(def->index);
755 if (it != ssaDefs.end())
756 return it->second;
757
758 LValues newDef(def->num_components);
759 for (uint8_t i = 0; i < def->num_components; i++)
760 newDef[i] = getSSA(std::max(4, def->bit_size / 8));
761 return ssaDefs[def->index] = newDef;
762 }
763
764 Value*
765 Converter::getSrc(nir_alu_src *src, uint8_t component)
766 {
767 if (src->abs || src->negate) {
768 ERROR("modifiers currently not supported on nir_alu_src\n");
769 assert(false);
770 }
771 return getSrc(&src->src, src->swizzle[component]);
772 }
773
774 Value*
775 Converter::getSrc(nir_register *reg, uint8_t idx)
776 {
777 NirDefMap::iterator it = regDefs.find(reg->index);
778 if (it == regDefs.end())
779 return convert(reg)[idx];
780 return it->second[idx];
781 }
782
783 Value*
784 Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
785 {
786 if (src->is_ssa)
787 return getSrc(src->ssa, idx);
788
789 if (src->reg.indirect) {
790 if (indirect)
791 return getSrc(src->reg.indirect, idx);
792 ERROR("no support for indirects.\n");
793 assert(false);
794 return NULL;
795 }
796
797 return getSrc(src->reg.reg, idx);
798 }
799
800 Value*
801 Converter::getSrc(nir_ssa_def *src, uint8_t idx)
802 {
803 ImmediateMap::iterator iit = immediates.find(src->index);
804 if (iit != immediates.end())
805 return convert((*iit).second, idx);
806
807 NirDefMap::iterator it = ssaDefs.find(src->index);
808 if (it == ssaDefs.end()) {
809 ERROR("SSA value %u not found\n", src->index);
810 assert(false);
811 return NULL;
812 }
813 return it->second[idx];
814 }
815
816 uint32_t
817 Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
818 {
819 nir_const_value *offset = nir_src_as_const_value(*src);
820
821 if (offset) {
822 indirect = NULL;
823 return offset[0].u32;
824 }
825
826 indirect = getSrc(src, idx, true);
827 return 0;
828 }
829
830 uint32_t
831 Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
832 {
833 int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
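// with vec4 addressing each index selects a whole 16 byte slot, so scale the
// indirect part by 16 (shift left by 4) to turn it into a byte offset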
834 if (indirect && !isScalar)
835 indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
836 return idx;
837 }
838
839 static void
840 vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
841 {
842 assert(name && index);
843
844 if (slot >= VERT_ATTRIB_MAX) {
845 ERROR("invalid varying slot %u\n", slot);
846 assert(false);
847 return;
848 }
849
850 if (slot >= VERT_ATTRIB_GENERIC0 &&
851 slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
852 *name = TGSI_SEMANTIC_GENERIC;
853 *index = slot - VERT_ATTRIB_GENERIC0;
854 return;
855 }
856
857 if (slot >= VERT_ATTRIB_TEX0 &&
858 slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
859 *name = TGSI_SEMANTIC_TEXCOORD;
860 *index = slot - VERT_ATTRIB_TEX0;
861 return;
862 }
863
864 switch (slot) {
865 case VERT_ATTRIB_COLOR0:
866 *name = TGSI_SEMANTIC_COLOR;
867 *index = 0;
868 break;
869 case VERT_ATTRIB_COLOR1:
870 *name = TGSI_SEMANTIC_COLOR;
871 *index = 1;
872 break;
873 case VERT_ATTRIB_EDGEFLAG:
874 *name = TGSI_SEMANTIC_EDGEFLAG;
875 *index = 0;
876 break;
877 case VERT_ATTRIB_FOG:
878 *name = TGSI_SEMANTIC_FOG;
879 *index = 0;
880 break;
881 case VERT_ATTRIB_NORMAL:
882 *name = TGSI_SEMANTIC_NORMAL;
883 *index = 0;
884 break;
885 case VERT_ATTRIB_POS:
886 *name = TGSI_SEMANTIC_POSITION;
887 *index = 0;
888 break;
889 case VERT_ATTRIB_POINT_SIZE:
890 *name = TGSI_SEMANTIC_PSIZE;
891 *index = 0;
892 break;
893 default:
894 ERROR("unknown vert attrib slot %u\n", slot);
895 assert(false);
896 break;
897 }
898 }
899
900 void
901 Converter::setInterpolate(nv50_ir_varying *var,
902 uint8_t mode,
903 bool centroid,
904 unsigned semantic)
905 {
906 switch (mode) {
907 case INTERP_MODE_FLAT:
908 var->flat = 1;
909 break;
910 case INTERP_MODE_NONE:
911 if (semantic == TGSI_SEMANTIC_COLOR)
912 var->sc = 1;
913 else if (semantic == TGSI_SEMANTIC_POSITION)
914 var->linear = 1;
915 break;
916 case INTERP_MODE_NOPERSPECTIVE:
917 var->linear = 1;
918 break;
919 case INTERP_MODE_SMOOTH:
920 break;
921 }
922 var->centroid = centroid;
923 }
924
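// number of attribute slots a variable occupies; for arrayed per-vertex I/O
// in geometry and tessellation stages the outer (per-vertex) array dimension
// is not counted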
925 static uint16_t
926 calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
927 bool input, const nir_variable *var)
928 {
929 if (!type->is_array())
930 return type->count_attribute_slots(false);
931
932 uint16_t slots;
933 switch (stage) {
934 case Program::TYPE_GEOMETRY:
935 slots = type->count_attribute_slots(false);
936 if (input)
937 slots /= info.gs.vertices_in;
938 break;
939 case Program::TYPE_TESSELLATION_CONTROL:
940 case Program::TYPE_TESSELLATION_EVAL:
941 // remove first dimension
942 if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
943 slots = type->count_attribute_slots(false);
944 else
945 slots = type->fields.array->count_attribute_slots(false);
946 break;
947 default:
948 slots = type->count_attribute_slots(false);
949 break;
950 }
951
952 return slots;
953 }
954
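// component write mask for one slot of the variable: 64-bit base types use
// two 32-bit components each, so anything wider than a 64-bit vec2 spills
// its upper components into the following slot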
955 static uint8_t
956 getMaskForType(const glsl_type *type, uint8_t slot) {
957 uint16_t comp = type->without_array()->components();
958 comp = comp ? comp : 4;
959
960 if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
961 comp *= 2;
962 if (comp > 4) {
963 if (slot % 2)
964 comp -= 4;
965 else
966 comp = 4;
967 }
968 }
969
970 return (1 << comp) - 1;
971 }
972
973 bool Converter::assignSlots() {
974 unsigned name;
975 unsigned index;
976
977 info->io.viewportId = -1;
978 info->numInputs = 0;
979 info->numOutputs = 0;
980 info->numSysVals = 0;
981
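// translate every system value the shader reads into codegen's sysval table,
// using the TGSI semantic names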
982 for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
983 if (!(nir->info.system_values_read & 1ull << i))
984 continue;
985
986 info->sv[info->numSysVals].sn = tgsi_get_sysval_semantic(i);
987 info->sv[info->numSysVals].si = 0;
988 info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
989
990 switch (i) {
991 case SYSTEM_VALUE_INSTANCE_ID:
992 info->io.instanceId = info->numSysVals;
993 break;
994 case SYSTEM_VALUE_TESS_LEVEL_INNER:
995 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
996 info->sv[info->numSysVals].patch = 1;
997 break;
998 case SYSTEM_VALUE_VERTEX_ID:
999 info->io.vertexId = info->numSysVals;
1000 break;
1001 default:
1002 break;
1003 }
1004
1005 info->numSysVals += 1;
1006 }
1007
1008 if (prog->getType() == Program::TYPE_COMPUTE)
1009 return true;
1010
1011 nir_foreach_shader_in_variable(var, nir) {
1012 const glsl_type *type = var->type;
1013 int slot = var->data.location;
1014 uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
1015 uint32_t vary = var->data.driver_location;
1016
1017 assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);
1018
1019 switch(prog->getType()) {
1020 case Program::TYPE_FRAGMENT:
1021 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1022 &name, &index);
1023 for (uint16_t i = 0; i < slots; ++i) {
1024 setInterpolate(&info->in[vary + i], var->data.interpolation,
1025 var->data.centroid | var->data.sample, name);
1026 }
1027 break;
1028 case Program::TYPE_GEOMETRY:
1029 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1030 &name, &index);
1031 break;
1032 case Program::TYPE_TESSELLATION_CONTROL:
1033 case Program::TYPE_TESSELLATION_EVAL:
1034 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1035 &name, &index);
1036 if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
1037 info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
1038 break;
1039 case Program::TYPE_VERTEX:
1040 if (slot >= VERT_ATTRIB_GENERIC0)
1041 slot = VERT_ATTRIB_GENERIC0 + vary;
1042 vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
1043 switch (name) {
1044 case TGSI_SEMANTIC_EDGEFLAG:
1045 info->io.edgeFlagIn = vary;
1046 break;
1047 default:
1048 break;
1049 }
1050 break;
1051 default:
1052 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1053 return false;
1054 }
1055
1056 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1057 nv50_ir_varying *v = &info->in[vary];
1058
1059 v->patch = var->data.patch;
1060 v->sn = name;
1061 v->si = index + i;
1062 v->mask |= getMaskForType(type, i) << var->data.location_frac;
1063 }
1064 info->numInputs = std::max<uint8_t>(info->numInputs, vary);
1065 }
1066
1067 nir_foreach_shader_out_variable(var, nir) {
1068 const glsl_type *type = var->type;
1069 int slot = var->data.location;
1070 uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
1071 uint32_t vary = var->data.driver_location;
1072
1073 assert(vary < PIPE_MAX_SHADER_OUTPUTS);
1074
1075 switch(prog->getType()) {
1076 case Program::TYPE_FRAGMENT:
1077 tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
1078 switch (name) {
1079 case TGSI_SEMANTIC_COLOR:
1080 if (!var->data.fb_fetch_output)
1081 info->prop.fp.numColourResults++;
1082
1083 if (var->data.location == FRAG_RESULT_COLOR &&
1084 nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
1085 info->prop.fp.separateFragData = true;
1086
1087 // sometimes we get FRAG_RESULT_DATAX with data.index 0
1088 // sometimes we get FRAG_RESULT_DATA0 with data.index X
1089 index = index == 0 ? var->data.index : index;
1090 break;
1091 case TGSI_SEMANTIC_POSITION:
1092 info->io.fragDepth = vary;
1093 info->prop.fp.writesDepth = true;
1094 break;
1095 case TGSI_SEMANTIC_SAMPLEMASK:
1096 info->io.sampleMask = vary;
1097 break;
1098 default:
1099 break;
1100 }
1101 break;
1102 case Program::TYPE_GEOMETRY:
1103 case Program::TYPE_TESSELLATION_CONTROL:
1104 case Program::TYPE_TESSELLATION_EVAL:
1105 case Program::TYPE_VERTEX:
1106 tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
1107 &name, &index);
1108
1109 if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
1110 name != TGSI_SEMANTIC_TESSOUTER)
1111 info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
1112
1113 switch (name) {
1114 case TGSI_SEMANTIC_CLIPDIST:
1115 info->io.genUserClip = -1;
1116 break;
1117 case TGSI_SEMANTIC_CLIPVERTEX:
1118 clipVertexOutput = vary;
1119 break;
1120 case TGSI_SEMANTIC_EDGEFLAG:
1121 info->io.edgeFlagOut = vary;
1122 break;
1123 case TGSI_SEMANTIC_POSITION:
1124 if (clipVertexOutput < 0)
1125 clipVertexOutput = vary;
1126 break;
1127 default:
1128 break;
1129 }
1130 break;
1131 default:
1132 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1133 return false;
1134 }
1135
1136 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1137 nv50_ir_varying *v = &info->out[vary];
1138 v->patch = var->data.patch;
1139 v->sn = name;
1140 v->si = index + i;
1141 v->mask |= getMaskForType(type, i) << var->data.location_frac;
1142
1143 if (nir->info.outputs_read & 1ull << slot)
1144 v->oread = 1;
1145 }
1146 info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
1147 }
1148
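// if user clip planes have to be generated by the driver (genUserClip > 0),
// append the extra CLIPDIST outputs, one vec4 per four clip distances, which
// handleUserClipPlanes() fills in before the shader exits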
1149 if (info->io.genUserClip > 0) {
1150 info->io.clipDistances = info->io.genUserClip;
1151
1152 const unsigned int nOut = (info->io.genUserClip + 3) / 4;
1153
1154 for (unsigned int n = 0; n < nOut; ++n) {
1155 unsigned int i = info->numOutputs++;
1156 info->out[i].id = i;
1157 info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1158 info->out[i].si = n;
1159 info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
1160 }
1161 }
1162
1163 return info->assignSlots(info) == 0;
1164 }
1165
1166 uint32_t
1167 Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
1168 {
1169 DataType ty;
1170 int offset = nir_intrinsic_component(insn);
1171 bool input;
1172
1173 if (nir_intrinsic_infos[insn->intrinsic].has_dest)
1174 ty = getDType(insn);
1175 else
1176 ty = getSType(insn->src[0], false, false);
1177
1178 switch (insn->intrinsic) {
1179 case nir_intrinsic_load_input:
1180 case nir_intrinsic_load_interpolated_input:
1181 case nir_intrinsic_load_per_vertex_input:
1182 input = true;
1183 break;
1184 case nir_intrinsic_load_output:
1185 case nir_intrinsic_load_per_vertex_output:
1186 case nir_intrinsic_store_output:
1187 case nir_intrinsic_store_per_vertex_output:
1188 input = false;
1189 break;
1190 default:
1191 ERROR("unknown intrinsic in getSlotAddress %s\n",
1192 nir_intrinsic_infos[insn->intrinsic].name);
1193 input = false;
1194 assert(false);
1195 break;
1196 }
1197
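// 64-bit values occupy two 32-bit components, so double the component index
// and move on to the next slot if it goes past component 3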
1198 if (typeSizeof(ty) == 8) {
1199 slot *= 2;
1200 slot += offset;
1201 if (slot >= 4) {
1202 idx += 1;
1203 slot -= 4;
1204 }
1205 } else {
1206 slot += offset;
1207 }
1208
1209 assert(slot < 4);
1210 assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
1211 assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);
1212
1213 const nv50_ir_varying *vary = input ? info->in : info->out;
1214 return vary[idx].slot[slot] * 4;
1215 }
1216
1217 Instruction *
1218 Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
1219 uint32_t base, uint8_t c, Value *indirect0,
1220 Value *indirect1, bool patch)
1221 {
1222 unsigned int tySize = typeSizeof(ty);
1223
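// 64-bit loads from const/buffer memory, or loads with an indirect address,
// are split into two 32-bit loads and merged back into a single value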
1224 if (tySize == 8 &&
1225 (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
1226 Value *lo = getSSA();
1227 Value *hi = getSSA();
1228
1229 Instruction *loi =
1230 mkLoad(TYPE_U32, lo,
1231 mkSymbol(file, i, TYPE_U32, base + c * tySize),
1232 indirect0);
1233 loi->setIndirect(0, 1, indirect1);
1234 loi->perPatch = patch;
1235
1236 Instruction *hii =
1237 mkLoad(TYPE_U32, hi,
1238 mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
1239 indirect0);
1240 hii->setIndirect(0, 1, indirect1);
1241 hii->perPatch = patch;
1242
1243 return mkOp2(OP_MERGE, ty, def, lo, hi);
1244 } else {
1245 Instruction *ld =
1246 mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
1247 ld->setIndirect(0, 1, indirect1);
1248 ld->perPatch = patch;
1249 return ld;
1250 }
1251 }
1252
1253 void
1254 Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
1255 DataType ty, Value *src, uint8_t idx, uint8_t c,
1256 Value *indirect0, Value *indirect1)
1257 {
1258 uint8_t size = typeSizeof(ty);
1259 uint32_t address = getSlotAddress(insn, idx, c);
1260
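// indirectly addressed 64-bit stores are split into two 32-bit stores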
1261 if (size == 8 && indirect0) {
1262 Value *split[2];
1263 mkSplit(split, 4, src);
1264
1265 if (op == OP_EXPORT) {
1266 split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
1267 split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
1268 }
1269
1270 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
1271 split[0])->perPatch = info->out[idx].patch;
1272 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
1273 split[1])->perPatch = info->out[idx].patch;
1274 } else {
1275 if (op == OP_EXPORT)
1276 src = mkMov(getSSA(size), src, ty)->getDef(0);
1277 mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
1278 src)->perPatch = info->out[idx].patch;
1279 }
1280 }
1281
1282 bool
1283 Converter::parseNIR()
1284 {
1285 info->bin.tlsSpace = nir->scratch_size;
1286 info->io.clipDistances = nir->info.clip_distance_array_size;
1287 info->io.cullDistances = nir->info.cull_distance_array_size;
1288 info->io.layer_viewport_relative = nir->info.layer_viewport_relative;
1289
1290 switch(prog->getType()) {
1291 case Program::TYPE_COMPUTE:
1292 info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
1293 info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
1294 info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
1295 info->bin.smemSize += nir->info.cs.shared_size;
1296 break;
1297 case Program::TYPE_FRAGMENT:
1298 info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
1299 prog->persampleInvocation =
1300 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
1301 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
1302 info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
1303 info->prop.fp.readsSampleLocations =
1304 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
1305 info->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
1306 info->prop.fp.usesSampleMaskIn =
1307 !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
1308 break;
1309 case Program::TYPE_GEOMETRY:
1310 info->prop.gp.instanceCount = nir->info.gs.invocations;
1311 info->prop.gp.maxVertices = nir->info.gs.vertices_out;
1312 info->prop.gp.outputPrim = nir->info.gs.output_primitive;
1313 break;
1314 case Program::TYPE_TESSELLATION_CONTROL:
1315 case Program::TYPE_TESSELLATION_EVAL:
1316 if (nir->info.tess.primitive_mode == GL_ISOLINES)
1317 info->prop.tp.domain = GL_LINES;
1318 else
1319 info->prop.tp.domain = nir->info.tess.primitive_mode;
1320 info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
1321 info->prop.tp.outputPrim =
1322 nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
1323 info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
1324 info->prop.tp.winding = !nir->info.tess.ccw;
1325 break;
1326 case Program::TYPE_VERTEX:
1327 info->prop.vp.usesDrawParameters =
1328 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
1329 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
1330 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
1331 break;
1332 default:
1333 break;
1334 }
1335
1336 return true;
1337 }
1338
1339 bool
1340 Converter::visit(nir_function *function)
1341 {
1342 assert(function->impl);
1343
1344 // usually the blocks will set everything up, but main is special
1345 BasicBlock *entry = new BasicBlock(prog->main);
1346 exit = new BasicBlock(prog->main);
1347 blocks[nir_start_block(function->impl)->index] = entry;
1348 prog->main->setEntry(entry);
1349 prog->main->setExit(exit);
1350
1351 setPosition(entry, true);
1352
1353 if (info->io.genUserClip > 0) {
1354 for (int c = 0; c < 4; ++c)
1355 clipVtx[c] = getScratch();
1356 }
1357
1358 switch (prog->getType()) {
1359 case Program::TYPE_TESSELLATION_CONTROL:
1360 outBase = mkOp2v(
1361 OP_SUB, TYPE_U32, getSSA(),
1362 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
1363 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
1364 break;
1365 case Program::TYPE_FRAGMENT: {
1366 Symbol *sv = mkSysVal(SV_POSITION, 3);
1367 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
1368 fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
1369 break;
1370 }
1371 default:
1372 break;
1373 }
1374
1375 nir_index_ssa_defs(function->impl);
1376 foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
1377 if (!visit(node))
1378 return false;
1379 }
1380
1381 bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
1382 setPosition(exit, true);
1383
1384 if ((prog->getType() == Program::TYPE_VERTEX ||
1385 prog->getType() == Program::TYPE_TESSELLATION_EVAL)
1386 && info->io.genUserClip > 0)
1387 handleUserClipPlanes();
1388
1389 // TODO: for non-main functions this needs to be an OP_RETURN
1390 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
1391 return true;
1392 }
1393
1394 bool
1395 Converter::visit(nir_cf_node *node)
1396 {
1397 switch (node->type) {
1398 case nir_cf_node_block:
1399 return visit(nir_cf_node_as_block(node));
1400 case nir_cf_node_if:
1401 return visit(nir_cf_node_as_if(node));
1402 case nir_cf_node_loop:
1403 return visit(nir_cf_node_as_loop(node));
1404 default:
1405 ERROR("unknown nir_cf_node type %u\n", node->type);
1406 return false;
1407 }
1408 }
1409
1410 bool
1411 Converter::visit(nir_block *block)
1412 {
1413 if (!block->predecessors->entries && block->instr_list.is_empty())
1414 return true;
1415
1416 BasicBlock *bb = convert(block);
1417
1418 setPosition(bb, true);
1419 nir_foreach_instr(insn, block) {
1420 if (!visit(insn))
1421 return false;
1422 }
1423 return true;
1424 }
1425
1426 bool
1427 Converter::visit(nir_if *nif)
1428 {
1429 curIfDepth++;
1430
1431 DataType sType = getSType(nif->condition, false, false);
1432 Value *src = getSrc(&nif->condition, 0);
1433
1434 nir_block *lastThen = nir_if_last_then_block(nif);
1435 nir_block *lastElse = nir_if_last_else_block(nif);
1436
1437 BasicBlock *headBB = bb;
1438 BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
1439 BasicBlock *elseBB = convert(nir_if_first_else_block(nif));
1440
1441 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
1442 bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
1443
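// joins are only inserted when both branches end up in the same successor
// block, i.e. when all threads are known to reconverge there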
1444 bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
1445 mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);
1446
1447 foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
1448 if (!visit(node))
1449 return false;
1450 }
1451
1452 setPosition(convert(lastThen), true);
1453 if (!bb->isTerminated()) {
1454 BasicBlock *tailBB = convert(lastThen->successors[0]);
1455 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1456 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1457 } else {
1458 insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
1459 }
1460
1461 foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
1462 if (!visit(node))
1463 return false;
1464 }
1465
1466 setPosition(convert(lastElse), true);
1467 if (!bb->isTerminated()) {
1468 BasicBlock *tailBB = convert(lastElse->successors[0]);
1469 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1470 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1471 } else {
1472 insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
1473 }
1474
1475 /* only insert joins for the outermost if */
1476 if (--curIfDepth)
1477 insertJoins = false;
1478
1479 /* we made sure that all threads would converge at the same block */
1480 if (insertJoins) {
1481 BasicBlock *conv = convert(lastThen->successors[0]);
1482 setPosition(headBB->getExit(), false);
1483 headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
1484 setPosition(conv, false);
1485 mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
1486 }
1487
1488 return true;
1489 }
1490
1491 // TODO: add convergence handling
1492 bool
1493 Converter::visit(nir_loop *loop)
1494 {
1495 curLoopDepth += 1;
1496 func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);
1497
1498 BasicBlock *loopBB = convert(nir_loop_first_block(loop));
1499 BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
1500
1501 bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);
1502
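// PREBREAK marks the block a break will jump to, PRECONT the continue target
// (the loop header itself)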
1503 mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
1504 setPosition(loopBB, false);
1505 mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);
1506
1507 foreach_list_typed(nir_cf_node, node, node, &loop->body) {
1508 if (!visit(node))
1509 return false;
1510 }
1511
1512 if (!bb->isTerminated()) {
1513 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
1514 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
1515 }
1516
1517 if (tailBB->cfg.incidentCount() == 0)
1518 loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
1519
1520 curLoopDepth -= 1;
1521
1522 return true;
1523 }
1524
1525 bool
1526 Converter::visit(nir_instr *insn)
1527 {
1528 // we need an insertion point for immediate loads generated on the fly
1529 immInsertPos = bb->getExit();
1530 switch (insn->type) {
1531 case nir_instr_type_alu:
1532 return visit(nir_instr_as_alu(insn));
1533 case nir_instr_type_intrinsic:
1534 return visit(nir_instr_as_intrinsic(insn));
1535 case nir_instr_type_jump:
1536 return visit(nir_instr_as_jump(insn));
1537 case nir_instr_type_load_const:
1538 return visit(nir_instr_as_load_const(insn));
1539 case nir_instr_type_ssa_undef:
1540 return visit(nir_instr_as_ssa_undef(insn));
1541 case nir_instr_type_tex:
1542 return visit(nir_instr_as_tex(insn));
1543 default:
1544 ERROR("unknown nir_instr type %u\n", insn->type);
1545 return false;
1546 }
1547 return true;
1548 }
1549
1550 SVSemantic
1551 Converter::convert(nir_intrinsic_op intr)
1552 {
1553 switch (intr) {
1554 case nir_intrinsic_load_base_vertex:
1555 return SV_BASEVERTEX;
1556 case nir_intrinsic_load_base_instance:
1557 return SV_BASEINSTANCE;
1558 case nir_intrinsic_load_draw_id:
1559 return SV_DRAWID;
1560 case nir_intrinsic_load_front_face:
1561 return SV_FACE;
1562 case nir_intrinsic_is_helper_invocation:
1563 case nir_intrinsic_load_helper_invocation:
1564 return SV_THREAD_KILL;
1565 case nir_intrinsic_load_instance_id:
1566 return SV_INSTANCE_ID;
1567 case nir_intrinsic_load_invocation_id:
1568 return SV_INVOCATION_ID;
1569 case nir_intrinsic_load_local_group_size:
1570 return SV_NTID;
1571 case nir_intrinsic_load_local_invocation_id:
1572 return SV_TID;
1573 case nir_intrinsic_load_num_work_groups:
1574 return SV_NCTAID;
1575 case nir_intrinsic_load_patch_vertices_in:
1576 return SV_VERTEX_COUNT;
1577 case nir_intrinsic_load_primitive_id:
1578 return SV_PRIMITIVE_ID;
1579 case nir_intrinsic_load_sample_id:
1580 return SV_SAMPLE_INDEX;
1581 case nir_intrinsic_load_sample_mask_in:
1582 return SV_SAMPLE_MASK;
1583 case nir_intrinsic_load_sample_pos:
1584 return SV_SAMPLE_POS;
1585 case nir_intrinsic_load_subgroup_eq_mask:
1586 return SV_LANEMASK_EQ;
1587 case nir_intrinsic_load_subgroup_ge_mask:
1588 return SV_LANEMASK_GE;
1589 case nir_intrinsic_load_subgroup_gt_mask:
1590 return SV_LANEMASK_GT;
1591 case nir_intrinsic_load_subgroup_le_mask:
1592 return SV_LANEMASK_LE;
1593 case nir_intrinsic_load_subgroup_lt_mask:
1594 return SV_LANEMASK_LT;
1595 case nir_intrinsic_load_subgroup_invocation:
1596 return SV_LANEID;
1597 case nir_intrinsic_load_tess_coord:
1598 return SV_TESS_COORD;
1599 case nir_intrinsic_load_tess_level_inner:
1600 return SV_TESS_INNER;
1601 case nir_intrinsic_load_tess_level_outer:
1602 return SV_TESS_OUTER;
1603 case nir_intrinsic_load_vertex_id:
1604 return SV_VERTEX_ID;
1605 case nir_intrinsic_load_work_group_id:
1606 return SV_CTAID;
1607 default:
1608 ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
1609 nir_intrinsic_infos[intr].name);
1610 assert(false);
1611 return SV_LAST;
1612 }
1613 }
1614
1615 bool
1616 Converter::visit(nir_intrinsic_instr *insn)
1617 {
1618 nir_intrinsic_op op = insn->intrinsic;
1619 const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
1620 unsigned dest_components = nir_intrinsic_dest_components(insn);
1621
1622 switch (op) {
1623 case nir_intrinsic_load_uniform: {
1624 LValues &newDefs = convert(&insn->dest);
1625 const DataType dType = getDType(insn);
1626 Value *indirect;
1627 uint32_t coffset = getIndirect(insn, 0, 0, indirect);
1628 for (uint8_t i = 0; i < dest_components; ++i) {
1629 loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
1630 }
1631 break;
1632 }
1633 case nir_intrinsic_store_output:
1634 case nir_intrinsic_store_per_vertex_output: {
1635 Value *indirect;
1636 DataType dType = getSType(insn->src[0], false, false);
1637 uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
1638
1639 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
1640 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
1641 continue;
1642
1643 uint8_t offset = 0;
1644 Value *src = getSrc(&insn->src[0], i);
1645 switch (prog->getType()) {
1646 case Program::TYPE_FRAGMENT: {
1647 if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
1648 // TGSI uses a different interface than NIR: TGSI stores the
1649 // value in the z component, NIR in x
1650 offset += 2;
1651 src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
1652 }
1653 break;
1654 }
1655 case Program::TYPE_GEOMETRY:
1656 case Program::TYPE_TESSELLATION_EVAL:
1657 case Program::TYPE_VERTEX: {
1658 if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
1659 mkMov(clipVtx[i], src);
1660 src = clipVtx[i];
1661 }
1662 break;
1663 }
1664 default:
1665 break;
1666 }
1667
1668 storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
1669 }
1670 break;
1671 }
1672 case nir_intrinsic_load_input:
1673 case nir_intrinsic_load_interpolated_input:
1674 case nir_intrinsic_load_output: {
1675 LValues &newDefs = convert(&insn->dest);
1676
1677 // FBFetch
1678 if (prog->getType() == Program::TYPE_FRAGMENT &&
1679 op == nir_intrinsic_load_output) {
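// framebuffer fetch is implemented as a texel fetch (TXF) from a 2D MS array
// at the fragment's integer position, layer and sample index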
1680 std::vector<Value*> defs, srcs;
1681 uint8_t mask = 0;
1682
1683 srcs.push_back(getSSA());
1684 srcs.push_back(getSSA());
1685 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
1686 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
1687 mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
1688 mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
1689
1690 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
1691 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
1692
1693 for (uint8_t i = 0u; i < dest_components; ++i) {
1694 defs.push_back(newDefs[i]);
1695 mask |= 1 << i;
1696 }
1697
1698 TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
1699 texi->tex.levelZero = 1;
1700 texi->tex.mask = mask;
1701 texi->tex.useOffsets = 0;
1702 texi->tex.r = 0xffff;
1703 texi->tex.s = 0xffff;
1704
1705 info->prop.fp.readsFramebuffer = true;
1706 break;
1707 }
1708
1709 const DataType dType = getDType(insn);
1710 Value *indirect;
1711 bool input = op != nir_intrinsic_load_output;
1712 operation nvirOp;
1713 uint32_t mode = 0;
1714
1715 uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
1716 nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];
1717
1718 // see load_barycentric_* handling
1719 if (prog->getType() == Program::TYPE_FRAGMENT) {
1720 if (op == nir_intrinsic_load_interpolated_input) {
1721 ImmediateValue immMode;
1722 if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
1723 mode = immMode.reg.data.u32;
1724 }
1725 if (mode == NV50_IR_INTERP_DEFAULT)
1726 mode |= translateInterpMode(&vary, nvirOp);
1727 else {
1728 if (vary.linear) {
1729 nvirOp = OP_LINTERP;
1730 mode |= NV50_IR_INTERP_LINEAR;
1731 } else {
1732 nvirOp = OP_PINTERP;
1733 mode |= NV50_IR_INTERP_PERSPECTIVE;
1734 }
1735 }
1736 }
1737
1738 for (uint8_t i = 0u; i < dest_components; ++i) {
1739 uint32_t address = getSlotAddress(insn, idx, i);
1740 Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
1741 if (prog->getType() == Program::TYPE_FRAGMENT) {
1742 int s = 1;
1743 if (typeSizeof(dType) == 8) {
1744 Value *lo = getSSA();
1745 Value *hi = getSSA();
1746 Instruction *interp;
1747
1748 interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
1749 if (nvirOp == OP_PINTERP)
1750 interp->setSrc(s++, fp.position);
1751 if (mode & NV50_IR_INTERP_OFFSET)
1752 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1753 interp->setInterpolate(mode);
1754 interp->setIndirect(0, 0, indirect);
1755
1756 Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
1757 interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
1758 if (nvirOp == OP_PINTERP)
1759 interp->setSrc(s++, fp.position);
1760 if (mode & NV50_IR_INTERP_OFFSET)
1761 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1762 interp->setInterpolate(mode);
1763 interp->setIndirect(0, 0, indirect);
1764
1765 mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
1766 } else {
1767 Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
1768 if (nvirOp == OP_PINTERP)
1769 interp->setSrc(s++, fp.position);
1770 if (mode & NV50_IR_INTERP_OFFSET)
1771 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1772 interp->setInterpolate(mode);
1773 interp->setIndirect(0, 0, indirect);
1774 }
1775 } else {
1776 mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
1777 }
1778 }
1779 break;
1780 }
1781 case nir_intrinsic_load_barycentric_at_offset:
1782 case nir_intrinsic_load_barycentric_at_sample:
1783 case nir_intrinsic_load_barycentric_centroid:
1784 case nir_intrinsic_load_barycentric_pixel:
1785 case nir_intrinsic_load_barycentric_sample: {
1786 LValues &newDefs = convert(&insn->dest);
1787 uint32_t mode;
1788
1789 if (op == nir_intrinsic_load_barycentric_centroid ||
1790 op == nir_intrinsic_load_barycentric_sample) {
1791 mode = NV50_IR_INTERP_CENTROID;
1792 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
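// clamp the offset to the supported range [-0.5, 0.4375], convert it to
// fixed point (scaled by 4096) and pack both components into one register
// for the interpolation instruction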
1793 Value *offs[2];
1794 for (uint8_t c = 0; c < 2; c++) {
1795 offs[c] = getScratch();
1796 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1797 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1798 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1799 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1800 }
1801 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1802
1803 mode = NV50_IR_INTERP_OFFSET;
1804 } else if (op == nir_intrinsic_load_barycentric_pixel) {
1805 mode = NV50_IR_INTERP_DEFAULT;
1806 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
1807 info->prop.fp.readsSampleLocations = true;
1808 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
1809 mode = NV50_IR_INTERP_OFFSET;
1810 } else {
1811 unreachable("all intrinsics already handled above");
1812 }
1813
1814 loadImm(newDefs[1], mode);
1815 break;
1816 }
1817 case nir_intrinsic_demote:
1818 case nir_intrinsic_discard:
1819 mkOp(OP_DISCARD, TYPE_NONE, NULL);
1820 break;
1821 case nir_intrinsic_demote_if:
1822 case nir_intrinsic_discard_if: {
1823 Value *pred = getSSA(1, FILE_PREDICATE);
1824 if (insn->num_components > 1) {
1825 ERROR("nir_intrinsic_discard_if is only supported with 1 component!\n");
1826 assert(false);
1827 return false;
1828 }
1829 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1830 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
1831 break;
1832 }
1833 case nir_intrinsic_load_base_vertex:
1834 case nir_intrinsic_load_base_instance:
1835 case nir_intrinsic_load_draw_id:
1836 case nir_intrinsic_load_front_face:
1837 case nir_intrinsic_is_helper_invocation:
1838 case nir_intrinsic_load_helper_invocation:
1839 case nir_intrinsic_load_instance_id:
1840 case nir_intrinsic_load_invocation_id:
1841 case nir_intrinsic_load_local_group_size:
1842 case nir_intrinsic_load_local_invocation_id:
1843 case nir_intrinsic_load_num_work_groups:
1844 case nir_intrinsic_load_patch_vertices_in:
1845 case nir_intrinsic_load_primitive_id:
1846 case nir_intrinsic_load_sample_id:
1847 case nir_intrinsic_load_sample_mask_in:
1848 case nir_intrinsic_load_sample_pos:
1849 case nir_intrinsic_load_subgroup_eq_mask:
1850 case nir_intrinsic_load_subgroup_ge_mask:
1851 case nir_intrinsic_load_subgroup_gt_mask:
1852 case nir_intrinsic_load_subgroup_le_mask:
1853 case nir_intrinsic_load_subgroup_lt_mask:
1854 case nir_intrinsic_load_subgroup_invocation:
1855 case nir_intrinsic_load_tess_coord:
1856 case nir_intrinsic_load_tess_level_inner:
1857 case nir_intrinsic_load_tess_level_outer:
1858 case nir_intrinsic_load_vertex_id:
1859 case nir_intrinsic_load_work_group_id: {
1860 const DataType dType = getDType(insn);
1861 SVSemantic sv = convert(op);
1862 LValues &newDefs = convert(&insn->dest);
1863
1864 for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
1865 Value *def;
1866 if (typeSizeof(dType) == 8)
1867 def = getSSA();
1868 else
1869 def = newDefs[i];
1870
1871 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
1872 loadImm(def, 0u);
1873 } else {
1874 Symbol *sym = mkSysVal(sv, i);
1875 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
1876 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
1877 rdsv->perPatch = 1;
1878 }
1879
1880 if (typeSizeof(dType) == 8)
1881 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
1882 }
1883 break;
1884 }
1885 // constants
1886 case nir_intrinsic_load_subgroup_size: {
1887 LValues &newDefs = convert(&insn->dest);
1888 loadImm(newDefs[0], 32u);
1889 break;
1890 }
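    // subgroup votes: compare the source against zero into a predicate, run
    // OP_VOTE with the matching all/any/eq subop and convert the predicate back
    // into a 32-bit boolean result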
1891 case nir_intrinsic_vote_all:
1892 case nir_intrinsic_vote_any:
1893 case nir_intrinsic_vote_ieq: {
1894 LValues &newDefs = convert(&insn->dest);
1895 Value *pred = getScratch(1, FILE_PREDICATE);
1896 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1897 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1898 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1899 break;
1900 }
1901 case nir_intrinsic_ballot: {
1902 LValues &newDefs = convert(&insn->dest);
1903 Value *pred = getSSA(1, FILE_PREDICATE);
1904 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1905 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1906 break;
1907 }
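    // read_first_invocation picks the lowest active lane (active-lane mask via
    // OP_VOTE ANY, then BREV + BFIND); both intrinsics then broadcast the value
    // from the selected lane with SHFL.IDX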
1908 case nir_intrinsic_read_first_invocation:
1909 case nir_intrinsic_read_invocation: {
1910 LValues &newDefs = convert(&insn->dest);
1911 const DataType dType = getDType(insn);
1912 Value *tmp = getScratch();
1913
1914 if (op == nir_intrinsic_read_first_invocation) {
1915 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
1916 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
1917 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
1918 } else
1919 tmp = getSrc(&insn->src[1], 0);
1920
1921 for (uint8_t i = 0; i < dest_components; ++i) {
1922 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
1923 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
1924 }
1925 break;
1926 }
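    // per-vertex inputs: OP_PFETCH resolves the (base + indirect) vertex index
    // into an address register, which is then used as the vertex base for each
    // component load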
1927 case nir_intrinsic_load_per_vertex_input: {
1928 const DataType dType = getDType(insn);
1929 LValues &newDefs = convert(&insn->dest);
1930 Value *indirectVertex;
1931 Value *indirectOffset;
1932 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1933 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1934
1935 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1936 mkImm(baseVertex), indirectVertex);
1937 for (uint8_t i = 0u; i < dest_components; ++i) {
1938 uint32_t address = getSlotAddress(insn, idx, i);
1939 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
1940 indirectOffset, vtxBase, info->in[idx].patch);
1941 }
1942 break;
1943 }
1944 case nir_intrinsic_load_per_vertex_output: {
1945 const DataType dType = getDType(insn);
1946 LValues &newDefs = convert(&insn->dest);
1947 Value *indirectVertex;
1948 Value *indirectOffset;
1949 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1950 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1951 Value *vtxBase = NULL;
1952
1953 if (indirectVertex)
1954 vtxBase = indirectVertex;
1955 else
1956 vtxBase = loadImm(NULL, baseVertex);
1957
1958 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
1959
1960 for (uint8_t i = 0u; i < dest_components; ++i) {
1961 uint32_t address = getSlotAddress(insn, idx, i);
1962 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
1963 indirectOffset, vtxBase, info->in[idx].patch);
1964 }
1965 break;
1966 }
1967 case nir_intrinsic_emit_vertex: {
1968 if (info->io.genUserClip > 0)
1969 handleUserClipPlanes();
1970 uint32_t idx = nir_intrinsic_stream_id(insn);
1971 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1972 break;
1973 }
1974 case nir_intrinsic_end_primitive: {
1975 uint32_t idx = nir_intrinsic_stream_id(insn);
1976 if (idx)
1977 break;
1978 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1979 break;
1980 }
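    // UBO loads: the buffer index is biased by 1, presumably because constant
    // buffer 0 is reserved for the regular uniforms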
1981 case nir_intrinsic_load_ubo: {
1982 const DataType dType = getDType(insn);
1983 LValues &newDefs = convert(&insn->dest);
1984 Value *indirectIndex;
1985 Value *indirectOffset;
1986 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
1987 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
1988
1989 for (uint8_t i = 0u; i < dest_components; ++i) {
1990 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
1991 indirectOffset, indirectIndex);
1992 }
1993 break;
1994 }
1995 case nir_intrinsic_get_buffer_size: {
1996 LValues &newDefs = convert(&insn->dest);
1997 const DataType dType = getDType(insn);
1998 Value *indirectBuffer;
1999 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2000
2001 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2002 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2003 break;
2004 }
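    // SSBO stores honour the per-component NIR write mask; globalAccess bit 1
    // appears to mark the shader as writing memory (bit 0 marks reads)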
2005 case nir_intrinsic_store_ssbo: {
2006 DataType sType = getSType(insn->src[0], false, false);
2007 Value *indirectBuffer;
2008 Value *indirectOffset;
2009 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2010 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2011
2012 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2013 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2014 continue;
2015 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2016 offset + i * typeSizeof(sType));
2017 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2018 ->setIndirect(0, 1, indirectBuffer);
2019 }
2020 info->io.globalAccess |= 0x2;
2021 break;
2022 }
2023 case nir_intrinsic_load_ssbo: {
2024 const DataType dType = getDType(insn);
2025 LValues &newDefs = convert(&insn->dest);
2026 Value *indirectBuffer;
2027 Value *indirectOffset;
2028 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2029 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2030
2031 for (uint8_t i = 0u; i < dest_components; ++i)
2032 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2033 indirectOffset, indirectBuffer);
2034
2035 info->io.globalAccess |= 0x1;
2036 break;
2037 }
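    // atomics: OP_ATOM takes the memory symbol plus the data operand, and the
    // compare-and-swap variants pass their second value as an extra source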
2038 case nir_intrinsic_shared_atomic_add:
2039 case nir_intrinsic_shared_atomic_and:
2040 case nir_intrinsic_shared_atomic_comp_swap:
2041 case nir_intrinsic_shared_atomic_exchange:
2042 case nir_intrinsic_shared_atomic_or:
2043 case nir_intrinsic_shared_atomic_imax:
2044 case nir_intrinsic_shared_atomic_imin:
2045 case nir_intrinsic_shared_atomic_umax:
2046 case nir_intrinsic_shared_atomic_umin:
2047 case nir_intrinsic_shared_atomic_xor: {
2048 const DataType dType = getDType(insn);
2049 LValues &newDefs = convert(&insn->dest);
2050 Value *indirectOffset;
2051 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2052 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2053 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2054 if (op == nir_intrinsic_shared_atomic_comp_swap)
2055 atom->setSrc(2, getSrc(&insn->src[2], 0));
2056 atom->setIndirect(0, 0, indirectOffset);
2057 atom->subOp = getSubOp(op);
2058 break;
2059 }
2060 case nir_intrinsic_ssbo_atomic_add:
2061 case nir_intrinsic_ssbo_atomic_and:
2062 case nir_intrinsic_ssbo_atomic_comp_swap:
2063 case nir_intrinsic_ssbo_atomic_exchange:
2064 case nir_intrinsic_ssbo_atomic_or:
2065 case nir_intrinsic_ssbo_atomic_imax:
2066 case nir_intrinsic_ssbo_atomic_imin:
2067 case nir_intrinsic_ssbo_atomic_umax:
2068 case nir_intrinsic_ssbo_atomic_umin:
2069 case nir_intrinsic_ssbo_atomic_xor: {
2070 const DataType dType = getDType(insn);
2071 LValues &newDefs = convert(&insn->dest);
2072 Value *indirectBuffer;
2073 Value *indirectOffset;
2074 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2075 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2076
2077 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2078 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2079 getSrc(&insn->src[2], 0));
2080 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2081 atom->setSrc(2, getSrc(&insn->src[3], 0));
2082 atom->setIndirect(0, 0, indirectOffset);
2083 atom->setIndirect(0, 1, indirectBuffer);
2084 atom->subOp = getSubOp(op);
2085
2086 info->io.globalAccess |= 0x2;
2087 break;
2088 }
2089 case nir_intrinsic_global_atomic_add:
2090 case nir_intrinsic_global_atomic_and:
2091 case nir_intrinsic_global_atomic_comp_swap:
2092 case nir_intrinsic_global_atomic_exchange:
2093 case nir_intrinsic_global_atomic_or:
2094 case nir_intrinsic_global_atomic_imax:
2095 case nir_intrinsic_global_atomic_imin:
2096 case nir_intrinsic_global_atomic_umax:
2097 case nir_intrinsic_global_atomic_umin:
2098 case nir_intrinsic_global_atomic_xor: {
2099 const DataType dType = getDType(insn);
2100 LValues &newDefs = convert(&insn->dest);
2101 Value *address;
2102 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2103
2104 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2105 Instruction *atom =
2106 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2107 if (op == nir_intrinsic_global_atomic_comp_swap)
2108 atom->setSrc(2, getSrc(&insn->src[2], 0));
2109 atom->setIndirect(0, 0, address);
2110 atom->subOp = getSubOp(op);
2111
2112 info->io.globalAccess |= 0x2;
2113 break;
2114 }
2115 case nir_intrinsic_bindless_image_atomic_add:
2116 case nir_intrinsic_bindless_image_atomic_and:
2117 case nir_intrinsic_bindless_image_atomic_comp_swap:
2118 case nir_intrinsic_bindless_image_atomic_exchange:
2119 case nir_intrinsic_bindless_image_atomic_imax:
2120 case nir_intrinsic_bindless_image_atomic_umax:
2121 case nir_intrinsic_bindless_image_atomic_imin:
2122 case nir_intrinsic_bindless_image_atomic_umin:
2123 case nir_intrinsic_bindless_image_atomic_or:
2124 case nir_intrinsic_bindless_image_atomic_xor:
2125 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2126 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2127 case nir_intrinsic_bindless_image_load:
2128 case nir_intrinsic_bindless_image_samples:
2129 case nir_intrinsic_bindless_image_size:
2130 case nir_intrinsic_bindless_image_store:
2131 case nir_intrinsic_image_atomic_add:
2132 case nir_intrinsic_image_atomic_and:
2133 case nir_intrinsic_image_atomic_comp_swap:
2134 case nir_intrinsic_image_atomic_exchange:
2135 case nir_intrinsic_image_atomic_imax:
2136 case nir_intrinsic_image_atomic_umax:
2137 case nir_intrinsic_image_atomic_imin:
2138 case nir_intrinsic_image_atomic_umin:
2139 case nir_intrinsic_image_atomic_or:
2140 case nir_intrinsic_image_atomic_xor:
2141 case nir_intrinsic_image_atomic_inc_wrap:
2142 case nir_intrinsic_image_atomic_dec_wrap:
2143 case nir_intrinsic_image_load:
2144 case nir_intrinsic_image_samples:
2145 case nir_intrinsic_image_size:
2146 case nir_intrinsic_image_store: {
2147 std::vector<Value*> srcs, defs;
2148 Value *indirect;
2149 DataType ty;
2150
2151 uint32_t mask = 0;
2152 TexInstruction::Target target =
2153 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2154 unsigned int argCount = getNIRArgCount(target);
2155 uint16_t location = 0;
2156
2157 if (opInfo.has_dest) {
2158 LValues &newDefs = convert(&insn->dest);
2159 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2160 defs.push_back(newDefs[i]);
2161 mask |= 1 << i;
2162 }
2163 }
2164
2165 int lod_src = -1;
2166 bool bindless = false;
2167 switch (op) {
2168 case nir_intrinsic_bindless_image_atomic_add:
2169 case nir_intrinsic_bindless_image_atomic_and:
2170 case nir_intrinsic_bindless_image_atomic_comp_swap:
2171 case nir_intrinsic_bindless_image_atomic_exchange:
2172 case nir_intrinsic_bindless_image_atomic_imax:
2173 case nir_intrinsic_bindless_image_atomic_umax:
2174 case nir_intrinsic_bindless_image_atomic_imin:
2175 case nir_intrinsic_bindless_image_atomic_umin:
2176 case nir_intrinsic_bindless_image_atomic_or:
2177 case nir_intrinsic_bindless_image_atomic_xor:
2178 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2179 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2180 ty = getDType(insn);
2181 bindless = true;
2182 info->io.globalAccess |= 0x2;
2183 mask = 0x1;
2184 break;
2185 case nir_intrinsic_image_atomic_add:
2186 case nir_intrinsic_image_atomic_and:
2187 case nir_intrinsic_image_atomic_comp_swap:
2188 case nir_intrinsic_image_atomic_exchange:
2189 case nir_intrinsic_image_atomic_imax:
2190 case nir_intrinsic_image_atomic_umax:
2191 case nir_intrinsic_image_atomic_imin:
2192 case nir_intrinsic_image_atomic_umin:
2193 case nir_intrinsic_image_atomic_or:
2194 case nir_intrinsic_image_atomic_xor:
2195 case nir_intrinsic_image_atomic_inc_wrap:
2196 case nir_intrinsic_image_atomic_dec_wrap:
2197 ty = getDType(insn);
2198 bindless = false;
2199 info->io.globalAccess |= 0x2;
2200 mask = 0x1;
2201 break;
2202 case nir_intrinsic_bindless_image_load:
2203 case nir_intrinsic_image_load:
2204 ty = TYPE_U32;
2205 bindless = op == nir_intrinsic_bindless_image_load;
2206 info->io.globalAccess |= 0x1;
2207 lod_src = 4;
2208 break;
2209 case nir_intrinsic_bindless_image_store:
2210 case nir_intrinsic_image_store:
2211 ty = TYPE_U32;
2212 bindless = op == nir_intrinsic_bindless_image_store;
2213 info->io.globalAccess |= 0x2;
2214 lod_src = 5;
2215 mask = 0xf;
2216 break;
2217 case nir_intrinsic_bindless_image_samples:
2218 case nir_intrinsic_image_samples:
2219 ty = TYPE_U32;
2220 bindless = op == nir_intrinsic_bindless_image_samples;
2221 mask = 0x8;
2222 break;
2223 case nir_intrinsic_bindless_image_size:
2224 case nir_intrinsic_image_size:
2225 ty = TYPE_U32;
2226 bindless = op == nir_intrinsic_bindless_image_size;
2227 break;
2228 default:
2229 unreachable("unhandled image opcode");
2230 break;
2231 }
2232
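       // for bindless images the handle itself becomes the indirect source;
       // otherwise resolve the bound image slot (plus any indirect index)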
2233 if (bindless)
2234 indirect = getSrc(&insn->src[0], 0);
2235 else
2236 location = getIndirect(&insn->src[0], 0, indirect);
2237
2238 // coords
2239 if (opInfo.num_srcs >= 2)
2240 for (unsigned int i = 0u; i < argCount; ++i)
2241 srcs.push_back(getSrc(&insn->src[1], i));
2242
2243             // for MS images the sample index is just another src added after the coords
2244 if (opInfo.num_srcs >= 3 && target.isMS())
2245 srcs.push_back(getSrc(&insn->src[2], 0));
2246
2247 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2248 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2249 for (uint8_t i = 0u; i < components; ++i)
2250 srcs.push_back(getSrc(&insn->src[3], i));
2251 }
2252
2253 if (opInfo.num_srcs >= 5 && lod_src != 5)
2254             // one extra source for the atomic compare-and-swap operand
2255 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2256 srcs.push_back(getSrc(&insn->src[4], i));
2257
2258 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2259 texi->tex.bindless = bindless;
2260 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2261 texi->tex.mask = mask;
2262 texi->cache = convert(nir_intrinsic_access(insn));
2263 texi->setType(ty);
2264 texi->subOp = getSubOp(op);
2265
2266 if (indirect)
2267 texi->setIndirectR(indirect);
2268
2269 break;
2270 }
2271 case nir_intrinsic_store_scratch:
2272 case nir_intrinsic_store_shared: {
2273 DataType sType = getSType(insn->src[0], false, false);
2274 Value *indirectOffset;
2275 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2276
2277 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2278 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2279 continue;
2280 Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
2281 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2282 }
2283 break;
2284 }
2285 case nir_intrinsic_load_kernel_input:
2286 case nir_intrinsic_load_scratch:
2287 case nir_intrinsic_load_shared: {
2288 const DataType dType = getDType(insn);
2289 LValues &newDefs = convert(&insn->dest);
2290 Value *indirectOffset;
2291 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2292
2293 for (uint8_t i = 0u; i < dest_components; ++i)
2294 loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);
2295
2296 break;
2297 }
2298 case nir_intrinsic_control_barrier: {
2299 // TODO: add flag to shader_info
2300 info->numBarriers = 1;
2301 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2302 bar->fixed = 1;
2303 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2304 break;
2305 }
2306 case nir_intrinsic_group_memory_barrier:
2307 case nir_intrinsic_memory_barrier:
2308 case nir_intrinsic_memory_barrier_buffer:
2309 case nir_intrinsic_memory_barrier_image:
2310 case nir_intrinsic_memory_barrier_shared: {
2311 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2312 bar->fixed = 1;
2313 bar->subOp = getSubOp(op);
2314 break;
2315 }
2316 case nir_intrinsic_memory_barrier_tcs_patch:
2317 break;
2318 case nir_intrinsic_shader_clock: {
2319 const DataType dType = getDType(insn);
2320 LValues &newDefs = convert(&insn->dest);
2321
2322 loadImm(newDefs[0], 0u);
2323 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2324 break;
2325 }
2326 case nir_intrinsic_load_global: {
2327 const DataType dType = getDType(insn);
2328 LValues &newDefs = convert(&insn->dest);
2329 Value *indirectOffset;
2330 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2331
2332 for (auto i = 0u; i < dest_components; ++i)
2333 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2334
2335 info->io.globalAccess |= 0x1;
2336 break;
2337 }
2338 case nir_intrinsic_store_global: {
2339 DataType sType = getSType(insn->src[0], false, false);
2340
2341 for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2342 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2343 continue;
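          // 64-bit components are split and emitted as two 32-bit stores at
          // offset and offset + 4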
2344 if (typeSizeof(sType) == 8) {
2345 Value *split[2];
2346 mkSplit(split, 4, getSrc(&insn->src[0], i));
2347
2348 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2349 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2350
2351 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2352 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2353 } else {
2354 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2355 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2356 }
2357 }
2358
2359 info->io.globalAccess |= 0x2;
2360 break;
2361 }
2362 default:
2363 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2364 return false;
2365 }
2366
2367 return true;
2368 }
2369
2370 bool
2371 Converter::visit(nir_jump_instr *insn)
2372 {
2373 switch (insn->type) {
2374 case nir_jump_return:
2375 // TODO: this only works in the main function
2376 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2377 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2378 break;
2379 case nir_jump_break:
2380 case nir_jump_continue: {
2381 bool isBreak = insn->type == nir_jump_break;
2382 nir_block *block = insn->instr.block;
2383 BasicBlock *target = convert(block->successors[0]);
2384 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2385 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2386 break;
2387 }
2388 default:
2389 ERROR("unknown nir_jump_type %u\n", insn->type);
2390 return false;
2391 }
2392
2393 return true;
2394 }
2395
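// load_const defs are only recorded in visit() below; this helper materializes
// the requested component as an immediate when a user asks for it, either at
// immInsertPos or at the start of the current basic block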
2396 Value*
2397 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2398 {
2399 Value *val;
2400
2401 if (immInsertPos)
2402 setPosition(immInsertPos, true);
2403 else
2404 setPosition(bb, false);
2405
2406 switch (insn->def.bit_size) {
2407 case 64:
2408 val = loadImm(getSSA(8), insn->value[idx].u64);
2409 break;
2410 case 32:
2411 val = loadImm(getSSA(4), insn->value[idx].u32);
2412 break;
2413 case 16:
2414 val = loadImm(getSSA(2), insn->value[idx].u16);
2415 break;
2416 case 8:
2417 val = loadImm(getSSA(1), insn->value[idx].u8);
2418 break;
2419 default:
2420 unreachable("unhandled bit size!\n");
2421 }
2422 setPosition(bb, true);
2423 return val;
2424 }
2425
2426 bool
2427 Converter::visit(nir_load_const_instr *insn)
2428 {
2429 assert(insn->def.bit_size <= 64);
2430 immediates[insn->def.index] = insn;
2431 return true;
2432 }
2433
2434 #define DEFAULT_CHECKS \
2435 if (insn->dest.dest.ssa.num_components > 1) { \
2436 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2437 return false; \
2438 } \
2439 if (insn->dest.write_mask != 1) { \
2440       ERROR("nir_alu_instr only supported with a write_mask of 1!\n"); \
2441 return false; \
2442 }
2443 bool
2444 Converter::visit(nir_alu_instr *insn)
2445 {
2446 const nir_op op = insn->op;
2447 const nir_op_info &info = nir_op_infos[op];
2448 DataType dType = getDType(insn);
2449 const std::vector<DataType> sTypes = getSTypes(insn);
2450
2451 Instruction *oldPos = this->bb->getExit();
2452
2453 switch (op) {
2454 case nir_op_fabs:
2455 case nir_op_iabs:
2456 case nir_op_fadd:
2457 case nir_op_iadd:
2458 case nir_op_iand:
2459 case nir_op_fceil:
2460 case nir_op_fcos:
2461 case nir_op_fddx:
2462 case nir_op_fddx_coarse:
2463 case nir_op_fddx_fine:
2464 case nir_op_fddy:
2465 case nir_op_fddy_coarse:
2466 case nir_op_fddy_fine:
2467 case nir_op_fdiv:
2468 case nir_op_idiv:
2469 case nir_op_udiv:
2470 case nir_op_fexp2:
2471 case nir_op_ffloor:
2472 case nir_op_ffma:
2473 case nir_op_flog2:
2474 case nir_op_fmax:
2475 case nir_op_imax:
2476 case nir_op_umax:
2477 case nir_op_fmin:
2478 case nir_op_imin:
2479 case nir_op_umin:
2480 case nir_op_fmod:
2481 case nir_op_imod:
2482 case nir_op_umod:
2483 case nir_op_fmul:
2484 case nir_op_imul:
2485 case nir_op_imul_high:
2486 case nir_op_umul_high:
2487 case nir_op_fneg:
2488 case nir_op_ineg:
2489 case nir_op_inot:
2490 case nir_op_ior:
2491 case nir_op_pack_64_2x32_split:
2492 case nir_op_fpow:
2493 case nir_op_frcp:
2494 case nir_op_frem:
2495 case nir_op_irem:
2496 case nir_op_frsq:
2497 case nir_op_fsat:
2498 case nir_op_ishr:
2499 case nir_op_ushr:
2500 case nir_op_fsin:
2501 case nir_op_fsqrt:
2502 case nir_op_ftrunc:
2503 case nir_op_ishl:
2504 case nir_op_ixor: {
2505 DEFAULT_CHECKS;
2506 LValues &newDefs = convert(&insn->dest);
2507 operation preOp = preOperationNeeded(op);
2508 if (preOp != OP_NOP) {
2509 assert(info.num_inputs < 2);
2510 Value *tmp = getSSA(typeSizeof(dType));
2511 Instruction *i0 = mkOp(preOp, dType, tmp);
2512 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2513 if (info.num_inputs) {
2514 i0->setSrc(0, getSrc(&insn->src[0]));
2515 i1->setSrc(0, tmp);
2516 }
2517 i1->subOp = getSubOp(op);
2518 } else {
2519 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2520 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2521 i->setSrc(s, getSrc(&insn->src[s]));
2522 }
2523 i->subOp = getSubOp(op);
2524 }
2525 break;
2526 }
2527 case nir_op_ifind_msb:
2528 case nir_op_ufind_msb: {
2529 DEFAULT_CHECKS;
2530 LValues &newDefs = convert(&insn->dest);
2531 dType = sTypes[0];
2532 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2533 break;
2534 }
2535 case nir_op_fround_even: {
2536 DEFAULT_CHECKS;
2537 LValues &newDefs = convert(&insn->dest);
2538 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2539 break;
2540 }
2541 // convert instructions
2542 case nir_op_f2f32:
2543 case nir_op_f2i32:
2544 case nir_op_f2u32:
2545 case nir_op_i2f32:
2546 case nir_op_i2i32:
2547 case nir_op_u2f32:
2548 case nir_op_u2u32:
2549 case nir_op_f2f64:
2550 case nir_op_f2i64:
2551 case nir_op_f2u64:
2552 case nir_op_i2f64:
2553 case nir_op_i2i64:
2554 case nir_op_u2f64:
2555 case nir_op_u2u64: {
2556 DEFAULT_CHECKS;
2557 LValues &newDefs = convert(&insn->dest);
2558 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2559 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2560 i->rnd = ROUND_Z;
2561 i->sType = sTypes[0];
2562 break;
2563 }
2564 // compare instructions
2565 case nir_op_feq32:
2566 case nir_op_ieq32:
2567 case nir_op_fge32:
2568 case nir_op_ige32:
2569 case nir_op_uge32:
2570 case nir_op_flt32:
2571 case nir_op_ilt32:
2572 case nir_op_ult32:
2573 case nir_op_fne32:
2574 case nir_op_ine32: {
2575 DEFAULT_CHECKS;
2576 LValues &newDefs = convert(&insn->dest);
2577 Instruction *i = mkCmp(getOperation(op),
2578 getCondCode(op),
2579 dType,
2580 newDefs[0],
2581 dType,
2582 getSrc(&insn->src[0]),
2583 getSrc(&insn->src[1]));
2584 if (info.num_inputs == 3)
2585 i->setSrc(2, getSrc(&insn->src[2]));
2586 i->sType = sTypes[0];
2587 break;
2588 }
2589 case nir_op_mov:
2590 case nir_op_vec2:
2591 case nir_op_vec3:
2592 case nir_op_vec4:
2593 case nir_op_vec8:
2594 case nir_op_vec16: {
2595 LValues &newDefs = convert(&insn->dest);
2596 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2597 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2598 }
2599 break;
2600 }
2601 // (un)pack
2602 case nir_op_pack_64_2x32: {
2603 LValues &newDefs = convert(&insn->dest);
2604 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2605 merge->setSrc(0, getSrc(&insn->src[0], 0));
2606 merge->setSrc(1, getSrc(&insn->src[0], 1));
2607 break;
2608 }
2609 case nir_op_pack_half_2x16_split: {
2610 LValues &newDefs = convert(&insn->dest);
2611 Value *tmpH = getSSA();
2612 Value *tmpL = getSSA();
2613
2614 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2615 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2616 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2617 break;
2618 }
2619 case nir_op_unpack_half_2x16_split_x:
2620 case nir_op_unpack_half_2x16_split_y: {
2621 LValues &newDefs = convert(&insn->dest);
2622 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2623 if (op == nir_op_unpack_half_2x16_split_y)
2624 cvt->subOp = 1;
2625 break;
2626 }
2627 case nir_op_unpack_64_2x32: {
2628 LValues &newDefs = convert(&insn->dest);
2629 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2630 break;
2631 }
2632 case nir_op_unpack_64_2x32_split_x: {
2633 LValues &newDefs = convert(&insn->dest);
2634 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2635 break;
2636 }
2637 case nir_op_unpack_64_2x32_split_y: {
2638 LValues &newDefs = convert(&insn->dest);
2639 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2640 break;
2641 }
2642 // special instructions
2643 case nir_op_fsign:
2644 case nir_op_isign: {
2645 DEFAULT_CHECKS;
2646 DataType iType;
2647 if (::isFloatType(dType))
2648 iType = TYPE_F32;
2649 else
2650 iType = TYPE_S32;
2651
2652 LValues &newDefs = convert(&insn->dest);
2653 LValue *val0 = getScratch();
2654 LValue *val1 = getScratch();
2655 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2656 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2657
2658 if (dType == TYPE_F64) {
2659 mkOp2(OP_SUB, iType, val0, val0, val1);
2660 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2661 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2662 mkOp2(OP_SUB, iType, val0, val1, val0);
2663 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2664 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2665 } else if (::isFloatType(dType))
2666 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2667 else
2668 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2669 break;
2670 }
2671 case nir_op_fcsel:
2672 case nir_op_b32csel: {
2673 DEFAULT_CHECKS;
2674 LValues &newDefs = convert(&insn->dest);
2675 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2676 break;
2677 }
2678 case nir_op_ibitfield_extract:
2679 case nir_op_ubitfield_extract: {
2680 DEFAULT_CHECKS;
2681 Value *tmp = getSSA();
2682 LValues &newDefs = convert(&insn->dest);
2683 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2684 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2685 break;
2686 }
2687 case nir_op_bfm: {
2688 DEFAULT_CHECKS;
2689 LValues &newDefs = convert(&insn->dest);
2690 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2691 break;
2692 }
2693 case nir_op_bitfield_insert: {
2694 DEFAULT_CHECKS;
2695 LValues &newDefs = convert(&insn->dest);
2696 LValue *temp = getSSA();
2697 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2698 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2699 break;
2700 }
2701 case nir_op_bit_count: {
2702 DEFAULT_CHECKS;
2703 LValues &newDefs = convert(&insn->dest);
2704 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2705 break;
2706 }
2707 case nir_op_bitfield_reverse: {
2708 DEFAULT_CHECKS;
2709 LValues &newDefs = convert(&insn->dest);
2710 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2711 break;
2712 }
2713 case nir_op_find_lsb: {
2714 DEFAULT_CHECKS;
2715 LValues &newDefs = convert(&insn->dest);
2716 Value *tmp = getSSA();
2717 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2718 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2719 break;
2720 }
2721 case nir_op_extract_u8: {
2722 DEFAULT_CHECKS;
2723 LValues &newDefs = convert(&insn->dest);
2724 Value *prmt = getSSA();
2725 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2726 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2727 break;
2728 }
2729 case nir_op_extract_i8: {
2730 DEFAULT_CHECKS;
2731 LValues &newDefs = convert(&insn->dest);
2732 Value *prmt = getSSA();
2733 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2734 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2735 break;
2736 }
2737 case nir_op_extract_u16: {
2738 DEFAULT_CHECKS;
2739 LValues &newDefs = convert(&insn->dest);
2740 Value *prmt = getSSA();
2741 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2742 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2743 break;
2744 }
2745 case nir_op_extract_i16: {
2746 DEFAULT_CHECKS;
2747 LValues &newDefs = convert(&insn->dest);
2748 Value *prmt = getSSA();
2749 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2750 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2751 break;
2752 }
2753 case nir_op_urol: {
2754 DEFAULT_CHECKS;
2755 LValues &newDefs = convert(&insn->dest);
2756 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2757 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2758 ->subOp = NV50_IR_SUBOP_SHF_L |
2759 NV50_IR_SUBOP_SHF_W |
2760 NV50_IR_SUBOP_SHF_HI;
2761 break;
2762 }
2763 case nir_op_uror: {
2764 DEFAULT_CHECKS;
2765 LValues &newDefs = convert(&insn->dest);
2766 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2767 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2768 ->subOp = NV50_IR_SUBOP_SHF_R |
2769 NV50_IR_SUBOP_SHF_W |
2770 NV50_IR_SUBOP_SHF_LO;
2771 break;
2772 }
2773 // boolean conversions
2774 case nir_op_b2f32: {
2775 DEFAULT_CHECKS;
2776 LValues &newDefs = convert(&insn->dest);
2777 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2778 break;
2779 }
2780 case nir_op_b2f64: {
2781 DEFAULT_CHECKS;
2782 LValues &newDefs = convert(&insn->dest);
2783 Value *tmp = getSSA(4);
2784 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2785 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2786 break;
2787 }
2788 case nir_op_f2b32:
2789 case nir_op_i2b32: {
2790 DEFAULT_CHECKS;
2791 LValues &newDefs = convert(&insn->dest);
2792 Value *src1;
2793 if (typeSizeof(sTypes[0]) == 8) {
2794 src1 = loadImm(getSSA(8), 0.0);
2795 } else {
2796 src1 = zero;
2797 }
2798 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2799 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2800 break;
2801 }
2802 case nir_op_b2i32: {
2803 DEFAULT_CHECKS;
2804 LValues &newDefs = convert(&insn->dest);
2805 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2806 break;
2807 }
2808 case nir_op_b2i64: {
2809 DEFAULT_CHECKS;
2810 LValues &newDefs = convert(&insn->dest);
2811 LValue *def = getScratch();
2812 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2813 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2814 break;
2815 }
2816 default:
2817 ERROR("unknown nir_op %s\n", info.name);
2818 assert(false);
2819 return false;
2820 }
2821
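    // propagate NIR's 'exact' flag as 'precise' onto every instruction emitted for
    // this ALU op; the last one additionally takes the destination saturate modifier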
2822 if (!oldPos) {
2823 oldPos = this->bb->getEntry();
2824 oldPos->precise = insn->exact;
2825 }
2826
2827 if (unlikely(!oldPos))
2828 return true;
2829
2830 while (oldPos->next) {
2831 oldPos = oldPos->next;
2832 oldPos->precise = insn->exact;
2833 }
2834 oldPos->saturate = insn->dest.saturate;
2835
2836 return true;
2837 }
2838 #undef DEFAULT_CHECKS
2839
2840 bool
2841 Converter::visit(nir_ssa_undef_instr *insn)
2842 {
2843 LValues &newDefs = convert(&insn->def);
2844 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2845 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2846 }
2847 return true;
2848 }
2849
2850 #define CASE_SAMPLER(ty) \
2851 case GLSL_SAMPLER_DIM_ ## ty : \
2852 if (isArray && !isShadow) \
2853 return TEX_TARGET_ ## ty ## _ARRAY; \
2854 else if (!isArray && isShadow) \
2855 return TEX_TARGET_## ty ## _SHADOW; \
2856 else if (isArray && isShadow) \
2857 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2858 else \
2859 return TEX_TARGET_ ## ty
2860
2861 TexTarget
2862 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2863 {
2864 switch (dim) {
2865 CASE_SAMPLER(1D);
2866 CASE_SAMPLER(2D);
2867 CASE_SAMPLER(CUBE);
2868 case GLSL_SAMPLER_DIM_3D:
2869 return TEX_TARGET_3D;
2870 case GLSL_SAMPLER_DIM_MS:
2871 if (isArray)
2872 return TEX_TARGET_2D_MS_ARRAY;
2873 return TEX_TARGET_2D_MS;
2874 case GLSL_SAMPLER_DIM_RECT:
2875 if (isShadow)
2876 return TEX_TARGET_RECT_SHADOW;
2877 return TEX_TARGET_RECT;
2878 case GLSL_SAMPLER_DIM_BUF:
2879 return TEX_TARGET_BUFFER;
2880 case GLSL_SAMPLER_DIM_EXTERNAL:
2881 return TEX_TARGET_2D;
2882 default:
2883 ERROR("unknown glsl_sampler_dim %u\n", dim);
2884 assert(false);
2885 return TEX_TARGET_COUNT;
2886 }
2887 }
2888 #undef CASE_SAMPLER
2889
2890 Value*
2891 Converter::applyProjection(Value *src, Value *proj)
2892 {
2893 if (!proj)
2894 return src;
2895 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
2896 }
2897
2898 unsigned int
2899 Converter::getNIRArgCount(TexInstruction::Target& target)
2900 {
2901 unsigned int result = target.getArgCount();
2902 if (target.isCube() && target.isArray())
2903 result--;
2904 if (target.isMS())
2905 result--;
2906 return result;
2907 }
2908
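// map gl_access_qualifier onto cache modes: volatile accesses bypass the caches
// (CV), coherent ones are cached at the global level only (CG), everything else
// may be fully cached (CA)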
2909 CacheMode
2910 Converter::convert(enum gl_access_qualifier access)
2911 {
2912 if (access & ACCESS_VOLATILE)
2913 return CACHE_CV;
2914 if (access & ACCESS_COHERENT)
2915 return CACHE_CG;
2916 return CACHE_CA;
2917 }
2918
2919 bool
2920 Converter::visit(nir_tex_instr *insn)
2921 {
2922 switch (insn->op) {
2923 case nir_texop_lod:
2924 case nir_texop_query_levels:
2925 case nir_texop_tex:
2926 case nir_texop_texture_samples:
2927 case nir_texop_tg4:
2928 case nir_texop_txb:
2929 case nir_texop_txd:
2930 case nir_texop_txf:
2931 case nir_texop_txf_ms:
2932 case nir_texop_txl:
2933 case nir_texop_txs: {
2934 LValues &newDefs = convert(&insn->dest);
2935 std::vector<Value*> srcs;
2936 std::vector<Value*> defs;
2937 std::vector<nir_src*> offsets;
2938 uint8_t mask = 0;
2939 bool lz = false;
2940 Value *proj = NULL;
2941 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
2942 operation op = getOperation(insn->op);
2943
2944 int r, s;
2945 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
2946 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
2947 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
2948 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
2949 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
2950 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
2951 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
2952 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
2953 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
2954 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
2955 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
2956 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
2957 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
2958
2959 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
2960 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
2961
2962 if (projIdx != -1)
2963 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
2964
2965 srcs.resize(insn->coord_components);
2966 for (uint8_t i = 0u; i < insn->coord_components; ++i)
2967 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
2968
2969       // sometimes we get fewer args than target.getArgCount(), but codegen expects the latter
2970 if (insn->coord_components) {
2971 uint32_t argCount = target.getArgCount();
2972
2973 if (target.isMS())
2974 argCount -= 1;
2975
2976 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
2977 srcs.push_back(getSSA());
2978 }
2979
2980 if (insn->op == nir_texop_texture_samples)
2981 srcs.push_back(zero);
2982 else if (!insn->num_srcs)
2983 srcs.push_back(loadImm(NULL, 0));
2984 if (biasIdx != -1)
2985 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
2986 if (lodIdx != -1)
2987 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
2988 else if (op == OP_TXF)
2989 lz = true;
2990 if (msIdx != -1)
2991 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
2992 if (offsetIdx != -1)
2993 offsets.push_back(&insn->src[offsetIdx].src);
2994 if (compIdx != -1)
2995 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
2996 if (texOffIdx != -1) {
2997 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
2998 texOffIdx = srcs.size() - 1;
2999 }
3000 if (sampOffIdx != -1) {
3001 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3002 sampOffIdx = srcs.size() - 1;
3003 }
3004 if (bindless) {
3005          // currently we only use the lower 32 bits of the bindless handle
3006 Value *split[2];
3007 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3008
3009 mkSplit(split, 4, handle);
3010
3011 srcs.push_back(split[0]);
3012 texOffIdx = srcs.size() - 1;
3013 }
3014
3015 r = bindless ? 0xff : insn->texture_index;
3016 s = bindless ? 0x1f : insn->sampler_index;
3017
3018 defs.resize(newDefs.size());
3019 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3020 defs[d] = newDefs[d];
3021 mask |= 1 << d;
3022 }
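       // MS fetches never take an LOD, and implicit-LOD TEX is presumably only
       // meaningful in fragment shaders, so force level zero in both cases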
3023 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3024 lz = true;
3025
3026 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3027 texi->tex.levelZero = lz;
3028 texi->tex.mask = mask;
3029 texi->tex.bindless = bindless;
3030
3031 if (texOffIdx != -1)
3032 texi->tex.rIndirectSrc = texOffIdx;
3033 if (sampOffIdx != -1)
3034 texi->tex.sIndirectSrc = sampOffIdx;
3035
3036 switch (insn->op) {
3037 case nir_texop_tg4:
3038 if (!target.isShadow())
3039 texi->tex.gatherComp = insn->component;
3040 break;
3041 case nir_texop_txs:
3042 texi->tex.query = TXQ_DIMS;
3043 break;
3044 case nir_texop_texture_samples:
3045 texi->tex.mask = 0x4;
3046 texi->tex.query = TXQ_TYPE;
3047 break;
3048 case nir_texop_query_levels:
3049 texi->tex.mask = 0x8;
3050 texi->tex.query = TXQ_DIMS;
3051 break;
3052 default:
3053 break;
3054 }
3055
3056 texi->tex.useOffsets = offsets.size();
3057 if (texi->tex.useOffsets) {
3058 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3059 for (uint32_t c = 0u; c < 3; ++c) {
3060 uint8_t s2 = std::min(c, target.getDim() - 1);
3061 texi->offset[s][c].set(getSrc(offsets[s], s2));
3062 texi->offset[s][c].setInsn(texi);
3063 }
3064 }
3065 }
3066
3067 if (op == OP_TXG && offsetIdx == -1) {
3068 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3069 texi->tex.useOffsets = 4;
3070 setPosition(texi, false);
3071 for (uint8_t i = 0; i < 4; ++i) {
3072 for (uint8_t j = 0; j < 2; ++j) {
3073 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3074 texi->offset[i][j].setInsn(texi);
3075 }
3076 }
3077 setPosition(texi, true);
3078 }
3079 }
3080
3081 if (ddxIdx != -1 && ddyIdx != -1) {
3082 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3083 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3084 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3085 }
3086 }
3087
3088 break;
3089 }
3090 default:
3091 ERROR("unknown nir_texop %u\n", insn->op);
3092 return false;
3093 }
3094 return true;
3095 }
3096
3097 bool
3098 Converter::run()
3099 {
3100 bool progress;
3101
3102 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3103 nir_print_shader(nir, stderr);
3104
3105 struct nir_lower_subgroups_options subgroup_options = {
3106 .subgroup_size = 32,
3107 .ballot_bit_size = 32,
3108 };
3109
3110 /* prepare for IO lowering */
3111 NIR_PASS_V(nir, nir_opt_deref);
3112 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3113 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3114
3115 /* codegen assumes vec4 alignment for memory */
3116 NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
3117 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
3118 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3119
3120 NIR_PASS_V(nir, nir_lower_io,
3121 (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out),
3122 type_size, (nir_lower_io_options)0);
3123
3124 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3125
3126 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3127 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3128 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3129
3130    /* TODO: improve this lowering/optimisation loop so that we can use
3131 * nir_opt_idiv_const effectively before this.
3132 */
3133 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_precise);
3134
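    /* run the usual NIR optimisation passes until none of them makes progress */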
3135 do {
3136 progress = false;
3137 NIR_PASS(progress, nir, nir_copy_prop);
3138 NIR_PASS(progress, nir, nir_opt_remove_phis);
3139 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3140 NIR_PASS(progress, nir, nir_opt_cse);
3141 NIR_PASS(progress, nir, nir_opt_algebraic);
3142 NIR_PASS(progress, nir, nir_opt_constant_folding);
3143 NIR_PASS(progress, nir, nir_copy_prop);
3144 NIR_PASS(progress, nir, nir_opt_dce);
3145 NIR_PASS(progress, nir, nir_opt_dead_cf);
3146 } while (progress);
3147
3148 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3149 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3150
3151 // Garbage collect dead instructions
3152 nir_sweep(nir);
3153
3154 if (!parseNIR()) {
3155       ERROR("Couldn't parse NIR!\n");
3156 return false;
3157 }
3158
3159 if (!assignSlots()) {
3160 ERROR("Couldn't assign slots!\n");
3161 return false;
3162 }
3163
3164 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3165 nir_print_shader(nir, stderr);
3166
3167 nir_foreach_function(function, nir) {
3168 if (!visit(function))
3169 return false;
3170 }
3171
3172 return true;
3173 }
3174
3175 } // unnamed namespace
3176
3177 namespace nv50_ir {
3178
3179 bool
3180 Program::makeFromNIR(struct nv50_ir_prog_info *info)
3181 {
3182 nir_shader *nir = (nir_shader*)info->bin.source;
3183 Converter converter(this, nir, info);
3184 bool result = converter.run();
3185 if (!result)
3186 return result;
3187 LoweringHelper lowering;
3188 lowering.run(this);
3189 tlsSize = info->bin.tlsSpace;
3190 return result;
3191 }
3192
3193 } // namespace nv50_ir
3194
3195 static nir_shader_compiler_options
3196 nvir_nir_shader_compiler_options(int chipset)
3197 {
3198 nir_shader_compiler_options op = {};
3199 op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
3200 op.lower_ffma = false;
3201 op.fuse_ffma = false; /* nir doesn't track mad vs fma */
3202 op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
3203 op.lower_flrp32 = true;
3204 op.lower_flrp64 = true;
3205 op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
3206 op.lower_fsat = false;
3207 op.lower_fsqrt = false; // TODO: only before gm200
3208 op.lower_sincos = false;
3209 op.lower_fmod = true;
3210 op.lower_bitfield_extract = false;
3211 op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3212 op.lower_bitfield_insert = false;
3213 op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3214 op.lower_bitfield_insert_to_bitfield_select = false;
3215 op.lower_bitfield_reverse = false;
3216 op.lower_bit_count = false;
3217 op.lower_ifind_msb = false;
3218 op.lower_find_lsb = false;
3219 op.lower_uadd_carry = true; // TODO
3220 op.lower_usub_borrow = true; // TODO
3221 op.lower_mul_high = false;
3222 op.lower_negate = false;
3223 op.lower_sub = true;
3224 op.lower_scmp = true; // TODO: not implemented yet
3225 op.lower_vector_cmp = false;
3226 op.lower_idiv = true;
3227 op.lower_bitops = false;
3228 op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
3229 op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
3230 op.lower_fdph = false;
3231 op.lower_fdot = false;
3232 op.fdot_replicates = false; // TODO
3233 op.lower_ffloor = false; // TODO
3234 op.lower_ffract = true;
3235 op.lower_fceil = false; // TODO
3236 op.lower_ftrunc = false;
3237 op.lower_ldexp = true;
3238 op.lower_pack_half_2x16 = true;
3239 op.lower_pack_unorm_2x16 = true;
3240 op.lower_pack_snorm_2x16 = true;
3241 op.lower_pack_unorm_4x8 = true;
3242 op.lower_pack_snorm_4x8 = true;
3243 op.lower_unpack_half_2x16 = true;
3244 op.lower_unpack_unorm_2x16 = true;
3245 op.lower_unpack_snorm_2x16 = true;
3246 op.lower_unpack_unorm_4x8 = true;
3247 op.lower_unpack_snorm_4x8 = true;
3248 op.lower_pack_split = false;
3249 op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
3250 op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
3251 op.lower_all_io_to_temps = false;
3252 op.lower_all_io_to_elements = false;
3253 op.vertex_id_zero_based = false;
3254 op.lower_base_vertex = false;
3255 op.lower_helper_invocation = false;
3256 op.optimize_sample_mask_in = false;
3257 op.lower_cs_local_index_from_id = true;
3258 op.lower_cs_local_id_from_index = false;
3259 op.lower_device_index_to_zero = false; // TODO
3260 op.lower_wpos_pntc = false; // TODO
3261 op.lower_hadd = true; // TODO
3262 op.lower_add_sat = true; // TODO
3263 op.vectorize_io = false;
3264 op.lower_to_scalar = false;
3265 op.unify_interfaces = false;
3266 op.use_interpolated_input_intrinsics = true;
3267 op.lower_mul_2x32_64 = true; // TODO
3268 op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
3269 op.has_imul24 = false;
3270 op.intel_vec4 = false;
3271 op.max_unroll_iterations = 32;
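    // GV100+ leans on NIR to lower most 64-bit integer (and, below, double)
    // operations; on older chips these are presumably handled by codegen itself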
3272 op.lower_int64_options = (nir_lower_int64_options) (
3273 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
3274 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
3275 nir_lower_divmod64 |
3276 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
3277 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
3278 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
3279 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
3280 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
3281 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
3282 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
3283 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
3284 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
3285 ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
3286 nir_lower_ufind_msb64
3287 );
3288 op.lower_doubles_options = (nir_lower_doubles_options) (
3289 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
3290 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
3291 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
3292 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
3293 nir_lower_dmod |
3294 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
3295 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
3296 );
3297 return op;
3298 }
3299
3300 static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
3301 nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
3302 static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
3303 nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
3304 static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
3305 nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
3306
3307 const nir_shader_compiler_options *
3308 nv50_ir_nir_shader_compiler_options(int chipset)
3309 {
3310 if (chipset >= NVISA_GV100_CHIPSET)
3311 return &gv100_nir_shader_compiler_options;
3312 if (chipset >= NVISA_GM107_CHIPSET)
3313 return &gm107_nir_shader_compiler_options;
3314 return &gf100_nir_shader_compiler_options;
3315 }