nv50: Clear nv50_ir_prog_info of dead and codegen specific variables
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

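// presumably used as the type_size callback for nir_lower_io: variable sizes
// are counted in vec4 attribute slots (e.g. a mat4 counts as 4 slots, a float
// as 1) and the bindless flag is ignored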
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value does not have a constant part, the Value gets
   // returned through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirArrayLMemOffsets regToLmemOffset;
   NirBlockMap blocks;
   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

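// note on the sub-ops below (assumption, matching NIR's semantics): MUL_HIGH
// selects the upper half of the widened multiply, and SHIFT_WRAP masks the
// shift amount to the operand's bit width, so e.g. (x << 33) on a 32-bit
// value behaves like (x << 1)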
int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
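   // fne must be an *unordered* not-equal so that comparisons involving NaN
   // yield true; ine below uses the plain (ordered) integer variant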
   case nir_op_fne32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.\n");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.\n");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

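// illustrative example: if the addressed source is the constant 2, the
// intrinsic variant below returns base + 2 and leaves indirect == NULL; for
// a non-constant source it returns just the base and hands the value back
// through indirect, pre-shifted left by 4 because varying slots are 16-byte
// vec4s (unless the caller requests scalar addressing)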
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

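// for arrayed per-vertex IO (geometry/tessellation) the outermost array
// dimension selects the vertex, not a slot, so it must not be counted:
// e.g. a GS input "vec4 v[3]" still occupies a single attribute slot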
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

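// 64-bit types take two 32-bit components per element, so e.g. a dvec3
// fully covers its first slot (mask 0xf) and the low half of the second
// (mask 0x3); the slot parameter selects which of those masks is returned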
static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}

bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;
   info->numOutputs = 0;
   info->numSysVals = 0;

   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
      if (!(nir->info.system_values_read & 1ull << i))
         continue;

      info->sv[info->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info->sv[info->numSysVals].si = 0;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_variable(var, &nir->inputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }

   nir_foreach_variable(var, &nir->outputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;

            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info->prop.fp.separateFragData = true;

            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }

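   // user clip planes lowered into the shader (genUserClip > 0) get extra
   // CLIPDIST outputs appended, one vec4 per group of four distances, with
   // the mask trimmed for the last group: e.g. with 6 clip planes the second
   // CLIPDIST output covers only x and y (mask 0x3)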
   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}

uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s\n",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}

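// 64-bit values cannot be loaded in one access from the const/buffer files
// (or through an indirect address), so such loads are split into two 32-bit
// loads of the low and high halves that are merged back into one value,
// roughly: lo = ld u32 [base]; hi = ld u32 [base + 4]; def = merge(lo, hi)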
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

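// stores mirror the split-load scheme above for 64-bit values; for exports
// the source is additionally copied into a fresh value first, presumably
// because OP_EXPORT cannot take arbitrary sources directly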
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}

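// copies the relevant bits of the NIR shader_info into the codegen-facing
// nv50_ir_prog_info before any instructions are translated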
bool
Converter::parseNIR()
{
   info->bin.tlsSpace = 0;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;
   info->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize = nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
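      // assumption: the invocations of a patch occupy consecutive lanes, so
      // (lane id - invocation id) recovers the first lane of this patch;
      // per-vertex outputs are later addressed relative to that base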
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
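      // keep the reciprocal of position.w around in fp.position; it is fed
      // to OP_PINTERP as the extra source for perspective interpolation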
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_foreach_register(reg, &function->impl->registers) {
      if (reg->num_array_elems) {
         // TODO: packed variables would be nice, but MemoryOpt fails
         // replace 4 with reg->num_components
         uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
         regToLmemOffset[reg->index] = info->bin.tlsSpace;
         info->bin.tlsSpace += size;
      }
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info->io.genUserClip > 0)
      handleUserClipPlanes();
   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

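// control-flow sketch for an if: branch to the else block when the condition
// is 0 and fall through into the then block otherwise; any side that is not
// already terminated branches to the common successor. JOINAT/JOIN are only
// emitted for the outermost if whose two sides reconverge in the same block,
// marking the reconvergence point for the hardware.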
bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the outermost if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

// TODO: add convergence
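// loop shape: PREBREAK marks the block after the loop as the break target,
// PRECONT marks the header as the continue target, and the body falls back
// to the header with CONT unless it already ended in a terminator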
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on-the-fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
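      // the constant offset is counted in vec4 slots while constbuf
      // addresses are in bytes, hence the multiplication by 16 below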
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // depth value in the Z component, NIR in X
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_TESSELLATION_EVAL:
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
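      // (framebuffer reads are implemented as a levelZero TXF from a 2D MS
      // array texture, addressed by pixel position, layer and sample index)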
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < dest_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode = immMode.reg.data.u32;
         }
         if (mode == NV50_IR_INTERP_DEFAULT)
            mode |= translateInterpMode(&vary, nvirOp);
         else {
            if (vary.linear) {
               nvirOp = OP_LINTERP;
               mode |= NV50_IR_INTERP_LINEAR;
            } else {
               nvirOp = OP_PINTERP;
               mode |= NV50_IR_INTERP_PERSPECTIVE;
            }
         }
      }

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_kernel_input: {
      assert(prog->getType() == Program::TYPE_COMPUTE);
      assert(insn->num_components == 1);

      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t idx = getIndirect(insn, 0, 0, indirect, true);

      mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
      break;
   }
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
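         // clamp the offset to the supported range [-0.5, +0.4375], scale to
         // fixed point (multiply by 2^12), and pack the two components into
         // 16-bit fields with INSBF (0x1010 = width 16, insert at bit 16)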
1779 Value *offs[2];
1780 for (uint8_t c = 0; c < 2; c++) {
1781 offs[c] = getScratch();
1782 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1783 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1784 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1785 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1786 }
1787 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1788
1789 mode = NV50_IR_INTERP_OFFSET;
1790 } else if (op == nir_intrinsic_load_barycentric_pixel) {
1791 mode = NV50_IR_INTERP_DEFAULT;
1792 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
1793 info->prop.fp.readsSampleLocations = true;
1794 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
1795 mode = NV50_IR_INTERP_OFFSET;
1796 } else {
1797 unreachable("all intrinsics already handled above");
1798 }
1799
1800 loadImm(newDefs[1], mode);
1801 break;
1802 }
1803 case nir_intrinsic_demote:
1804 case nir_intrinsic_discard:
1805 mkOp(OP_DISCARD, TYPE_NONE, NULL);
1806 break;
1807 case nir_intrinsic_demote_if:
1808 case nir_intrinsic_discard_if: {
1809 Value *pred = getSSA(1, FILE_PREDICATE);
1810 if (insn->num_components > 1) {
1811 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
1812 assert(false);
1813 return false;
1814 }
1815 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1816 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
1817 break;
1818 }
1819 case nir_intrinsic_load_base_vertex:
1820 case nir_intrinsic_load_base_instance:
1821 case nir_intrinsic_load_draw_id:
1822 case nir_intrinsic_load_front_face:
1823 case nir_intrinsic_is_helper_invocation:
1824 case nir_intrinsic_load_helper_invocation:
1825 case nir_intrinsic_load_instance_id:
1826 case nir_intrinsic_load_invocation_id:
1827 case nir_intrinsic_load_local_group_size:
1828 case nir_intrinsic_load_local_invocation_id:
1829 case nir_intrinsic_load_num_work_groups:
1830 case nir_intrinsic_load_patch_vertices_in:
1831 case nir_intrinsic_load_primitive_id:
1832 case nir_intrinsic_load_sample_id:
1833 case nir_intrinsic_load_sample_mask_in:
1834 case nir_intrinsic_load_sample_pos:
1835 case nir_intrinsic_load_subgroup_eq_mask:
1836 case nir_intrinsic_load_subgroup_ge_mask:
1837 case nir_intrinsic_load_subgroup_gt_mask:
1838 case nir_intrinsic_load_subgroup_le_mask:
1839 case nir_intrinsic_load_subgroup_lt_mask:
1840 case nir_intrinsic_load_subgroup_invocation:
1841 case nir_intrinsic_load_tess_coord:
1842 case nir_intrinsic_load_tess_level_inner:
1843 case nir_intrinsic_load_tess_level_outer:
1844 case nir_intrinsic_load_vertex_id:
1845 case nir_intrinsic_load_work_group_id: {
1846 const DataType dType = getDType(insn);
1847 SVSemantic sv = convert(op);
1848 LValues &newDefs = convert(&insn->dest);
1849
1850 for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
1851 Value *def;
1852 if (typeSizeof(dType) == 8)
1853 def = getSSA();
1854 else
1855 def = newDefs[i];
1856
1857 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
1858 loadImm(def, 0u);
1859 } else {
1860 Symbol *sym = mkSysVal(sv, i);
1861 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
1862 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
1863 rdsv->perPatch = 1;
1864 }
1865
1866 if (typeSizeof(dType) == 8)
1867 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
1868 }
1869 break;
1870 }
1871 // constants
1872 case nir_intrinsic_load_subgroup_size: {
1873 LValues &newDefs = convert(&insn->dest);
1874 loadImm(newDefs[0], 32u);
1875 break;
1876 }
1877 case nir_intrinsic_vote_all:
1878 case nir_intrinsic_vote_any:
1879 case nir_intrinsic_vote_ieq: {
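// compare the bool32 source against zero to get a predicate, run VOTE on it
// (the subop selects the vote mode), then widen the predicate result back to
// a bool32.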
1880 LValues &newDefs = convert(&insn->dest);
1881 Value *pred = getScratch(1, FILE_PREDICATE);
1882 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1883 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1884 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1885 break;
1886 }
1887 case nir_intrinsic_ballot: {
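// VOTE also produces the 32-bit mask of lanes whose predicate is set, so
// VOTE ANY with a GPR destination yields the ballot mask directly.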
1888 LValues &newDefs = convert(&insn->dest);
1889 Value *pred = getSSA(1, FILE_PREDICATE);
1890 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1891 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1892 break;
1893 }
1894 case nir_intrinsic_read_first_invocation:
1895 case nir_intrinsic_read_invocation: {
1896 LValues &newDefs = convert(&insn->dest);
1897 const DataType dType = getDType(insn);
1898 Value *tmp = getScratch();
1899
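// for read_first_invocation, compute the index of the lowest active lane:
// VOTE ANY on an immediate 1 yields the active-lane mask, and BREV + BFIND
// (shift-amount mode) gives the position of its lowest set bit. SHFL.IDX then
// broadcasts that lane's value.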
1900 if (op == nir_intrinsic_read_first_invocation) {
1901 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
1902 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
1903 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
1904 } else
1905 tmp = getSrc(&insn->src[1], 0);
1906
1907 for (uint8_t i = 0; i < dest_components; ++i) {
1908 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
1909 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
1910 }
1911 break;
1912 }
1913 case nir_intrinsic_load_per_vertex_input: {
1914 const DataType dType = getDType(insn);
1915 LValues &newDefs = convert(&insn->dest);
1916 Value *indirectVertex;
1917 Value *indirectOffset;
1918 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1919 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1920
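// PFETCH presumably converts the (base + indirect) vertex index into the
// per-vertex base address used by the input loads below.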
1921 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1922 mkImm(baseVertex), indirectVertex);
1923 for (uint8_t i = 0u; i < dest_components; ++i) {
1924 uint32_t address = getSlotAddress(insn, idx, i);
1925 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
1926 indirectOffset, vtxBase, info->in[idx].patch);
1927 }
1928 break;
1929 }
1930 case nir_intrinsic_load_per_vertex_output: {
1931 const DataType dType = getDType(insn);
1932 LValues &newDefs = convert(&insn->dest);
1933 Value *indirectVertex;
1934 Value *indirectOffset;
1935 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1936 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1937 Value *vtxBase = NULL;
1938
1939 if (indirectVertex)
1940 vtxBase = indirectVertex;
1941 else
1942 vtxBase = loadImm(NULL, baseVertex);
1943
1944 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
1945
1946 for (uint8_t i = 0u; i < dest_components; ++i) {
1947 uint32_t address = getSlotAddress(insn, idx, i);
1948 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
1949 indirectOffset, vtxBase, info->in[idx].patch);
1950 }
1951 break;
1952 }
1953 case nir_intrinsic_emit_vertex: {
1954 if (info->io.genUserClip > 0)
1955 handleUserClipPlanes();
1956 uint32_t idx = nir_intrinsic_stream_id(insn);
1957 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1958 break;
1959 }
1960 case nir_intrinsic_end_primitive: {
1961 uint32_t idx = nir_intrinsic_stream_id(insn);
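// seemingly only stream 0 can be restarted; end_primitive on other streams
// is dropped.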
1962 if (idx)
1963 break;
1964 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1965 break;
1966 }
1967 case nir_intrinsic_load_ubo: {
1968 const DataType dType = getDType(insn);
1969 LValues &newDefs = convert(&insn->dest);
1970 Value *indirectIndex;
1971 Value *indirectOffset;
1972 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
1973 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
1974
1975 for (uint8_t i = 0u; i < dest_components; ++i) {
1976 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
1977 indirectOffset, indirectIndex);
1978 }
1979 break;
1980 }
1981 case nir_intrinsic_get_buffer_size: {
1982 LValues &newDefs = convert(&insn->dest);
1983 const DataType dType = getDType(insn);
1984 Value *indirectBuffer;
1985 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
1986
1987 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
1988 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
1989 break;
1990 }
1991 case nir_intrinsic_store_ssbo: {
1992 DataType sType = getSType(insn->src[0], false, false);
1993 Value *indirectBuffer;
1994 Value *indirectOffset;
1995 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
1996 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
1997
1998 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
1999 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2000 continue;
2001 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2002 offset + i * typeSizeof(sType));
2003 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2004 ->setIndirect(0, 1, indirectBuffer);
2005 }
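// globalAccess is a bitmask: 0x1 marks reads, 0x2 marks writes.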
2006 info->io.globalAccess |= 0x2;
2007 break;
2008 }
2009 case nir_intrinsic_load_ssbo: {
2010 const DataType dType = getDType(insn);
2011 LValues &newDefs = convert(&insn->dest);
2012 Value *indirectBuffer;
2013 Value *indirectOffset;
2014 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2015 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2016
2017 for (uint8_t i = 0u; i < dest_components; ++i)
2018 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2019 indirectOffset, indirectBuffer);
2020
2021 info->io.globalAccess |= 0x1;
2022 break;
2023 }
2024 case nir_intrinsic_shared_atomic_add:
2025 case nir_intrinsic_shared_atomic_and:
2026 case nir_intrinsic_shared_atomic_comp_swap:
2027 case nir_intrinsic_shared_atomic_exchange:
2028 case nir_intrinsic_shared_atomic_or:
2029 case nir_intrinsic_shared_atomic_imax:
2030 case nir_intrinsic_shared_atomic_imin:
2031 case nir_intrinsic_shared_atomic_umax:
2032 case nir_intrinsic_shared_atomic_umin:
2033 case nir_intrinsic_shared_atomic_xor: {
2034 const DataType dType = getDType(insn);
2035 LValues &newDefs = convert(&insn->dest);
2036 Value *indirectOffset;
2037 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2038 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2039 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2040 if (op == nir_intrinsic_shared_atomic_comp_swap)
2041 atom->setSrc(2, getSrc(&insn->src[2], 0));
2042 atom->setIndirect(0, 0, indirectOffset);
2043 atom->subOp = getSubOp(op);
2044 break;
2045 }
2046 case nir_intrinsic_ssbo_atomic_add:
2047 case nir_intrinsic_ssbo_atomic_and:
2048 case nir_intrinsic_ssbo_atomic_comp_swap:
2049 case nir_intrinsic_ssbo_atomic_exchange:
2050 case nir_intrinsic_ssbo_atomic_or:
2051 case nir_intrinsic_ssbo_atomic_imax:
2052 case nir_intrinsic_ssbo_atomic_imin:
2053 case nir_intrinsic_ssbo_atomic_umax:
2054 case nir_intrinsic_ssbo_atomic_umin:
2055 case nir_intrinsic_ssbo_atomic_xor: {
2056 const DataType dType = getDType(insn);
2057 LValues &newDefs = convert(&insn->dest);
2058 Value *indirectBuffer;
2059 Value *indirectOffset;
2060 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2061 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2062
2063 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2064 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2065 getSrc(&insn->src[2], 0));
2066 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2067 atom->setSrc(2, getSrc(&insn->src[3], 0));
2068 atom->setIndirect(0, 0, indirectOffset);
2069 atom->setIndirect(0, 1, indirectBuffer);
2070 atom->subOp = getSubOp(op);
2071
2072 info->io.globalAccess |= 0x2;
2073 break;
2074 }
2075 case nir_intrinsic_global_atomic_add:
2076 case nir_intrinsic_global_atomic_and:
2077 case nir_intrinsic_global_atomic_comp_swap:
2078 case nir_intrinsic_global_atomic_exchange:
2079 case nir_intrinsic_global_atomic_or:
2080 case nir_intrinsic_global_atomic_imax:
2081 case nir_intrinsic_global_atomic_imin:
2082 case nir_intrinsic_global_atomic_umax:
2083 case nir_intrinsic_global_atomic_umin:
2084 case nir_intrinsic_global_atomic_xor: {
2085 const DataType dType = getDType(insn);
2086 LValues &newDefs = convert(&insn->dest);
2087 Value *address;
2088 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2089
2090 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2091 Instruction *atom =
2092 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2093 atom->setIndirect(0, 0, address);
2094 atom->subOp = getSubOp(op);
2095
2096 info->io.globalAccess |= 0x2;
2097 break;
2098 }
2099 case nir_intrinsic_bindless_image_atomic_add:
2100 case nir_intrinsic_bindless_image_atomic_and:
2101 case nir_intrinsic_bindless_image_atomic_comp_swap:
2102 case nir_intrinsic_bindless_image_atomic_exchange:
2103 case nir_intrinsic_bindless_image_atomic_imax:
2104 case nir_intrinsic_bindless_image_atomic_umax:
2105 case nir_intrinsic_bindless_image_atomic_imin:
2106 case nir_intrinsic_bindless_image_atomic_umin:
2107 case nir_intrinsic_bindless_image_atomic_or:
2108 case nir_intrinsic_bindless_image_atomic_xor:
2109 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2110 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2111 case nir_intrinsic_bindless_image_load:
2112 case nir_intrinsic_bindless_image_samples:
2113 case nir_intrinsic_bindless_image_size:
2114 case nir_intrinsic_bindless_image_store:
2115 case nir_intrinsic_image_atomic_add:
2116 case nir_intrinsic_image_atomic_and:
2117 case nir_intrinsic_image_atomic_comp_swap:
2118 case nir_intrinsic_image_atomic_exchange:
2119 case nir_intrinsic_image_atomic_imax:
2120 case nir_intrinsic_image_atomic_umax:
2121 case nir_intrinsic_image_atomic_imin:
2122 case nir_intrinsic_image_atomic_umin:
2123 case nir_intrinsic_image_atomic_or:
2124 case nir_intrinsic_image_atomic_xor:
2125 case nir_intrinsic_image_atomic_inc_wrap:
2126 case nir_intrinsic_image_atomic_dec_wrap:
2127 case nir_intrinsic_image_load:
2128 case nir_intrinsic_image_samples:
2129 case nir_intrinsic_image_size:
2130 case nir_intrinsic_image_store: {
2131 std::vector<Value*> srcs, defs;
2132 Value *indirect;
2133 DataType ty;
2134
2135 uint32_t mask = 0;
2136 TexInstruction::Target target =
2137 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2138 unsigned int argCount = getNIRArgCount(target);
2139 uint16_t location = 0;
2140
2141 if (opInfo.has_dest) {
2142 LValues &newDefs = convert(&insn->dest);
2143 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2144 defs.push_back(newDefs[i]);
2145 mask |= 1 << i;
2146 }
2147 }
2148
2149 int lod_src = -1;
2150 bool bindless = false;
2151 switch (op) {
2152 case nir_intrinsic_bindless_image_atomic_add:
2153 case nir_intrinsic_bindless_image_atomic_and:
2154 case nir_intrinsic_bindless_image_atomic_comp_swap:
2155 case nir_intrinsic_bindless_image_atomic_exchange:
2156 case nir_intrinsic_bindless_image_atomic_imax:
2157 case nir_intrinsic_bindless_image_atomic_umax:
2158 case nir_intrinsic_bindless_image_atomic_imin:
2159 case nir_intrinsic_bindless_image_atomic_umin:
2160 case nir_intrinsic_bindless_image_atomic_or:
2161 case nir_intrinsic_bindless_image_atomic_xor:
2162 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2163 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2164 ty = getDType(insn);
2165 bindless = true;
2166 info->io.globalAccess |= 0x2;
2167 mask = 0x1;
2168 break;
2169 case nir_intrinsic_image_atomic_add:
2170 case nir_intrinsic_image_atomic_and:
2171 case nir_intrinsic_image_atomic_comp_swap:
2172 case nir_intrinsic_image_atomic_exchange:
2173 case nir_intrinsic_image_atomic_imax:
2174 case nir_intrinsic_image_atomic_umax:
2175 case nir_intrinsic_image_atomic_imin:
2176 case nir_intrinsic_image_atomic_umin:
2177 case nir_intrinsic_image_atomic_or:
2178 case nir_intrinsic_image_atomic_xor:
2179 case nir_intrinsic_image_atomic_inc_wrap:
2180 case nir_intrinsic_image_atomic_dec_wrap:
2181 ty = getDType(insn);
2182 bindless = false;
2183 info->io.globalAccess |= 0x2;
2184 mask = 0x1;
2185 break;
2186 case nir_intrinsic_bindless_image_load:
2187 case nir_intrinsic_image_load:
2188 ty = TYPE_U32;
2189 bindless = op == nir_intrinsic_bindless_image_load;
2190 info->io.globalAccess |= 0x1;
2191 lod_src = 4;
2192 break;
2193 case nir_intrinsic_bindless_image_store:
2194 case nir_intrinsic_image_store:
2195 ty = TYPE_U32;
2196 bindless = op == nir_intrinsic_bindless_image_store;
2197 info->io.globalAccess |= 0x2;
2198 lod_src = 5;
2199 mask = 0xf;
2200 break;
2201 case nir_intrinsic_bindless_image_samples:
2202 case nir_intrinsic_image_samples:
2203 ty = TYPE_U32;
2204 bindless = op == nir_intrinsic_bindless_image_samples;
2205 mask = 0x8;
2206 break;
2207 case nir_intrinsic_bindless_image_size:
2208 case nir_intrinsic_image_size:
2209 ty = TYPE_U32;
2210 bindless = op == nir_intrinsic_bindless_image_size;
2211 break;
2212 default:
2213 unreachable("unhandled image opcode");
2214 break;
2215 }
2216
2217 if (bindless)
2218 indirect = getSrc(&insn->src[0], 0);
2219 else
2220 location = getIndirect(&insn->src[0], 0, indirect);
2221
2222 // coords
2223 if (opInfo.num_srcs >= 2)
2224 for (unsigned int i = 0u; i < argCount; ++i)
2225 srcs.push_back(getSrc(&insn->src[1], i));
2226
2227 // for MS images, the sample index is just another src added after the coords
2228 if (opInfo.num_srcs >= 3 && target.isMS())
2229 srcs.push_back(getSrc(&insn->src[2], 0));
2230
2231 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2232 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2233 for (uint8_t i = 0u; i < components; ++i)
2234 srcs.push_back(getSrc(&insn->src[3], i));
2235 }
2236
2237 if (opInfo.num_srcs >= 5 && lod_src != 5)
2238 // one extra source for the atomic compare-and-swap value
2239 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2240 srcs.push_back(getSrc(&insn->src[4], i));
2241
2242 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2243 texi->tex.bindless = bindless;
2244 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2245 texi->tex.mask = mask;
2246 texi->cache = convert(nir_intrinsic_access(insn));
2247 texi->setType(ty);
2248 texi->subOp = getSubOp(op);
2249
2250 if (indirect)
2251 texi->setIndirectR(indirect);
2252
2253 break;
2254 }
2255 case nir_intrinsic_store_shared: {
2256 DataType sType = getSType(insn->src[0], false, false);
2257 Value *indirectOffset;
2258 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2259
2260 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2261 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2262 continue;
2263 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
2264 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2265 }
2266 break;
2267 }
2268 case nir_intrinsic_load_shared: {
2269 const DataType dType = getDType(insn);
2270 LValues &newDefs = convert(&insn->dest);
2271 Value *indirectOffset;
2272 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2273
2274 for (uint8_t i = 0u; i < dest_components; ++i)
2275 loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
2276
2277 break;
2278 }
2279 case nir_intrinsic_control_barrier: {
2280 // TODO: add flag to shader_info
2281 info->numBarriers = 1;
2282 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2283 bar->fixed = 1;
2284 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2285 break;
2286 }
2287 case nir_intrinsic_group_memory_barrier:
2288 case nir_intrinsic_memory_barrier:
2289 case nir_intrinsic_memory_barrier_buffer:
2290 case nir_intrinsic_memory_barrier_image:
2291 case nir_intrinsic_memory_barrier_shared: {
2292 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2293 bar->fixed = 1;
2294 bar->subOp = getSubOp(op);
2295 break;
2296 }
2297 case nir_intrinsic_memory_barrier_tcs_patch:
2298 break;
2299 case nir_intrinsic_shader_clock: {
2300 const DataType dType = getDType(insn);
2301 LValues &newDefs = convert(&insn->dest);
2302
2303 loadImm(newDefs[0], 0u);
2304 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2305 break;
2306 }
2307 case nir_intrinsic_load_global: {
2308 const DataType dType = getDType(insn);
2309 LValues &newDefs = convert(&insn->dest);
2310 Value *indirectOffset;
2311 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2312
2313 for (auto i = 0u; i < dest_components; ++i)
2314 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2315
2316 info->io.globalAccess |= 0x1;
2317 break;
2318 }
2319 case nir_intrinsic_store_global: {
2320 DataType sType = getSType(insn->src[0], false, false);
2321
2322 for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2323 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2324 continue;
2325 if (typeSizeof(sType) == 8) {
2326 Value *split[2];
2327 mkSplit(split, 4, getSrc(&insn->src[0], i));
2328
2329 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2330 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2331
2332 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2333 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2334 } else {
2335 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2336 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2337 }
2338 }
2339
2340 info->io.globalAccess |= 0x2;
2341 break;
2342 }
2343 default:
2344 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2345 return false;
2346 }
2347
2348 return true;
2349 }
2350
2351 bool
2352 Converter::visit(nir_jump_instr *insn)
2353 {
2354 switch (insn->type) {
2355 case nir_jump_return:
2356 // TODO: this only works in the main function
2357 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2358 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2359 break;
2360 case nir_jump_break:
2361 case nir_jump_continue: {
2362 bool isBreak = insn->type == nir_jump_break;
2363 nir_block *block = insn->instr.block;
2364 BasicBlock *target = convert(block->successors[0]);
2365 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2366 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2367 break;
2368 }
2369 default:
2370 ERROR("unknown nir_jump_type %u\n", insn->type);
2371 return false;
2372 }
2373
2374 return true;
2375 }
2376
2377 Value*
2378 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2379 {
2380 Value *val;
2381
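// place the immediate load after the last one we materialized, or at the top
// of the block, presumably so it dominates all of its uses; the insert
// position is restored afterwards.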
2382 if (immInsertPos)
2383 setPosition(immInsertPos, true);
2384 else
2385 setPosition(bb, false);
2386
2387 switch (insn->def.bit_size) {
2388 case 64:
2389 val = loadImm(getSSA(8), insn->value[idx].u64);
2390 break;
2391 case 32:
2392 val = loadImm(getSSA(4), insn->value[idx].u32);
2393 break;
2394 case 16:
2395 val = loadImm(getSSA(2), insn->value[idx].u16);
2396 break;
2397 case 8:
2398 val = loadImm(getSSA(1), insn->value[idx].u8);
2399 break;
2400 default:
2401 unreachable("unhandled bit size!\n");
2402 }
2403 setPosition(bb, true);
2404 return val;
2405 }
2406
2407 bool
2408 Converter::visit(nir_load_const_instr *insn)
2409 {
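// nothing is emitted here: immediates are remembered and materialized lazily,
// on first use, by convert() above.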
2410 assert(insn->def.bit_size <= 64);
2411 immediates[insn->def.index] = insn;
2412 return true;
2413 }
2414
2415 #define DEFAULT_CHECKS \
2416 if (insn->dest.dest.ssa.num_components > 1) { \
2417 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2418 return false; \
2419 } \
2420 if (insn->dest.write_mask != 1) { \
2421 ERROR("nir_alu_instr only supported with a write_mask of 1!\n"); \
2422 return false; \
2423 }
2424 bool
2425 Converter::visit(nir_alu_instr *insn)
2426 {
2427 const nir_op op = insn->op;
2428 const nir_op_info &info = nir_op_infos[op];
2429 DataType dType = getDType(insn);
2430 const std::vector<DataType> sTypes = getSTypes(insn);
2431
2432 Instruction *oldPos = this->bb->getExit();
2433
2434 switch (op) {
2435 case nir_op_fabs:
2436 case nir_op_iabs:
2437 case nir_op_fadd:
2438 case nir_op_iadd:
2439 case nir_op_iand:
2440 case nir_op_fceil:
2441 case nir_op_fcos:
2442 case nir_op_fddx:
2443 case nir_op_fddx_coarse:
2444 case nir_op_fddx_fine:
2445 case nir_op_fddy:
2446 case nir_op_fddy_coarse:
2447 case nir_op_fddy_fine:
2448 case nir_op_fdiv:
2449 case nir_op_idiv:
2450 case nir_op_udiv:
2451 case nir_op_fexp2:
2452 case nir_op_ffloor:
2453 case nir_op_ffma:
2454 case nir_op_flog2:
2455 case nir_op_fmax:
2456 case nir_op_imax:
2457 case nir_op_umax:
2458 case nir_op_fmin:
2459 case nir_op_imin:
2460 case nir_op_umin:
2461 case nir_op_fmod:
2462 case nir_op_imod:
2463 case nir_op_umod:
2464 case nir_op_fmul:
2465 case nir_op_imul:
2466 case nir_op_imul_high:
2467 case nir_op_umul_high:
2468 case nir_op_fneg:
2469 case nir_op_ineg:
2470 case nir_op_inot:
2471 case nir_op_ior:
2472 case nir_op_pack_64_2x32_split:
2473 case nir_op_fpow:
2474 case nir_op_frcp:
2475 case nir_op_frem:
2476 case nir_op_irem:
2477 case nir_op_frsq:
2478 case nir_op_fsat:
2479 case nir_op_ishr:
2480 case nir_op_ushr:
2481 case nir_op_fsin:
2482 case nir_op_fsqrt:
2483 case nir_op_ftrunc:
2484 case nir_op_ishl:
2485 case nir_op_ixor: {
2486 DEFAULT_CHECKS;
2487 LValues &newDefs = convert(&insn->dest);
2488 operation preOp = preOperationNeeded(op);
2489 if (preOp != OP_NOP) {
2490 assert(info.num_inputs < 2);
2491 Value *tmp = getSSA(typeSizeof(dType));
2492 Instruction *i0 = mkOp(preOp, dType, tmp);
2493 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2494 if (info.num_inputs) {
2495 i0->setSrc(0, getSrc(&insn->src[0]));
2496 i1->setSrc(0, tmp);
2497 }
2498 i1->subOp = getSubOp(op);
2499 } else {
2500 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2501 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2502 i->setSrc(s, getSrc(&insn->src[s]));
2503 }
2504 i->subOp = getSubOp(op);
2505 }
2506 break;
2507 }
2508 case nir_op_ifind_msb:
2509 case nir_op_ufind_msb: {
2510 DEFAULT_CHECKS;
2511 LValues &newDefs = convert(&insn->dest);
2512 dType = sTypes[0];
2513 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2514 break;
2515 }
2516 case nir_op_fround_even: {
2517 DEFAULT_CHECKS;
2518 LValues &newDefs = convert(&insn->dest);
2519 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2520 break;
2521 }
2522 // convert instructions
2523 case nir_op_f2f32:
2524 case nir_op_f2i32:
2525 case nir_op_f2u32:
2526 case nir_op_i2f32:
2527 case nir_op_i2i32:
2528 case nir_op_u2f32:
2529 case nir_op_u2u32:
2530 case nir_op_f2f64:
2531 case nir_op_f2i64:
2532 case nir_op_f2u64:
2533 case nir_op_i2f64:
2534 case nir_op_i2i64:
2535 case nir_op_u2f64:
2536 case nir_op_u2u64: {
2537 DEFAULT_CHECKS;
2538 LValues &newDefs = convert(&insn->dest);
2539 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2540 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2541 i->rnd = ROUND_Z;
2542 i->sType = sTypes[0];
2543 break;
2544 }
2545 // compare instructions
2546 case nir_op_feq32:
2547 case nir_op_ieq32:
2548 case nir_op_fge32:
2549 case nir_op_ige32:
2550 case nir_op_uge32:
2551 case nir_op_flt32:
2552 case nir_op_ilt32:
2553 case nir_op_ult32:
2554 case nir_op_fne32:
2555 case nir_op_ine32: {
2556 DEFAULT_CHECKS;
2557 LValues &newDefs = convert(&insn->dest);
2558 Instruction *i = mkCmp(getOperation(op),
2559 getCondCode(op),
2560 dType,
2561 newDefs[0],
2562 dType,
2563 getSrc(&insn->src[0]),
2564 getSrc(&insn->src[1]));
2565 if (info.num_inputs == 3)
2566 i->setSrc(2, getSrc(&insn->src[2]));
2567 i->sType = sTypes[0];
2568 break;
2569 }
2570 // these are weird ALU ops and need special handling, because
2571 // 1. they are always component-based
2572 // 2. they basically just merge multiple values into one data type
2573 case nir_op_mov:
2574 if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
2575 nir_reg_dest& reg = insn->dest.dest.reg;
2576 uint32_t goffset = regToLmemOffset[reg.reg->index];
2577 uint8_t comps = reg.reg->num_components;
2578 uint8_t size = reg.reg->bit_size / 8;
2579 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2580 uint32_t aoffset = csize * reg.base_offset;
2581 Value *indirect = NULL;
2582
2583 if (reg.indirect)
2584 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
2585 getSrc(reg.indirect, 0), mkImm(csize));
2586
2587 for (uint8_t i = 0u; i < comps; ++i) {
2588 if (!((1u << i) & insn->dest.write_mask))
2589 continue;
2590
2591 Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
2592 mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
2593 }
2594 break;
2595 } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
2596 LValues &newDefs = convert(&insn->dest);
2597 nir_reg_src& reg = insn->src[0].src.reg;
2598 uint32_t goffset = regToLmemOffset[reg.reg->index];
2599 // uint8_t comps = reg.reg->num_components;
2600 uint8_t size = reg.reg->bit_size / 8;
2601 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2602 uint32_t aoffset = csize * reg.base_offset;
2603 Value *indirect = NULL;
2604
2605 if (reg.indirect)
2606 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));
2607
2608 for (uint8_t i = 0u; i < newDefs.size(); ++i)
2609 loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
2610
2611 break;
2612 } else {
2613 LValues &newDefs = convert(&insn->dest);
2614 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2615 mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
2616 }
2617 }
2618 break;
2619 case nir_op_vec2:
2620 case nir_op_vec3:
2621 case nir_op_vec4:
2622 case nir_op_vec8:
2623 case nir_op_vec16: {
2624 LValues &newDefs = convert(&insn->dest);
2625 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2626 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2627 }
2628 break;
2629 }
2630 // (un)pack
2631 case nir_op_pack_64_2x32: {
2632 LValues &newDefs = convert(&insn->dest);
2633 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2634 merge->setSrc(0, getSrc(&insn->src[0], 0));
2635 merge->setSrc(1, getSrc(&insn->src[0], 1));
2636 break;
2637 }
2638 case nir_op_pack_half_2x16_split: {
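// round both sources to f16, then INSBF (insert 16 bits at offset 16) places
// the second one in the high half of the result.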
2639 LValues &newDefs = convert(&insn->dest);
2640 Value *tmpH = getSSA();
2641 Value *tmpL = getSSA();
2642
2643 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2644 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2645 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2646 break;
2647 }
2648 case nir_op_unpack_half_2x16_split_x:
2649 case nir_op_unpack_half_2x16_split_y: {
2650 LValues &newDefs = convert(&insn->dest);
2651 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2652 if (op == nir_op_unpack_half_2x16_split_y)
2653 cvt->subOp = 1;
2654 break;
2655 }
2656 case nir_op_unpack_64_2x32: {
2657 LValues &newDefs = convert(&insn->dest);
2658 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2659 break;
2660 }
2661 case nir_op_unpack_64_2x32_split_x: {
2662 LValues &newDefs = convert(&insn->dest);
2663 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2664 break;
2665 }
2666 case nir_op_unpack_64_2x32_split_y: {
2667 LValues &newDefs = convert(&insn->dest);
2668 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2669 break;
2670 }
2671 // special instructions
2672 case nir_op_fsign:
2673 case nir_op_isign: {
2674 DEFAULT_CHECKS;
2675 DataType iType;
2676 if (::isFloatType(dType))
2677 iType = TYPE_F32;
2678 else
2679 iType = TYPE_S32;
2680
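// sign(x) is computed as the difference of the two comparisons; integer SET
// produces 0/-1 while float SET produces 0.0/1.0, hence the swapped operand
// order between the integer and float cases below.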
2681 LValues &newDefs = convert(&insn->dest);
2682 LValue *val0 = getScratch();
2683 LValue *val1 = getScratch();
2684 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2685 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2686
2687 if (dType == TYPE_F64) {
2688 mkOp2(OP_SUB, iType, val0, val0, val1);
2689 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2690 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2691 mkOp2(OP_SUB, iType, val0, val1, val0);
2692 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2693 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2694 } else if (::isFloatType(dType))
2695 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2696 else
2697 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2698 break;
2699 }
2700 case nir_op_fcsel:
2701 case nir_op_b32csel: {
2702 DEFAULT_CHECKS;
2703 LValues &newDefs = convert(&insn->dest);
2704 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2705 break;
2706 }
2707 case nir_op_ibitfield_extract:
2708 case nir_op_ubitfield_extract: {
2709 DEFAULT_CHECKS;
2710 Value *tmp = getSSA();
2711 LValues &newDefs = convert(&insn->dest);
2712 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2713 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2714 break;
2715 }
2716 case nir_op_bfm: {
2717 DEFAULT_CHECKS;
2718 LValues &newDefs = convert(&insn->dest);
2719 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2720 break;
2721 }
2722 case nir_op_bitfield_insert: {
2723 DEFAULT_CHECKS;
2724 LValues &newDefs = convert(&insn->dest);
2725 LValue *temp = getSSA();
2726 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2727 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2728 break;
2729 }
2730 case nir_op_bit_count: {
2731 DEFAULT_CHECKS;
2732 LValues &newDefs = convert(&insn->dest);
2733 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2734 break;
2735 }
2736 case nir_op_bitfield_reverse: {
2737 DEFAULT_CHECKS;
2738 LValues &newDefs = convert(&insn->dest);
2739 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2740 break;
2741 }
2742 case nir_op_find_lsb: {
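// there is no find-lsb instruction: reverse the bits and use BFIND in
// shift-amount mode, which yields the position of the original lowest set bit.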
2743 DEFAULT_CHECKS;
2744 LValues &newDefs = convert(&insn->dest);
2745 Value *tmp = getSSA();
2746 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2747 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2748 break;
2749 }
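// the extract_{u,i}{8,16} ops below use PERMT: a control word derived from the
// index in src[1] selects the requested byte(s) into the low bits, while the
// remaining selector nibbles either pick zero bytes from the third source
// (the 0x4 selectors) or replicate the sign bit (the 0x8/0x9 selectors).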
2750 case nir_op_extract_u8: {
2751 DEFAULT_CHECKS;
2752 LValues &newDefs = convert(&insn->dest);
2753 Value *prmt = getSSA();
2754 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2755 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2756 break;
2757 }
2758 case nir_op_extract_i8: {
2759 DEFAULT_CHECKS;
2760 LValues &newDefs = convert(&insn->dest);
2761 Value *prmt = getSSA();
2762 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2763 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2764 break;
2765 }
2766 case nir_op_extract_u16: {
2767 DEFAULT_CHECKS;
2768 LValues &newDefs = convert(&insn->dest);
2769 Value *prmt = getSSA();
2770 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2771 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2772 break;
2773 }
2774 case nir_op_extract_i16: {
2775 DEFAULT_CHECKS;
2776 LValues &newDefs = convert(&insn->dest);
2777 Value *prmt = getSSA();
2778 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2779 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2780 break;
2781 }
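// rotates are implemented as funnel shifts (SHF in wrap mode) with the same
// value wired to both shift inputs.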
2782 case nir_op_urol: {
2783 DEFAULT_CHECKS;
2784 LValues &newDefs = convert(&insn->dest);
2785 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2786 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2787 ->subOp = NV50_IR_SUBOP_SHF_L |
2788 NV50_IR_SUBOP_SHF_W |
2789 NV50_IR_SUBOP_SHF_HI;
2790 break;
2791 }
2792 case nir_op_uror: {
2793 DEFAULT_CHECKS;
2794 LValues &newDefs = convert(&insn->dest);
2795 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2796 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2797 ->subOp = NV50_IR_SUBOP_SHF_R |
2798 NV50_IR_SUBOP_SHF_W |
2799 NV50_IR_SUBOP_SHF_LO;
2800 break;
2801 }
2802 // boolean conversions
2803 case nir_op_b2f32: {
2804 DEFAULT_CHECKS;
2805 LValues &newDefs = convert(&insn->dest);
2806 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2807 break;
2808 }
2809 case nir_op_b2f64: {
2810 DEFAULT_CHECKS;
2811 LValues &newDefs = convert(&insn->dest);
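// 0x3ff00000 is the high word of the double 1.0; AND it with the bool and
// merge with a zero low word to produce either 0.0 or 1.0.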
2812 Value *tmp = getSSA(4);
2813 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2814 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2815 break;
2816 }
2817 case nir_op_f2b32:
2818 case nir_op_i2b32: {
2819 DEFAULT_CHECKS;
2820 LValues &newDefs = convert(&insn->dest);
2821 Value *src1;
2822 if (typeSizeof(sTypes[0]) == 8) {
2823 src1 = loadImm(getSSA(8), 0.0);
2824 } else {
2825 src1 = zero;
2826 }
2827 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2828 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2829 break;
2830 }
2831 case nir_op_b2i32: {
2832 DEFAULT_CHECKS;
2833 LValues &newDefs = convert(&insn->dest);
2834 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2835 break;
2836 }
2837 case nir_op_b2i64: {
2838 DEFAULT_CHECKS;
2839 LValues &newDefs = convert(&insn->dest);
2840 LValue *def = getScratch();
2841 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2842 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2843 break;
2844 }
2845 default:
2846 ERROR("unknown nir_op %s\n", info.name);
2847 return false;
2848 }
2849
2850 if (!oldPos) {
2851 oldPos = this->bb->getEntry();
2852 oldPos->precise = insn->exact;
2853 }
2854
2855 if (unlikely(!oldPos))
2856 return true;
2857
2858 while (oldPos->next) {
2859 oldPos = oldPos->next;
2860 oldPos->precise = insn->exact;
2861 }
2862 oldPos->saturate = insn->dest.saturate;
2863
2864 return true;
2865 }
2866 #undef DEFAULT_CHECKS
2867
2868 bool
2869 Converter::visit(nir_ssa_undef_instr *insn)
2870 {
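// give undefined values a definition; a NOP def gives later passes something
// to work with without requiring any particular value.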
2871 LValues &newDefs = convert(&insn->def);
2872 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2873 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2874 }
2875 return true;
2876 }
2877
2878 #define CASE_SAMPLER(ty) \
2879 case GLSL_SAMPLER_DIM_ ## ty : \
2880 if (isArray && !isShadow) \
2881 return TEX_TARGET_ ## ty ## _ARRAY; \
2882 else if (!isArray && isShadow) \
2883 return TEX_TARGET_## ty ## _SHADOW; \
2884 else if (isArray && isShadow) \
2885 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2886 else \
2887 return TEX_TARGET_ ## ty
2888
2889 TexTarget
2890 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2891 {
2892 switch (dim) {
2893 CASE_SAMPLER(1D);
2894 CASE_SAMPLER(2D);
2895 CASE_SAMPLER(CUBE);
2896 case GLSL_SAMPLER_DIM_3D:
2897 return TEX_TARGET_3D;
2898 case GLSL_SAMPLER_DIM_MS:
2899 if (isArray)
2900 return TEX_TARGET_2D_MS_ARRAY;
2901 return TEX_TARGET_2D_MS;
2902 case GLSL_SAMPLER_DIM_RECT:
2903 if (isShadow)
2904 return TEX_TARGET_RECT_SHADOW;
2905 return TEX_TARGET_RECT;
2906 case GLSL_SAMPLER_DIM_BUF:
2907 return TEX_TARGET_BUFFER;
2908 case GLSL_SAMPLER_DIM_EXTERNAL:
2909 return TEX_TARGET_2D;
2910 default:
2911 ERROR("unknown glsl_sampler_dim %u\n", dim);
2912 assert(false);
2913 return TEX_TARGET_COUNT;
2914 }
2915 }
2916 #undef CASE_SAMPLER
2917
2918 Value*
2919 Converter::applyProjection(Value *src, Value *proj)
2920 {
2921 if (!proj)
2922 return src;
2923 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
2924 }
2925
2926 unsigned int
2927 Converter::getNIRArgCount(TexInstruction::Target& target)
2928 {
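// NIR passes fewer coordinate components than codegen's target arg count:
// cube array images seem to be addressed as 2D arrays of 6n layers, and the
// MS sample index arrives as a separate source.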
2929 unsigned int result = target.getArgCount();
2930 if (target.isCube() && target.isArray())
2931 result--;
2932 if (target.isMS())
2933 result--;
2934 return result;
2935 }
2936
2937 CacheMode
2938 Converter::convert(enum gl_access_qualifier access)
2939 {
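// map access qualifiers to cache modes: volatile bypasses the caches (CV),
// coherent caches at L2 only (CG), everything else may use all levels (CA).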
2940 if (access & ACCESS_VOLATILE)
2941 return CACHE_CV;
2942 if (access & ACCESS_COHERENT)
2943 return CACHE_CG;
2944 return CACHE_CA;
2945 }
2946
2947 bool
2948 Converter::visit(nir_tex_instr *insn)
2949 {
2950 switch (insn->op) {
2951 case nir_texop_lod:
2952 case nir_texop_query_levels:
2953 case nir_texop_tex:
2954 case nir_texop_texture_samples:
2955 case nir_texop_tg4:
2956 case nir_texop_txb:
2957 case nir_texop_txd:
2958 case nir_texop_txf:
2959 case nir_texop_txf_ms:
2960 case nir_texop_txl:
2961 case nir_texop_txs: {
2962 LValues &newDefs = convert(&insn->dest);
2963 std::vector<Value*> srcs;
2964 std::vector<Value*> defs;
2965 std::vector<nir_src*> offsets;
2966 uint8_t mask = 0;
2967 bool lz = false;
2968 Value *proj = NULL;
2969 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
2970 operation op = getOperation(insn->op);
2971
2972 int r, s;
2973 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
2974 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
2975 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
2976 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
2977 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
2978 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
2979 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
2980 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
2981 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
2982 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
2983 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
2984 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
2985 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
2986
2987 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
2988 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
2989
2990 if (projIdx != -1)
2991 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
2992
2993 srcs.resize(insn->coord_components);
2994 for (uint8_t i = 0u; i < insn->coord_components; ++i)
2995 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
2996
2997 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
2998 if (insn->coord_components) {
2999 uint32_t argCount = target.getArgCount();
3000
3001 if (target.isMS())
3002 argCount -= 1;
3003
3004 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
3005 srcs.push_back(getSSA());
3006 }
3007
3008 if (insn->op == nir_texop_texture_samples)
3009 srcs.push_back(zero);
3010 else if (!insn->num_srcs)
3011 srcs.push_back(loadImm(NULL, 0));
3012 if (biasIdx != -1)
3013 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
3014 if (lodIdx != -1)
3015 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3016 else if (op == OP_TXF)
3017 lz = true;
3018 if (msIdx != -1)
3019 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3020 if (offsetIdx != -1)
3021 offsets.push_back(&insn->src[offsetIdx].src);
3022 if (compIdx != -1)
3023 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3024 if (texOffIdx != -1) {
3025 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3026 texOffIdx = srcs.size() - 1;
3027 }
3028 if (sampOffIdx != -1) {
3029 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3030 sampOffIdx = srcs.size() - 1;
3031 }
3032 if (bindless) {
3033 // currently we only use the lower 32 bits of the 64-bit handle
3034 Value *split[2];
3035 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3036
3037 mkSplit(split, 4, handle);
3038
3039 srcs.push_back(split[0]);
3040 texOffIdx = srcs.size() - 1;
3041 }
3042
3043 r = bindless ? 0xff : insn->texture_index;
3044 s = bindless ? 0x1f : insn->sampler_index;
3045
3046 defs.resize(newDefs.size());
3047 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3048 defs[d] = newDefs[d];
3049 mask |= 1 << d;
3050 }
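// MS fetches carry an explicit sample rather than an LOD, and TEX outside
// fragment shaders has no implicit derivatives, so force the level-zero form.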
3051 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3052 lz = true;
3053
3054 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3055 texi->tex.levelZero = lz;
3056 texi->tex.mask = mask;
3057 texi->tex.bindless = bindless;
3058
3059 if (texOffIdx != -1)
3060 texi->tex.rIndirectSrc = texOffIdx;
3061 if (sampOffIdx != -1)
3062 texi->tex.sIndirectSrc = sampOffIdx;
3063
3064 switch (insn->op) {
3065 case nir_texop_tg4:
3066 if (!target.isShadow())
3067 texi->tex.gatherComp = insn->component;
3068 break;
3069 case nir_texop_txs:
3070 texi->tex.query = TXQ_DIMS;
3071 break;
3072 case nir_texop_texture_samples:
3073 texi->tex.mask = 0x4;
3074 texi->tex.query = TXQ_TYPE;
3075 break;
3076 case nir_texop_query_levels:
3077 texi->tex.mask = 0x8;
3078 texi->tex.query = TXQ_DIMS;
3079 break;
3080 default:
3081 break;
3082 }
3083
3084 texi->tex.useOffsets = offsets.size();
3085 if (texi->tex.useOffsets) {
3086 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3087 for (uint32_t c = 0u; c < 3; ++c) {
3088 uint8_t s2 = std::min(c, target.getDim() - 1);
3089 texi->offset[s][c].set(getSrc(offsets[s], s2));
3090 texi->offset[s][c].setInsn(texi);
3091 }
3092 }
3093 }
3094
3095 if (op == OP_TXG && offsetIdx == -1) {
3096 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3097 texi->tex.useOffsets = 4;
3098 setPosition(texi, false);
3099 for (uint8_t i = 0; i < 4; ++i) {
3100 for (uint8_t j = 0; j < 2; ++j) {
3101 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3102 texi->offset[i][j].setInsn(texi);
3103 }
3104 }
3105 setPosition(texi, true);
3106 }
3107 }
3108
3109 if (ddxIdx != -1 && ddyIdx != -1) {
3110 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3111 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3112 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3113 }
3114 }
3115
3116 break;
3117 }
3118 default:
3119 ERROR("unknown nir_texop %u\n", insn->op);
3120 return false;
3121 }
3122 return true;
3123 }
3124
3125 bool
3126 Converter::run()
3127 {
3128 bool progress;
3129
3130 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3131 nir_print_shader(nir, stderr);
3132
3133 struct nir_lower_subgroups_options subgroup_options = {
3134 .subgroup_size = 32,
3135 .ballot_bit_size = 32,
3136 };
3137
3138 NIR_PASS_V(nir, nir_lower_io,
3139 (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out),
3140 type_size, (nir_lower_io_options)0);
3141 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3142 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3143 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3144 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3145 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3146 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3147
3148 /* TODO: improve this lowering/optimisation loop so that we can use
3149 * nir_opt_idiv_const effectively before this.
3150 */
3151 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_precise);
3152
3153 do {
3154 progress = false;
3155 NIR_PASS(progress, nir, nir_copy_prop);
3156 NIR_PASS(progress, nir, nir_opt_remove_phis);
3157 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3158 NIR_PASS(progress, nir, nir_opt_cse);
3159 NIR_PASS(progress, nir, nir_opt_algebraic);
3160 NIR_PASS(progress, nir, nir_opt_constant_folding);
3161 NIR_PASS(progress, nir, nir_copy_prop);
3162 NIR_PASS(progress, nir, nir_opt_dce);
3163 NIR_PASS(progress, nir, nir_opt_dead_cf);
3164 } while (progress);
3165
3166 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3167 NIR_PASS_V(nir, nir_lower_locals_to_regs);
3168 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3169 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3170
3171 // Garbage collect dead instructions
3172 nir_sweep(nir);
3173
3174 if (!parseNIR()) {
3175 ERROR("Couldn't parse NIR!\n");
3176 return false;
3177 }
3178
3179 if (!assignSlots()) {
3180 ERROR("Couldn't assign slots!\n");
3181 return false;
3182 }
3183
3184 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3185 nir_print_shader(nir, stderr);
3186
3187 nir_foreach_function(function, nir) {
3188 if (!visit(function))
3189 return false;
3190 }
3191
3192 return true;
3193 }
3194
3195 } // unnamed namespace
3196
3197 namespace nv50_ir {
3198
3199 bool
3200 Program::makeFromNIR(struct nv50_ir_prog_info *info)
3201 {
3202 nir_shader *nir = (nir_shader*)info->bin.source;
3203 Converter converter(this, nir, info);
3204 bool result = converter.run();
3205 if (!result)
3206 return result;
3207 LoweringHelper lowering;
3208 lowering.run(this);
3209 tlsSize = info->bin.tlsSpace;
3210 return result;
3211 }
3212
3213 } // namespace nv50_ir
3214
3215 static nir_shader_compiler_options
3216 nvir_nir_shader_compiler_options(int chipset)
3217 {
3218 nir_shader_compiler_options op = {};
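// GV100 (Volta) reworked the ISA and codegen does not implement all of these
// ops natively there yet, so much of the lowering below is keyed on
// NVISA_GV100_CHIPSET.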
3219 op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
3220 op.lower_ffma = false;
3221 op.fuse_ffma = false; /* nir doesn't track mad vs fma */
3222 op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
3223 op.lower_flrp32 = true;
3224 op.lower_flrp64 = true;
3225 op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
3226 op.lower_fsat = false;
3227 op.lower_fsqrt = false; // TODO: only before gm200
3228 op.lower_sincos = false;
3229 op.lower_fmod = true;
3230 op.lower_bitfield_extract = false;
3231 op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3232 op.lower_bitfield_insert = false;
3233 op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3234 op.lower_bitfield_insert_to_bitfield_select = false;
3235 op.lower_bitfield_reverse = false;
3236 op.lower_bit_count = false;
3237 op.lower_ifind_msb = false;
3238 op.lower_find_lsb = false;
3239 op.lower_uadd_carry = true; // TODO
3240 op.lower_usub_borrow = true; // TODO
3241 op.lower_mul_high = false;
3242 op.lower_negate = false;
3243 op.lower_sub = true;
3244 op.lower_scmp = true; // TODO: not implemented yet
3245 op.lower_vector_cmp = false;
3246 op.lower_idiv = true;
3247 op.lower_bitops = false;
3248 op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
3249 op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
3250 op.lower_fdph = false;
3251 op.lower_fdot = false;
3252 op.fdot_replicates = false; // TODO
3253 op.lower_ffloor = false; // TODO
3254 op.lower_ffract = true;
3255 op.lower_fceil = false; // TODO
3256 op.lower_ftrunc = false;
3257 op.lower_ldexp = true;
3258 op.lower_pack_half_2x16 = true;
3259 op.lower_pack_unorm_2x16 = true;
3260 op.lower_pack_snorm_2x16 = true;
3261 op.lower_pack_unorm_4x8 = true;
3262 op.lower_pack_snorm_4x8 = true;
3263 op.lower_unpack_half_2x16 = true;
3264 op.lower_unpack_unorm_2x16 = true;
3265 op.lower_unpack_snorm_2x16 = true;
3266 op.lower_unpack_unorm_4x8 = true;
3267 op.lower_unpack_snorm_4x8 = true;
3268 op.lower_pack_split = false;
3269 op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
3270 op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
3271 op.lower_all_io_to_temps = false;
3272 op.lower_all_io_to_elements = false;
3273 op.vertex_id_zero_based = false;
3274 op.lower_base_vertex = false;
3275 op.lower_helper_invocation = false;
3276 op.optimize_sample_mask_in = false;
3277 op.lower_cs_local_index_from_id = true;
3278 op.lower_cs_local_id_from_index = false;
3279 op.lower_device_index_to_zero = false; // TODO
3280 op.lower_wpos_pntc = false; // TODO
3281 op.lower_hadd = true; // TODO
3282 op.lower_add_sat = true; // TODO
3283 op.vectorize_io = false;
3284 op.lower_to_scalar = false;
3285 op.unify_interfaces = false;
3286 op.use_interpolated_input_intrinsics = true;
3287 op.lower_mul_2x32_64 = true; // TODO
3288 op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
3289 op.has_imul24 = false;
3290 op.intel_vec4 = false;
3291 op.max_unroll_iterations = 32;
3292 op.lower_int64_options = (nir_lower_int64_options) (
3293 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
3294 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
3295 nir_lower_divmod64 |
3296 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
3297 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
3298 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
3299 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
3300 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
3301 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
3302 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
3303 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
3304 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
3305 ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
3306 nir_lower_ufind_msb64
3307 );
3308 op.lower_doubles_options = (nir_lower_doubles_options) (
3309 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
3310 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
3311 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
3312 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
3313 nir_lower_dmod |
3314 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
3315 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
3316 );
3317 return op;
3318 }
3319
3320 static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
3321 nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
3322 static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
3323 nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
3324 static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
3325 nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
3326
3327 const nir_shader_compiler_options *
3328 nv50_ir_nir_shader_compiler_options(int chipset)
3329 {
3330 if (chipset >= NVISA_GV100_CHIPSET)
3331 return &gv100_nir_shader_compiler_options;
3332 if (chipset >= NVISA_GM107_CHIPSET)
3333 return &gm107_nir_shader_compiler_options;
3334 return &gf100_nir_shader_compiler_options;
3335 }