nv50/ir/nir: implement ssbo intrinsics
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

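// Callback for NIR's IO lowering: report how many vec4 attribute slots a
// variable of the given GLSL type occupies.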
int
type_size(const struct glsl_type *type)
{
   return glsl_count_attribute_slots(type, false);
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, the Value is returned
   // through the Value parameter instead.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   NirArrayLMemOffsets regToLmemOffset;
   NirBlockMap blocks;
   unsigned int curLoopDepth;

   BasicBlock *exit;
   Value *zero;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

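// Look up (or lazily create) the BasicBlock corresponding to a nir_block,
// so blocks can be referenced before they are visited.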
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, false);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, false);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_fand:
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_fnot:
   case nir_op_inot:
      return OP_NOT;
   case nir_op_for:
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_fsub:
   case nir_op_isub:
      return OP_SUB;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_fxor:
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fne32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

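// If the source is a constant, return it directly and leave indirect NULL;
// otherwise return 0 and hand the value back through indirect.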
uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset->u32[0];
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

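// Same as above, but adds the intrinsic's base and scales a non-constant
// index by 16 (SHL by 4), matching the 16-byte vec4 slot size used for
// constant buffer addressing.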
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

static void
varying_slot_to_tgsi_semantic(gl_varying_slot slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VARYING_SLOT_TESS_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VARYING_SLOT_PATCH0) {
      *name = TGSI_SEMANTIC_PATCH;
      *index = slot - VARYING_SLOT_PATCH0;
      return;
   }

   if (slot >= VARYING_SLOT_VAR0) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VARYING_SLOT_VAR0;
      return;
   }

   if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VARYING_SLOT_TEX0;
      return;
   }

   switch (slot) {
   case VARYING_SLOT_BFC0:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 0;
      break;
   case VARYING_SLOT_BFC1:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_DIST0:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 0;
      break;
   case VARYING_SLOT_CLIP_DIST1:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_VERTEX:
      *name = TGSI_SEMANTIC_CLIPVERTEX;
      *index = 0;
      break;
   case VARYING_SLOT_COL0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VARYING_SLOT_COL1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VARYING_SLOT_EDGE:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VARYING_SLOT_FACE:
      *name = TGSI_SEMANTIC_FACE;
      *index = 0;
      break;
   case VARYING_SLOT_FOGC:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VARYING_SLOT_LAYER:
      *name = TGSI_SEMANTIC_LAYER;
      *index = 0;
      break;
   case VARYING_SLOT_PNTC:
      *name = TGSI_SEMANTIC_PCOORD;
      *index = 0;
      break;
   case VARYING_SLOT_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VARYING_SLOT_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
      *index = 0;
      break;
   case VARYING_SLOT_PSIZ:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
      *index = 0;
      break;
   case VARYING_SLOT_VIEWPORT:
      *name = TGSI_SEMANTIC_VIEWPORT_INDEX;
      *index = 0;
      break;
   default:
      ERROR("unknown varying slot %u\n", slot);
      assert(false);
      break;
   }
}

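// map gl_frag_result values to TGSI semantic name/index pairs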
static void
frag_result_to_tgsi_semantic(unsigned slot, unsigned *name, unsigned *index)
{
   if (slot >= FRAG_RESULT_DATA0) {
      *name = TGSI_SEMANTIC_COLOR;
      *index = slot - FRAG_RESULT_COLOR - 2; // intentional
      return;
   }

   switch (slot) {
   case FRAG_RESULT_COLOR:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case FRAG_RESULT_DEPTH:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case FRAG_RESULT_SAMPLE_MASK:
      *name = TGSI_SEMANTIC_SAMPLEMASK;
      *index = 0;
      break;
   default:
      ERROR("unknown frag result slot %u\n", slot);
      assert(false);
      break;
   }
}

// copy of _mesa_sysval_to_semantic
static void
system_val_to_tgsi_semantic(unsigned val, unsigned *name, unsigned *index)
{
   *index = 0;
   switch (val) {
   // Vertex shader
   case SYSTEM_VALUE_VERTEX_ID:
      *name = TGSI_SEMANTIC_VERTEXID;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      *name = TGSI_SEMANTIC_INSTANCEID;
      break;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      *name = TGSI_SEMANTIC_VERTEXID_NOBASE;
      break;
   case SYSTEM_VALUE_BASE_VERTEX:
      *name = TGSI_SEMANTIC_BASEVERTEX;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      *name = TGSI_SEMANTIC_BASEINSTANCE;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      *name = TGSI_SEMANTIC_DRAWID;
      break;

   // Geometry shader
   case SYSTEM_VALUE_INVOCATION_ID:
      *name = TGSI_SEMANTIC_INVOCATIONID;
      break;

   // Fragment shader
   case SYSTEM_VALUE_FRAG_COORD:
      *name = TGSI_SEMANTIC_POSITION;
      break;
   case SYSTEM_VALUE_FRONT_FACE:
      *name = TGSI_SEMANTIC_FACE;
      break;
   case SYSTEM_VALUE_SAMPLE_ID:
      *name = TGSI_SEMANTIC_SAMPLEID;
      break;
   case SYSTEM_VALUE_SAMPLE_POS:
      *name = TGSI_SEMANTIC_SAMPLEPOS;
      break;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      *name = TGSI_SEMANTIC_SAMPLEMASK;
      break;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      *name = TGSI_SEMANTIC_HELPER_INVOCATION;
      break;

   // Tessellation shader
   case SYSTEM_VALUE_TESS_COORD:
      *name = TGSI_SEMANTIC_TESSCOORD;
      break;
   case SYSTEM_VALUE_VERTICES_IN:
      *name = TGSI_SEMANTIC_VERTICESIN;
      break;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
      break;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
      break;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
      break;

   // Compute shader
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      *name = TGSI_SEMANTIC_THREAD_ID;
      break;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      *name = TGSI_SEMANTIC_BLOCK_ID;
      break;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      *name = TGSI_SEMANTIC_GRID_SIZE;
      break;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      *name = TGSI_SEMANTIC_BLOCK_SIZE;
      break;

   // ARB_shader_ballot
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      *name = TGSI_SEMANTIC_SUBGROUP_SIZE;
      break;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      *name = TGSI_SEMANTIC_SUBGROUP_INVOCATION;
      break;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_EQ_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_GE_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_GT_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_LE_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_LT_MASK;
      break;

   default:
      ERROR("unknown system value %u\n", val);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

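// Compute how many IO slots an array variable occupies in the given shader
// stage; for per-vertex arrays in GS/TCS/TES the outermost array dimension
// is dropped.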
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->uniform_locations();
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->uniform_locations();
      else
         slots = type->fields.array->uniform_locations();
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

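// Assign input/output/system-value slots in the nv50_ir_prog_info, filling
// in TGSI semantic names, component masks and interpolation settings.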
bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;

   // we have to fixup the uniform locations for arrays
   unsigned numImages = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      const glsl_type *type = var->type;
      if (!type->without_array()->is_image())
         continue;
      var->data.driver_location = numImages;
      numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
   }

   nir_foreach_variable(var, &nir->inputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->in[vary].id = vary;
         info->in[vary].patch = var->data.patch;
         info->in[vary].sn = name;
         info->in[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->in[vary].mask |= ((1 << comp) - 1) << frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }

   info->numOutputs = 0;
   nir_foreach_variable(var, &nir->outputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         frag_result_to_tgsi_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;
            info->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->out[vary].id = vary;
         info->out[vary].patch = var->data.patch;
         info->out[vary].sn = name;
         info->out[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->out[vary].mask |= ((1 << comp) - 1) << frac;

         if (nir->info.outputs_read & 1ll << slot)
            info->out[vary].oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }

   info->numSysVals = 0;
   for (uint8_t i = 0; i < 64; ++i) {
      if (!(nir->info.system_values_read & 1ll << i))
         continue;

      system_val_to_tgsi_semantic(i, &name, &index);
      info->sv[info->numSysVals].sn = name;
      info->sv[info->numSysVals].si = index;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}

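// Translate a slot/component pair of an IO intrinsic into the byte address
// assigned by assignSlots; 64-bit types occupy two components per value.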
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}

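// Emit a load; 64-bit values from const/buffer memory (or with an indirect)
// are split into two 32-bit loads and merged back into a single value.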
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

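// Store a value to a shader output; 64-bit values are split into two 32-bit
// stores, and OP_EXPORT sources are copied into fresh SSA values first.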
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}

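// Copy shader properties that codegen needs from the nir_shader's info
// into the nv50_ir_prog_info.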
bool
Converter::parseNIR()
{
   info->bin.tlsSpace = 0;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize = nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      info->prop.fp.persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.inputPrim = nir->info.gs.input_primitive;
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   // we only support emitting the main function for now
   assert(!strcmp(function->name, "main"));
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_foreach_register(reg, &function->impl->registers) {
      if (reg->num_array_elems) {
         // TODO: packed variables would be nice, but MemoryOpt fails
         // replace 4 with reg->num_components
         uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
         regToLmemOffset[reg->index] = info->bin.tlsSpace;
         info->bin.tlsSpace += size;
      }
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if (info->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

bool
Converter::visit(nir_if *nif)
{
   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   assert(!lastThen->successors[1]);
   assert(!lastElse->successors[1]);

   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   // we only insert joinats if both branches end up at the end of the if
   // again. The reasons for this not happening are breaks/continues/ret/...,
   // which have their own handling
   if (lastThen->successors[0] == lastElse->successors[0])
      bb->joinAt = mkFlow(OP_JOINAT, convert(lastThen->successors[0]),
                          CC_ALWAYS, NULL);

   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastThen), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastElse), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   if (lastThen->successors[0] == lastElse->successors[0]) {
      setPosition(convert(lastThen->successors[0]), true);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

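// Loops are bracketed by OP_PREBREAK/OP_PRECONT so that breaks and
// continues inside the body know where to branch to.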
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB =
      convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }
   Instruction *insn = bb->getExit();
   if (bb->cfg.incidentCount() != 0) {
      if (!insn || !insn->asFlow()) {
         mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
         bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
      } else if (insn && insn->op == OP_BRA && !insn->getPredicate() &&
                 tailBB->cfg.incidentCount() == 0) {
         // RA doesn't like having blocks around with no incident edge,
         // so we create a fake one to make it happy
         bb->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
      }
   }

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

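// map system-value load intrinsics to the corresponding nv50 IR
// system-value semantics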
SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

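// The main intrinsic handler: each case lowers one nir_intrinsic_op to
// nv50 IR instructions.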
bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < insn->num_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // depth value in the z component, NIR in x
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
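      // loading an output in a fragment shader is a framebuffer fetch:
      // read the current render target at this fragment's position (and
      // layer/sample) with a levelZero 2D MS array texel fetch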
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < insn->num_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         mode = translateInterpMode(&vary, nvirOp);
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode |= immMode.reg.data.u32;
         }
      }

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
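         // clamp the interpolation offset to the supported range, convert
         // it to fixed point (x4096) and pack both components into a
         // single register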
         Value *offs[2];
         for (uint8_t c = 0; c < 2; c++) {
            offs[c] = getScratch();
            mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
            mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
            mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
            mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
         }
         mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);

         mode = NV50_IR_INTERP_OFFSET;
      } else if (op == nir_intrinsic_load_barycentric_pixel) {
         mode = NV50_IR_INTERP_DEFAULT;
      } else if (op == nir_intrinsic_load_barycentric_at_sample) {
         info->prop.fp.readsSampleLocations = true;
         mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
         mode = NV50_IR_INTERP_OFFSET;
      } else {
         unreachable("all intrinsics already handled above");
      }

      loadImm(newDefs[1], mode);
      break;
   }
   case nir_intrinsic_discard:
      mkOp(OP_DISCARD, TYPE_NONE, NULL);
      break;
   case nir_intrinsic_discard_if: {
      Value *pred = getSSA(1, FILE_PREDICATE);
      if (insn->num_components > 1) {
         ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
         assert(false);
         return false;
      }
      mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_work_group_id: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
      LValues &newDefs = convert(&insn->dest);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         Value *def;
         if (typeSizeof(dType) == 8)
            def = getSSA();
         else
            def = newDefs[i];

         if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
            loadImm(def, 0u);
         } else {
            Symbol *sym = mkSysVal(sv, i);
            Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
            if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
               rdsv->perPatch = 1;
         }

         if (typeSizeof(dType) == 8)
            mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
      }
      break;
   }
   // constants
   case nir_intrinsic_load_subgroup_size: {
      LValues &newDefs = convert(&insn->dest);
      loadImm(newDefs[0], 32u);
      break;
   }
1975 case nir_intrinsic_vote_all:
1976 case nir_intrinsic_vote_any:
1977 case nir_intrinsic_vote_ieq: {
1978 LValues &newDefs = convert(&insn->dest);
1979 Value *pred = getScratch(1, FILE_PREDICATE);
1980 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1981 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1982 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1983 break;
1984 }
1985 case nir_intrinsic_ballot: {
1986 LValues &newDefs = convert(&insn->dest);
1987 Value *pred = getSSA(1, FILE_PREDICATE);
1988 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1989 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1990 break;
1991 }
1992 case nir_intrinsic_read_first_invocation:
1993 case nir_intrinsic_read_invocation: {
1994 LValues &newDefs = convert(&insn->dest);
1995 const DataType dType = getDType(insn);
1996 Value *tmp = getScratch();
1997
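      // For read_first_invocation we need the lowest active lane id:
      // OP_VOTE(ANY) with a true predicate yields the active-lane mask,
      // EXTBF with 0x2000 (width 32, offset 0) plus the REV subop
      // bit-reverses it, and OP_BFIND with the SAMT ("shift amount")
      // subop then presumably returns 31 - msb(reversed), i.e. the index
      // of the lowest set bit in the original mask.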
      if (op == nir_intrinsic_read_first_invocation) {
         mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
         mkOp2(OP_EXTBF, TYPE_U32, tmp, tmp, mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
         mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      } else
         tmp = getSrc(&insn->src[1], 0);

      for (uint8_t i = 0; i < insn->num_components; ++i) {
         mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
            ->subOp = NV50_IR_SUBOP_SHFL_IDX;
      }
      break;
   }
   case nir_intrinsic_load_per_vertex_input: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
      uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);

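      // OP_PFETCH presumably resolves the (base + indirect) vertex index
      // into the base address from which that vertex's attributes can be
      // read; the per-component loads below are then relative to it.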
      Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              mkImm(baseVertex), indirectVertex);
      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
                  indirectOffset, vtxBase, info->in[idx].patch);
      }
      break;
   }
   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_end_primitive: {
      uint32_t idx = nir_intrinsic_stream_id(insn);
      mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
      break;
   }
   case nir_intrinsic_load_ubo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectIndex;
      Value *indirectOffset;
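      // The constant-file index is biased by one: c0[] appears to be
      // reserved for the regular uniforms, so user UBO i is accessed
      // through c[i + 1].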
      uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
                  indirectOffset, indirectIndex);
      }
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirectBuffer;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
      mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
      break;
   }
   case nir_intrinsic_store_ssbo: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
                                offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
            ->setIndirect(0, 1, indirectBuffer);
      }
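      // record a write to globally visible memory; judging from the
      // load_ssbo case below, bit 0 tracks reads and bit 1 tracks writes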
      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_load_ssbo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < insn->num_components; ++i)
         loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
                  indirectOffset, indirectBuffer);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
                                getSrc(&insn->src[2], 0));
      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[3], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->setIndirect(0, 1, indirectBuffer);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
   default:
      ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
      return false;
   }

   return true;
}

bool
Converter::visit(nir_jump_instr *insn)
{
   switch (insn->type) {
   case nir_jump_return:
      // TODO: this only works in the main function
      mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
      bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
      break;
   case nir_jump_break:
   case nir_jump_continue: {
      bool isBreak = insn->type == nir_jump_break;
      nir_block *block = insn->instr.block;
      assert(!block->successors[1]);
      BasicBlock *target = convert(block->successors[0]);
      mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
      bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
      break;
   }
   default:
      ERROR("unknown nir_jump_type %u\n", insn->type);
      return false;
   }

   return true;
}

bool
Converter::visit(nir_load_const_instr *insn)
{
   assert(insn->def.bit_size <= 64);

   LValues &newDefs = convert(&insn->def);
   for (int i = 0; i < insn->def.num_components; i++) {
      switch (insn->def.bit_size) {
      case 64:
         loadImm(newDefs[i], insn->value.u64[i]);
         break;
      case 32:
         loadImm(newDefs[i], insn->value.u32[i]);
         break;
      case 16:
         loadImm(newDefs[i], insn->value.u16[i]);
         break;
      case 8:
         loadImm(newDefs[i], insn->value.u8[i]);
         break;
      }
   }
   return true;
}

#define DEFAULT_CHECKS \
   if (insn->dest.dest.ssa.num_components > 1) { \
      ERROR("nir_alu_instr only supported with 1 component!\n"); \
      return false; \
   } \
   if (insn->dest.write_mask != 1) { \
      ERROR("nir_alu_instr only supported with a write_mask of 1!\n"); \
      return false; \
   }
bool
Converter::visit(nir_alu_instr *insn)
{
   const nir_op op = insn->op;
   const nir_op_info &info = nir_op_infos[op];
   DataType dType = getDType(insn);
   const std::vector<DataType> sTypes = getSTypes(insn);

   Instruction *oldPos = this->bb->getExit();

   switch (op) {
   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fadd:
   case nir_op_iadd:
   case nir_op_fand:
   case nir_op_iand:
   case nir_op_fceil:
   case nir_op_fcos:
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
   case nir_op_fexp2:
   case nir_op_ffloor:
   case nir_op_ffma:
   case nir_op_flog2:
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
   case nir_op_fneg:
   case nir_op_ineg:
   case nir_op_fnot:
   case nir_op_inot:
   case nir_op_for:
   case nir_op_ior:
   case nir_op_pack_64_2x32_split:
   case nir_op_fpow:
   case nir_op_frcp:
   case nir_op_frem:
   case nir_op_irem:
   case nir_op_frsq:
   case nir_op_fsat:
   case nir_op_ishr:
   case nir_op_ushr:
   case nir_op_fsin:
   case nir_op_fsqrt:
   case nir_op_fsub:
   case nir_op_isub:
   case nir_op_ftrunc:
   case nir_op_ishl:
   case nir_op_fxor:
   case nir_op_ixor: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
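      // Some unary ops need a preparatory instruction first (e.g. sin/cos
      // presumably go through something like OP_PRESIN); in that case the
      // emitted chain is preOp(tmp, src) followed by op(def, tmp).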
      operation preOp = preOperationNeeded(op);
      if (preOp != OP_NOP) {
         assert(info.num_inputs < 2);
         Value *tmp = getSSA(typeSizeof(dType));
         Instruction *i0 = mkOp(preOp, dType, tmp);
         Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
         if (info.num_inputs) {
            i0->setSrc(0, getSrc(&insn->src[0]));
            i1->setSrc(0, tmp);
         }
         i1->subOp = getSubOp(op);
      } else {
         Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
         for (unsigned s = 0u; s < info.num_inputs; ++s) {
            i->setSrc(s, getSrc(&insn->src[s]));
         }
         i->subOp = getSubOp(op);
      }
      break;
   }
   case nir_op_ifind_msb:
   case nir_op_ufind_msb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      dType = sTypes[0];
      mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
   case nir_op_fround_even: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
      break;
   }
   // convert instructions
   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
   case nir_op_i2f32:
   case nir_op_i2i32:
   case nir_op_u2f32:
   case nir_op_u2u32:
   case nir_op_f2f64:
   case nir_op_f2i64:
   case nir_op_f2u64:
   case nir_op_i2f64:
   case nir_op_i2i64:
   case nir_op_u2f64:
   case nir_op_u2u64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
         i->rnd = ROUND_Z;
      i->sType = sTypes[0];
      break;
   }
   // compare instructions
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkCmp(getOperation(op),
                             getCondCode(op),
                             dType,
                             newDefs[0],
                             dType,
                             getSrc(&insn->src[0]),
                             getSrc(&insn->src[1]));
      if (info.num_inputs == 3)
         i->setSrc(2, getSrc(&insn->src[2]));
      i->sType = sTypes[0];
      break;
   }
   // these are weird ALU ops and need special handling, because
   // 1. they are always component based
   // 2. they basically just merge multiple values into one data type
   case nir_op_imov:
   case nir_op_fmov:
      if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
         nir_reg_dest& reg = insn->dest.dest.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < comps; ++i) {
            if (!((1u << i) & insn->dest.write_mask))
               continue;

            Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
            mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
         }
         break;
      } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
         LValues &newDefs = convert(&insn->dest);
         nir_reg_src& reg = insn->src[0].src.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         // uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < newDefs.size(); ++i)
            loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);

         break;
      } else {
         LValues &newDefs = convert(&insn->dest);
         for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
            mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
         }
      }
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      LValues &newDefs = convert(&insn->dest);
      for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
         mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
      }
      break;
   }
   // (un)pack
   case nir_op_pack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
      merge->setSrc(0, getSrc(&insn->src[0], 0));
      merge->setSrc(1, getSrc(&insn->src[0], 1));
      break;
   }
   case nir_op_pack_half_2x16_split: {
      LValues &newDefs = convert(&insn->dest);
      Value *tmpH = getSSA();
      Value *tmpL = getSSA();

      mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
      mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
      mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
      break;
   }
   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
      if (op == nir_op_unpack_half_2x16_split_y)
         cvt->subOp = 1;
      break;
   }
   case nir_op_unpack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
      break;
   }
   // special instructions
   case nir_op_fsign:
   case nir_op_isign: {
      DEFAULT_CHECKS;
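      // open-coded sign(x) = (x > 0) - (x < 0); OP_SET yields 1.0f for
      // true on float types but all-ones (-1) on integers, which is
      // presumably why the operand order of the final SUB is swapped
      // between the float and the integer variants below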
      DataType iType;
      if (::isFloatType(dType))
         iType = TYPE_F32;
      else
         iType = TYPE_S32;

      LValues &newDefs = convert(&insn->dest);
      LValue *val0 = getScratch();
      LValue *val1 = getScratch();
      mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
      mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);

      if (dType == TYPE_F64) {
         mkOp2(OP_SUB, iType, val0, val0, val1);
         mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
      } else if (dType == TYPE_S64 || dType == TYPE_U64) {
         mkOp2(OP_SUB, iType, val0, val1, val0);
         mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
         mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
      } else if (::isFloatType(dType))
         mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
      else
         mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
      break;
   }
   case nir_op_fcsel:
   case nir_op_b32csel: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_ibitfield_extract:
   case nir_op_ubitfield_extract: {
      DEFAULT_CHECKS;
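      // EXTBF takes (width << 8) | offset as its second source; INSBF
      // with 0x808 (insert 8 bits at bit 8) builds exactly that word
      // from the bits (src[2]) and offset (src[1]) operands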
      Value *tmp = getSSA();
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
      break;
   }
   case nir_op_bfm: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      break;
   }
   case nir_op_bitfield_insert: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *temp = getSSA();
      mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bit_count: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bitfield_reverse: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
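      // 0x2000 selects the whole register (width 32, offset 0); the REV
      // subOp makes EXTBF return those bits in reversed order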
      mkOp2(OP_EXTBF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
      break;
   }
   case nir_op_find_lsb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA();
      mkOp2(OP_EXTBF, TYPE_U32, tmp, getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
      mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      break;
   }
   // boolean conversions
   case nir_op_b2f32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
      break;
   }
   case nir_op_b2f64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
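      // 0x3ff00000 is the high word of the double 1.0, so ANDing it with
      // an all-ones bool and merging in a zero low word yields 0.0 or 1.0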
      Value *tmp = getSSA(4);
      mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
      mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
      break;
   }
   case nir_op_f2b32:
   case nir_op_i2b32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *src1;
      if (typeSizeof(sTypes[0]) == 8) {
         src1 = loadImm(getSSA(8), 0.0);
      } else {
         src1 = zero;
      }
      CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
      mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
      break;
   }
   case nir_op_b2i32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
      break;
   }
   case nir_op_b2i64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *def = getScratch();
      mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
      mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
      break;
   }
   default:
      ERROR("unknown nir_op %s\n", info.name);
      return false;
   }

   if (!oldPos) {
      oldPos = this->bb->getEntry();
      // don't dereference a NULL entry (empty basic block)
      if (unlikely(!oldPos))
         return true;
      oldPos->precise = insn->exact;
   }

   while (oldPos->next) {
      oldPos = oldPos->next;
      oldPos->precise = insn->exact;
   }
   oldPos->saturate = insn->dest.saturate;

   return true;
}
#undef DEFAULT_CHECKS

bool
Converter::visit(nir_ssa_undef_instr *insn)
{
   LValues &newDefs = convert(&insn->def);
   for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
      mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
   }
   return true;
}

#define CASE_SAMPLER(ty) \
   case GLSL_SAMPLER_DIM_ ## ty : \
      if (isArray && !isShadow) \
         return TEX_TARGET_ ## ty ## _ARRAY; \
      else if (!isArray && isShadow) \
         return TEX_TARGET_## ty ## _SHADOW; \
      else if (isArray && isShadow) \
         return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
      else \
         return TEX_TARGET_ ## ty

TexTarget
Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
{
   switch (dim) {
   CASE_SAMPLER(1D);
   CASE_SAMPLER(2D);
   CASE_SAMPLER(CUBE);
   case GLSL_SAMPLER_DIM_3D:
      return TEX_TARGET_3D;
   case GLSL_SAMPLER_DIM_MS:
      if (isArray)
         return TEX_TARGET_2D_MS_ARRAY;
      return TEX_TARGET_2D_MS;
   case GLSL_SAMPLER_DIM_RECT:
      if (isShadow)
         return TEX_TARGET_RECT_SHADOW;
      return TEX_TARGET_RECT;
   case GLSL_SAMPLER_DIM_BUF:
      return TEX_TARGET_BUFFER;
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return TEX_TARGET_2D;
   default:
      ERROR("unknown glsl_sampler_dim %u\n", dim);
      assert(false);
      return TEX_TARGET_COUNT;
   }
}
#undef CASE_SAMPLER

Value*
Converter::applyProjection(Value *src, Value *proj)
{
   if (!proj)
      return src;
   return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}

bool
Converter::visit(nir_tex_instr *insn)
{
   switch (insn->op) {
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_tex:
   case nir_texop_texture_samples:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txl:
   case nir_texop_txs: {
      LValues &newDefs = convert(&insn->dest);
      std::vector<Value*> srcs;
      std::vector<Value*> defs;
      std::vector<nir_src*> offsets;
      uint8_t mask = 0;
      bool lz = false;
      Value *proj = NULL;
      TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
      operation op = getOperation(insn->op);

      int r, s;
      int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
      int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
      int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
      int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
      int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
      int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
      int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
      int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
      int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
      int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
      int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);

      if (projIdx != -1)
         proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));

      srcs.resize(insn->coord_components);
      for (uint8_t i = 0u; i < insn->coord_components; ++i)
         srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);

      // sometimes we get fewer args than target.getArgCount() expects,
      // but codegen wants the full count, so pad with dummy values
      if (insn->coord_components) {
         uint32_t argCount = target.getArgCount();

         if (target.isMS())
            argCount -= 1;

         for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
            srcs.push_back(getSSA());
      }

      if (insn->op == nir_texop_texture_samples)
         srcs.push_back(zero);
      else if (!insn->num_srcs)
         srcs.push_back(loadImm(NULL, 0));
      if (biasIdx != -1)
         srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
      if (lodIdx != -1)
         srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
      else if (op == OP_TXF)
         lz = true;
      if (msIdx != -1)
         srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
      if (offsetIdx != -1)
         offsets.push_back(&insn->src[offsetIdx].src);
      if (compIdx != -1)
         srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
      if (texOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
         texOffIdx = srcs.size() - 1;
      }
      if (sampOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
         sampOffIdx = srcs.size() - 1;
      }

      r = insn->texture_index;
      s = insn->sampler_index;

      defs.resize(newDefs.size());
      for (uint8_t d = 0u; d < newDefs.size(); ++d) {
         defs[d] = newDefs[d];
         mask |= 1 << d;
      }
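      // texelFetch from MS targets takes no LOD, and an implicit-lod TEX
      // outside of fragment shaders has no derivatives to work with, so
      // force level zero in both cases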
      if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
         lz = true;

      TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
      texi->tex.levelZero = lz;
      texi->tex.mask = mask;

      if (texOffIdx != -1)
         texi->tex.rIndirectSrc = texOffIdx;
      if (sampOffIdx != -1)
         texi->tex.sIndirectSrc = sampOffIdx;

      switch (insn->op) {
      case nir_texop_tg4:
         if (!target.isShadow())
            texi->tex.gatherComp = insn->component;
         break;
      case nir_texop_txs:
         texi->tex.query = TXQ_DIMS;
         break;
      case nir_texop_texture_samples:
         texi->tex.mask = 0x4;
         texi->tex.query = TXQ_TYPE;
         break;
      case nir_texop_query_levels:
         texi->tex.mask = 0x8;
         texi->tex.query = TXQ_DIMS;
         break;
      default:
         break;
      }

      texi->tex.useOffsets = offsets.size();
      if (texi->tex.useOffsets) {
         for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
            for (uint32_t c = 0u; c < 3; ++c) {
               uint8_t s2 = std::min(c, target.getDim() - 1);
               texi->offset[s][c].set(getSrc(offsets[s], s2));
               texi->offset[s][c].setInsn(texi);
            }
         }
      }

      if (ddxIdx != -1 && ddyIdx != -1) {
         for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
            texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
            texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
         }
      }

      break;
   }
   default:
      ERROR("unknown nir_texop %u\n", insn->op);
      return false;
   }
   return true;
}

bool
Converter::run()
{
   bool progress;

   if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
      nir_print_shader(nir, stderr);

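   // warps are 32 threads wide on NVIDIA hardware, so lower subgroup
   // operations to 32-bit ballot masks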
   struct nir_lower_subgroups_options subgroup_options = {
      .subgroup_size = 32,
      .ballot_bit_size = 32,
   };

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa);
   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar);
   NIR_PASS_V(nir, nir_lower_phis_to_scalar);

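   // run the cheap cleanup and optimization passes until they stop
   // making progress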
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_trivial_continues);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
   } while (progress);

   NIR_PASS_V(nir, nir_lower_bool_to_int32);
   NIR_PASS_V(nir, nir_lower_locals_to_regs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);

   // Garbage collect dead instructions
   nir_sweep(nir);

   if (!parseNIR()) {
      ERROR("Couldn't parse NIR!\n");
      return false;
   }

   if (!assignSlots()) {
      ERROR("Couldn't assign slots!\n");
      return false;
   }

   if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
      nir_print_shader(nir, stderr);

   nir_foreach_function(function, nir) {
      if (!visit(function))
         return false;
   }

   return true;
}

} // unnamed namespace

namespace nv50_ir {

bool
Program::makeFromNIR(struct nv50_ir_prog_info *info)
{
   nir_shader *nir = (nir_shader*)info->bin.source;
   Converter converter(this, nir, info);
   bool result = converter.run();
   if (!result)
      return result;
   LoweringHelper lowering;
   lowering.run(this);
   tlsSize = info->bin.tlsSpace;
   return result;
}

} // namespace nv50_ir