nv50/ir/nir: support gather offsets
src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

int
type_size(const struct glsl_type *type)
{
   return glsl_count_attribute_slots(type, false);
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   ImgFormat convertGLImgFormat(GLuint);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // returns the constant part of the given source (either the nir_src or
   // the selected source component of an intrinsic). Even though this is
   // mostly an optimization to be able to skip indirects in a few cases,
   // sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, it is returned through the
   // Value reference parameter instead.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_deref_instr *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   // image stuff
   uint16_t handleDeref(nir_deref_instr *, Value * & indirect, const nir_variable * &);
   CacheMode getCacheModeFromVar(const nir_variable *);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirArrayLMemOffsets regToLmemOffset;
   NirBlockMap blocks;
   unsigned int curLoopDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_fand:
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_fnot:
   case nir_op_inot:
      return OP_NOT;
   case nir_op_for:
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_fsub:
   case nir_op_isub:
      return OP_SUB;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_fxor:
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_max:
   case nir_intrinsic_image_deref_atomic_min:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
      return OP_SUREDP;
   case nir_intrinsic_image_deref_load:
      return OP_SULDP;
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_size:
      return OP_SUQ;
   case nir_intrinsic_image_deref_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_image_deref_atomic_max:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_image_deref_atomic_min:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_atomic_counter:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fne32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.\n");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.\n");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset->u32[0];
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

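// Note: the indirect value indexes whole vec4 slots; the OP_SHL by 4 below
// converts that slot index into a byte offset (16 bytes per vec4 slot),
// matching the 16 * offset scaling used for nir_intrinsic_load_uniform.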
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

static void
varying_slot_to_tgsi_semantic(gl_varying_slot slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VARYING_SLOT_TESS_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VARYING_SLOT_PATCH0) {
      *name = TGSI_SEMANTIC_PATCH;
      *index = slot - VARYING_SLOT_PATCH0;
      return;
   }

   if (slot >= VARYING_SLOT_VAR0) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VARYING_SLOT_VAR0;
      return;
   }

   if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VARYING_SLOT_TEX0;
      return;
   }

   switch (slot) {
   case VARYING_SLOT_BFC0:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 0;
      break;
   case VARYING_SLOT_BFC1:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_DIST0:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 0;
      break;
   case VARYING_SLOT_CLIP_DIST1:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_VERTEX:
      *name = TGSI_SEMANTIC_CLIPVERTEX;
      *index = 0;
      break;
   case VARYING_SLOT_COL0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VARYING_SLOT_COL1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VARYING_SLOT_EDGE:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VARYING_SLOT_FACE:
      *name = TGSI_SEMANTIC_FACE;
      *index = 0;
      break;
   case VARYING_SLOT_FOGC:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VARYING_SLOT_LAYER:
      *name = TGSI_SEMANTIC_LAYER;
      *index = 0;
      break;
   case VARYING_SLOT_PNTC:
      *name = TGSI_SEMANTIC_PCOORD;
      *index = 0;
      break;
   case VARYING_SLOT_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VARYING_SLOT_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
      *index = 0;
      break;
   case VARYING_SLOT_PSIZ:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
      *index = 0;
      break;
   case VARYING_SLOT_VIEWPORT:
      *name = TGSI_SEMANTIC_VIEWPORT_INDEX;
      *index = 0;
      break;
   default:
      ERROR("unknown varying slot %u\n", slot);
      assert(false);
      break;
   }
}

static void
frag_result_to_tgsi_semantic(unsigned slot, unsigned *name, unsigned *index)
{
   if (slot >= FRAG_RESULT_DATA0) {
      *name = TGSI_SEMANTIC_COLOR;
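      // FRAG_RESULT_DATA0 sits two enum values past FRAG_RESULT_COLOR, so
      // the subtraction below maps FRAG_RESULT_DATAi to color index i.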
      *index = slot - FRAG_RESULT_COLOR - 2; // intentional
      return;
   }

   switch (slot) {
   case FRAG_RESULT_COLOR:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case FRAG_RESULT_DEPTH:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case FRAG_RESULT_SAMPLE_MASK:
      *name = TGSI_SEMANTIC_SAMPLEMASK;
      *index = 0;
      break;
   default:
      ERROR("unknown frag result slot %u\n", slot);
      assert(false);
      break;
   }
}

// copy of _mesa_sysval_to_semantic
static void
system_val_to_tgsi_semantic(unsigned val, unsigned *name, unsigned *index)
{
   *index = 0;
   switch (val) {
   // Vertex shader
   case SYSTEM_VALUE_VERTEX_ID:
      *name = TGSI_SEMANTIC_VERTEXID;
      break;
   case SYSTEM_VALUE_INSTANCE_ID:
      *name = TGSI_SEMANTIC_INSTANCEID;
      break;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      *name = TGSI_SEMANTIC_VERTEXID_NOBASE;
      break;
   case SYSTEM_VALUE_BASE_VERTEX:
      *name = TGSI_SEMANTIC_BASEVERTEX;
      break;
   case SYSTEM_VALUE_BASE_INSTANCE:
      *name = TGSI_SEMANTIC_BASEINSTANCE;
      break;
   case SYSTEM_VALUE_DRAW_ID:
      *name = TGSI_SEMANTIC_DRAWID;
      break;

   // Geometry shader
   case SYSTEM_VALUE_INVOCATION_ID:
      *name = TGSI_SEMANTIC_INVOCATIONID;
      break;

   // Fragment shader
   case SYSTEM_VALUE_FRAG_COORD:
      *name = TGSI_SEMANTIC_POSITION;
      break;
   case SYSTEM_VALUE_FRONT_FACE:
      *name = TGSI_SEMANTIC_FACE;
      break;
   case SYSTEM_VALUE_SAMPLE_ID:
      *name = TGSI_SEMANTIC_SAMPLEID;
      break;
   case SYSTEM_VALUE_SAMPLE_POS:
      *name = TGSI_SEMANTIC_SAMPLEPOS;
      break;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      *name = TGSI_SEMANTIC_SAMPLEMASK;
      break;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      *name = TGSI_SEMANTIC_HELPER_INVOCATION;
      break;

   // Tessellation shader
   case SYSTEM_VALUE_TESS_COORD:
      *name = TGSI_SEMANTIC_TESSCOORD;
      break;
   case SYSTEM_VALUE_VERTICES_IN:
      *name = TGSI_SEMANTIC_VERTICESIN;
      break;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
      break;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
      break;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
      break;

   // Compute shader
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      *name = TGSI_SEMANTIC_THREAD_ID;
      break;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      *name = TGSI_SEMANTIC_BLOCK_ID;
      break;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      *name = TGSI_SEMANTIC_GRID_SIZE;
      break;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      *name = TGSI_SEMANTIC_BLOCK_SIZE;
      break;

   // ARB_shader_ballot
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      *name = TGSI_SEMANTIC_SUBGROUP_SIZE;
      break;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      *name = TGSI_SEMANTIC_SUBGROUP_INVOCATION;
      break;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_EQ_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_GE_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_GT_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_LE_MASK;
      break;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      *name = TGSI_SEMANTIC_SUBGROUP_LT_MASK;
      break;

   default:
      ERROR("unknown system value %u\n", val);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->uniform_locations();
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->uniform_locations();
      else
         slots = type->fields.array->uniform_locations();
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;

   // we have to fix up the uniform locations for arrays
   unsigned numImages = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      const glsl_type *type = var->type;
      if (!type->without_array()->is_image())
         continue;
      var->data.driver_location = numImages;
      numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
   }

   nir_foreach_variable(var, &nir->inputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->in[vary].id = vary;
         info->in[vary].patch = var->data.patch;
         info->in[vary].sn = name;
         info->in[vary].si = index + i;
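         // for 64-bit types each component takes two mask bits; the doubled
         // mask is split into a low half (even i) and a high half (odd i)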
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->in[vary].mask |= ((1 << comp) - 1) << frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }

   info->numOutputs = 0;
   nir_foreach_variable(var, &nir->outputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         frag_result_to_tgsi_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;
            info->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->out[vary].id = vary;
         info->out[vary].patch = var->data.patch;
         info->out[vary].sn = name;
         info->out[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->out[vary].mask |= ((1 << comp) - 1) << frac;

         if (nir->info.outputs_read & 1ll << slot)
            info->out[vary].oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }

   info->numSysVals = 0;
   for (uint8_t i = 0; i < 64; ++i) {
      if (!(nir->info.system_values_read & 1ll << i))
         continue;

      system_val_to_tgsi_semantic(i, &name, &index);
      info->sv[info->numSysVals].sn = name;
      info->sv[info->numSysVals].si = index;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}

uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s\n",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

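   // 64-bit values take two 32-bit components each, so double the component
   // offset and carry into the next slot index when it overflows the vec4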
   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}

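// 64-bit loads from const/buffer memory, or loads with an indirect address,
// are split into two 32-bit loads plus an OP_MERGE; a single 64-bit access
// presumably isn't usable on these paths.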
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}

bool
Converter::parseNIR()
{
   info->bin.tlsSpace = 0;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize = nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      info->prop.fp.persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.inputPrim = nir->info.gs.input_primitive;
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   // we only support emitting the main function for now
   assert(!strcmp(function->name, "main"));
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
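      // lane id minus invocation id presumably yields the first lane of the
      // current patch, used below as the base for TCS output addressing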
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_foreach_register(reg, &function->impl->registers) {
      if (reg->num_array_elems) {
         // TODO: packed variables would be nice, but MemoryOpt fails
         // replace 4 with reg->num_components
         uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
         regToLmemOffset[reg->index] = info->bin.tlsSpace;
         info->bin.tlsSpace += size;
      }
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

bool
Converter::visit(nir_if *nif)
{
   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   assert(!lastThen->successors[1]);
   assert(!lastElse->successors[1]);

   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   // we only insert joinats if both branches end up at the end of the if
   // again. the reason for this not happening is breaks/continues/rets/...
   // which have their own handling
   if (lastThen->successors[0] == lastElse->successors[0])
      bb->joinAt = mkFlow(OP_JOINAT, convert(lastThen->successors[0]),
                          CC_ALWAYS, NULL);

   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastThen), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastElse), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   if (lastThen->successors[0] == lastElse->successors[0]) {
      setPosition(convert(lastThen->successors[0]), true);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB =
      convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

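   // set up the break target (the block after the loop) and the continue
   // target (the loop header) before emitting the loop body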
   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }
   Instruction *insn = bb->getExit();
   if (bb->cfg.incidentCount() != 0) {
      if (!insn || !insn->asFlow()) {
         mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
         bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
      } else if (insn && insn->op == OP_BRA && !insn->getPredicate() &&
                 tailBB->cfg.incidentCount() == 0) {
         // RA doesn't like having blocks around with no incident edge,
         // so we create a fake one to make it happy
         bb->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
      }
   }

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for immediate loads generated on the fly
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_deref:
      return visit(nir_instr_as_deref(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

ImgFormat
Converter::convertGLImgFormat(GLuint format)
{
#define FMT_CASE(a, b) \
  case GL_ ## a: return nv50_ir::FMT_ ## b

   switch (format) {
   FMT_CASE(NONE, NONE);

   FMT_CASE(RGBA32F, RGBA32F);
   FMT_CASE(RGBA16F, RGBA16F);
   FMT_CASE(RG32F, RG32F);
   FMT_CASE(RG16F, RG16F);
   FMT_CASE(R11F_G11F_B10F, R11G11B10F);
   FMT_CASE(R32F, R32F);
   FMT_CASE(R16F, R16F);

   FMT_CASE(RGBA32UI, RGBA32UI);
   FMT_CASE(RGBA16UI, RGBA16UI);
   FMT_CASE(RGB10_A2UI, RGB10A2UI);
   FMT_CASE(RGBA8UI, RGBA8UI);
   FMT_CASE(RG32UI, RG32UI);
   FMT_CASE(RG16UI, RG16UI);
   FMT_CASE(RG8UI, RG8UI);
   FMT_CASE(R32UI, R32UI);
   FMT_CASE(R16UI, R16UI);
   FMT_CASE(R8UI, R8UI);

   FMT_CASE(RGBA32I, RGBA32I);
   FMT_CASE(RGBA16I, RGBA16I);
   FMT_CASE(RGBA8I, RGBA8I);
   FMT_CASE(RG32I, RG32I);
   FMT_CASE(RG16I, RG16I);
   FMT_CASE(RG8I, RG8I);
   FMT_CASE(R32I, R32I);
   FMT_CASE(R16I, R16I);
   FMT_CASE(R8I, R8I);

   FMT_CASE(RGBA16, RGBA16);
   FMT_CASE(RGB10_A2, RGB10A2);
   FMT_CASE(RGBA8, RGBA8);
   FMT_CASE(RG16, RG16);
   FMT_CASE(RG8, RG8);
   FMT_CASE(R16, R16);
   FMT_CASE(R8, R8);

   FMT_CASE(RGBA16_SNORM, RGBA16_SNORM);
   FMT_CASE(RGBA8_SNORM, RGBA8_SNORM);
   FMT_CASE(RG16_SNORM, RG16_SNORM);
   FMT_CASE(RG8_SNORM, RG8_SNORM);
   FMT_CASE(R16_SNORM, R16_SNORM);
   FMT_CASE(R8_SNORM, R8_SNORM);

   FMT_CASE(BGRA_INTEGER, BGRA8);
   default:
      ERROR("unknown format %x\n", format);
      assert(false);
      return nv50_ir::FMT_NONE;
   }
#undef FMT_CASE
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < insn->num_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
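      // the offset source is the last one: src[1] for store_output, src[2]
      // for store_per_vertex_output (value and vertex index come first)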

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // depth value in the z component, NIR in x
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
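      // (reads the current render target value back by texel-fetching the
      // fragment's x/y/layer/sample position from the bound surface)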
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < insn->num_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         mode = translateInterpMode(&vary, nvirOp);
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode |= immMode.reg.data.u32;
         }
      }

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
2032 Value *offs[2];
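// the hardware takes the offsets as two 16-bit fixed point values
// packed into one word: clamp each component to the representable
// [-0.5, 0.4375] range, scale by 4096 and merge y above x with INSBF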
2033 for (uint8_t c = 0; c < 2; c++) {
2034 offs[c] = getScratch();
2035 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
2036 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
2037 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
2038 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
2039 }
2040 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
2041
2042 mode = NV50_IR_INTERP_OFFSET;
2043 } else if (op == nir_intrinsic_load_barycentric_pixel) {
2044 mode = NV50_IR_INTERP_DEFAULT;
2045 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
2046 info->prop.fp.readsSampleLocations = true;
2047 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
2048 mode = NV50_IR_INTERP_OFFSET;
2049 } else {
2050 unreachable("all intrinsics already handled above");
2051 }
2052
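// the barycentric intrinsics produce two defs: component 0 holds the
// packed offsets (if any), component 1 the interpolation mode, which
// load_interpolated_input ORs into its own mode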
2053 loadImm(newDefs[1], mode);
2054 break;
2055 }
2056 case nir_intrinsic_discard:
2057 mkOp(OP_DISCARD, TYPE_NONE, NULL);
2058 break;
2059 case nir_intrinsic_discard_if: {
2060 Value *pred = getSSA(1, FILE_PREDICATE);
2061 if (insn->num_components > 1) {
2062 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
2063 assert(false);
2064 return false;
2065 }
2066 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2067 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
2068 break;
2069 }
2070 case nir_intrinsic_load_base_vertex:
2071 case nir_intrinsic_load_base_instance:
2072 case nir_intrinsic_load_draw_id:
2073 case nir_intrinsic_load_front_face:
2074 case nir_intrinsic_load_helper_invocation:
2075 case nir_intrinsic_load_instance_id:
2076 case nir_intrinsic_load_invocation_id:
2077 case nir_intrinsic_load_local_group_size:
2078 case nir_intrinsic_load_local_invocation_id:
2079 case nir_intrinsic_load_num_work_groups:
2080 case nir_intrinsic_load_patch_vertices_in:
2081 case nir_intrinsic_load_primitive_id:
2082 case nir_intrinsic_load_sample_id:
2083 case nir_intrinsic_load_sample_mask_in:
2084 case nir_intrinsic_load_sample_pos:
2085 case nir_intrinsic_load_subgroup_eq_mask:
2086 case nir_intrinsic_load_subgroup_ge_mask:
2087 case nir_intrinsic_load_subgroup_gt_mask:
2088 case nir_intrinsic_load_subgroup_le_mask:
2089 case nir_intrinsic_load_subgroup_lt_mask:
2090 case nir_intrinsic_load_subgroup_invocation:
2091 case nir_intrinsic_load_tess_coord:
2092 case nir_intrinsic_load_tess_level_inner:
2093 case nir_intrinsic_load_tess_level_outer:
2094 case nir_intrinsic_load_vertex_id:
2095 case nir_intrinsic_load_work_group_id: {
2096 const DataType dType = getDType(insn);
2097 SVSemantic sv = convert(op);
2098 LValues &newDefs = convert(&insn->dest);
2099
2100 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2101 Value *def;
2102 if (typeSizeof(dType) == 8)
2103 def = getSSA();
2104 else
2105 def = newDefs[i];
2106
2107 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
2108 loadImm(def, 0u);
2109 } else {
2110 Symbol *sym = mkSysVal(sv, i);
2111 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
2112 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
2113 rdsv->perPatch = 1;
2114 }
2115
2116 if (typeSizeof(dType) == 8)
2117 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
2118 }
2119 break;
2120 }
2121 // constants
2122 case nir_intrinsic_load_subgroup_size: {
2123 LValues &newDefs = convert(&insn->dest);
2124 loadImm(newDefs[0], 32u);
2125 break;
2126 }
2127 case nir_intrinsic_vote_all:
2128 case nir_intrinsic_vote_any:
2129 case nir_intrinsic_vote_ieq: {
2130 LValues &newDefs = convert(&insn->dest);
2131 Value *pred = getScratch(1, FILE_PREDICATE);
2132 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2133 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
2134 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
2135 break;
2136 }
2137 case nir_intrinsic_ballot: {
2138 LValues &newDefs = convert(&insn->dest);
2139 Value *pred = getSSA(1, FILE_PREDICATE);
2140 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2141 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
2142 break;
2143 }
2144 case nir_intrinsic_read_first_invocation:
2145 case nir_intrinsic_read_invocation: {
2146 LValues &newDefs = convert(&insn->dest);
2147 const DataType dType = getDType(insn);
2148 Value *tmp = getScratch();
2149
2150 if (op == nir_intrinsic_read_first_invocation) {
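// find the lowest active lane: VOTE ANY of an all-ones predicate
// yields the mask of active lanes, and BFIND in shift-amount mode on
// the bit-reversed mask returns the index of its lowest set bit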
2151 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
2152 mkOp2(OP_EXTBF, TYPE_U32, tmp, tmp, mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2153 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2154 } else
2155 tmp = getSrc(&insn->src[1], 0);
2156
2157 for (uint8_t i = 0; i < insn->num_components; ++i) {
2158 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
2159 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
2160 }
2161 break;
2162 }
2163 case nir_intrinsic_load_per_vertex_input: {
2164 const DataType dType = getDType(insn);
2165 LValues &newDefs = convert(&insn->dest);
2166 Value *indirectVertex;
2167 Value *indirectOffset;
2168 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2169 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2170
2171 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
2172 mkImm(baseVertex), indirectVertex);
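// PFETCH translates the (base + indirect) vertex index into the
// address base the per-vertex attribute loads below are relative to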
2173 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2174 uint32_t address = getSlotAddress(insn, idx, i);
2175 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
2176 indirectOffset, vtxBase, info->in[idx].patch);
2177 }
2178 break;
2179 }
2180 case nir_intrinsic_load_per_vertex_output: {
2181 const DataType dType = getDType(insn);
2182 LValues &newDefs = convert(&insn->dest);
2183 Value *indirectVertex;
2184 Value *indirectOffset;
2185 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2186 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2187 Value *vtxBase = NULL;
2188
2189 if (indirectVertex)
2190 vtxBase = indirectVertex;
2191 else
2192 vtxBase = loadImm(NULL, baseVertex);
2193
2194 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
2195
2196 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2197 uint32_t address = getSlotAddress(insn, idx, i);
2198 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
2199 indirectOffset, vtxBase, info->out[idx].patch);
2200 }
2201 break;
2202 }
2203 case nir_intrinsic_emit_vertex:
2204 if (info->io.genUserClip > 0)
2205 handleUserClipPlanes();
2206 // fallthrough
2207 case nir_intrinsic_end_primitive: {
2208 uint32_t idx = nir_intrinsic_stream_id(insn);
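// fixed = 1 marks the op as having side effects, so it survives DCE
// despite defining no values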
2209 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
2210 break;
2211 }
2212 case nir_intrinsic_load_ubo: {
2213 const DataType dType = getDType(insn);
2214 LValues &newDefs = convert(&insn->dest);
2215 Value *indirectIndex;
2216 Value *indirectOffset;
2217 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
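// shifted by one because c0[] is reserved for the regular uniforms
// handled in nir_intrinsic_load_uniform above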
2218 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2219
2220 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2221 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
2222 indirectOffset, indirectIndex);
2223 }
2224 break;
2225 }
2226 case nir_intrinsic_get_buffer_size: {
2227 LValues &newDefs = convert(&insn->dest);
2228 const DataType dType = getDType(insn);
2229 Value *indirectBuffer;
2230 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2231
2232 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2233 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2234 break;
2235 }
2236 case nir_intrinsic_store_ssbo: {
2237 DataType sType = getSType(insn->src[0], false, false);
2238 Value *indirectBuffer;
2239 Value *indirectOffset;
2240 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2241 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2242
2243 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2244 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2245 continue;
2246 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2247 offset + i * typeSizeof(sType));
2248 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2249 ->setIndirect(0, 1, indirectBuffer);
2250 }
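// globalAccess is a bitmask: bit 0 marks global reads, bit 1 global
// writes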
2251 info->io.globalAccess |= 0x2;
2252 break;
2253 }
2254 case nir_intrinsic_load_ssbo: {
2255 const DataType dType = getDType(insn);
2256 LValues &newDefs = convert(&insn->dest);
2257 Value *indirectBuffer;
2258 Value *indirectOffset;
2259 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2260 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2261
2262 for (uint8_t i = 0u; i < insn->num_components; ++i)
2263 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2264 indirectOffset, indirectBuffer);
2265
2266 info->io.globalAccess |= 0x1;
2267 break;
2268 }
2269 case nir_intrinsic_shared_atomic_add:
2270 case nir_intrinsic_shared_atomic_and:
2271 case nir_intrinsic_shared_atomic_comp_swap:
2272 case nir_intrinsic_shared_atomic_exchange:
2273 case nir_intrinsic_shared_atomic_or:
2274 case nir_intrinsic_shared_atomic_imax:
2275 case nir_intrinsic_shared_atomic_imin:
2276 case nir_intrinsic_shared_atomic_umax:
2277 case nir_intrinsic_shared_atomic_umin:
2278 case nir_intrinsic_shared_atomic_xor: {
2279 const DataType dType = getDType(insn);
2280 LValues &newDefs = convert(&insn->dest);
2281 Value *indirectOffset;
2282 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2283 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2284 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2285 if (op == nir_intrinsic_shared_atomic_comp_swap)
2286 atom->setSrc(2, getSrc(&insn->src[2], 0));
2287 atom->setIndirect(0, 0, indirectOffset);
2288 atom->subOp = getSubOp(op);
2289 break;
2290 }
2291 case nir_intrinsic_ssbo_atomic_add:
2292 case nir_intrinsic_ssbo_atomic_and:
2293 case nir_intrinsic_ssbo_atomic_comp_swap:
2294 case nir_intrinsic_ssbo_atomic_exchange:
2295 case nir_intrinsic_ssbo_atomic_or:
2296 case nir_intrinsic_ssbo_atomic_imax:
2297 case nir_intrinsic_ssbo_atomic_imin:
2298 case nir_intrinsic_ssbo_atomic_umax:
2299 case nir_intrinsic_ssbo_atomic_umin:
2300 case nir_intrinsic_ssbo_atomic_xor: {
2301 const DataType dType = getDType(insn);
2302 LValues &newDefs = convert(&insn->dest);
2303 Value *indirectBuffer;
2304 Value *indirectOffset;
2305 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2306 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2307
2308 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2309 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2310 getSrc(&insn->src[2], 0));
2311 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2312 atom->setSrc(2, getSrc(&insn->src[3], 0));
2313 atom->setIndirect(0, 0, indirectOffset);
2314 atom->setIndirect(0, 1, indirectBuffer);
2315 atom->subOp = getSubOp(op);
2316
2317 info->io.globalAccess |= 0x2;
2318 break;
2319 }
2320 case nir_intrinsic_image_deref_atomic_add:
2321 case nir_intrinsic_image_deref_atomic_and:
2322 case nir_intrinsic_image_deref_atomic_comp_swap:
2323 case nir_intrinsic_image_deref_atomic_exchange:
2324 case nir_intrinsic_image_deref_atomic_max:
2325 case nir_intrinsic_image_deref_atomic_min:
2326 case nir_intrinsic_image_deref_atomic_or:
2327 case nir_intrinsic_image_deref_atomic_xor:
2328 case nir_intrinsic_image_deref_load:
2329 case nir_intrinsic_image_deref_samples:
2330 case nir_intrinsic_image_deref_size:
2331 case nir_intrinsic_image_deref_store: {
2332 const nir_variable *tex;
2333 std::vector<Value*> srcs, defs;
2334 Value *indirect;
2335 DataType ty;
2336
2337 uint32_t mask = 0;
2338 nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
2339 const glsl_type *type = deref->type;
2340 TexInstruction::Target target =
2341 convert((glsl_sampler_dim)type->sampler_dimensionality,
2342 type->sampler_array, type->sampler_shadow);
2343 unsigned int argCount = getNIRArgCount(target);
2344 uint16_t location = handleDeref(deref, indirect, tex);
2345
2346 if (opInfo.has_dest) {
2347 LValues &newDefs = convert(&insn->dest);
2348 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2349 defs.push_back(newDefs[i]);
2350 mask |= 1 << i;
2351 }
2352 }
2353
2354 switch (op) {
2355 case nir_intrinsic_image_deref_atomic_add:
2356 case nir_intrinsic_image_deref_atomic_and:
2357 case nir_intrinsic_image_deref_atomic_comp_swap:
2358 case nir_intrinsic_image_deref_atomic_exchange:
2359 case nir_intrinsic_image_deref_atomic_max:
2360 case nir_intrinsic_image_deref_atomic_min:
2361 case nir_intrinsic_image_deref_atomic_or:
2362 case nir_intrinsic_image_deref_atomic_xor:
2363 ty = getDType(insn);
2364 mask = 0x1;
2365 info->io.globalAccess |= 0x2;
2366 break;
2367 case nir_intrinsic_image_deref_load:
2368 ty = TYPE_U32;
2369 info->io.globalAccess |= 0x1;
2370 break;
2371 case nir_intrinsic_image_deref_store:
2372 ty = TYPE_U32;
2373 mask = 0xf;
2374 info->io.globalAccess |= 0x2;
2375 break;
2376 case nir_intrinsic_image_deref_samples:
2377 mask = 0x8;
2378 ty = TYPE_U32;
2379 break;
2380 case nir_intrinsic_image_deref_size:
2381 ty = TYPE_U32;
2382 break;
2383 default:
2384 unreachable("unhandled image opcode");
2385 break;
2386 }
2387
2388 // coords
2389 if (opInfo.num_srcs >= 2)
2390 for (unsigned int i = 0u; i < argCount; ++i)
2391 srcs.push_back(getSrc(&insn->src[1], i));
2392
2393 // the sample index is just another src appended after the coords
2394 if (opInfo.num_srcs >= 3 && target.isMS())
2395 srcs.push_back(getSrc(&insn->src[2], 0));
2396
2397 if (opInfo.num_srcs >= 4) {
2398 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2399 for (uint8_t i = 0u; i < components; ++i)
2400 srcs.push_back(getSrc(&insn->src[3], i));
2401 }
2402
2403 if (opInfo.num_srcs >= 5)
2404 // 1 for atomic swap
2405 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2406 srcs.push_back(getSrc(&insn->src[4], i));
2407
2408 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2409 texi->tex.bindless = false;
2410 texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(tex->data.image.format)];
2411 texi->tex.mask = mask;
2412 texi->cache = getCacheModeFromVar(tex);
2413 texi->setType(ty);
2414 texi->subOp = getSubOp(op);
2415
2416 if (indirect)
2417 texi->setIndirectR(indirect);
2418
2419 break;
2420 }
2421 case nir_intrinsic_store_shared: {
2422 DataType sType = getSType(insn->src[0], false, false);
2423 Value *indirectOffset;
2424 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2425
2426 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2427 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2428 continue;
2429 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
2430 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2431 }
2432 break;
2433 }
2434 case nir_intrinsic_load_shared: {
2435 const DataType dType = getDType(insn);
2436 LValues &newDefs = convert(&insn->dest);
2437 Value *indirectOffset;
2438 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2439
2440 for (uint8_t i = 0u; i < insn->num_components; ++i)
2441 loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
2442
2443 break;
2444 }
2445 case nir_intrinsic_barrier: {
2446 // TODO: add flag to shader_info
2447 info->numBarriers = 1;
2448 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2449 bar->fixed = 1;
2450 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2451 break;
2452 }
2453 case nir_intrinsic_group_memory_barrier:
2454 case nir_intrinsic_memory_barrier:
2455 case nir_intrinsic_memory_barrier_atomic_counter:
2456 case nir_intrinsic_memory_barrier_buffer:
2457 case nir_intrinsic_memory_barrier_image:
2458 case nir_intrinsic_memory_barrier_shared: {
2459 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2460 bar->fixed = 1;
2461 bar->subOp = getSubOp(op);
2462 break;
2463 }
2464 case nir_intrinsic_shader_clock: {
2465 const DataType dType = getDType(insn);
2466 LValues &newDefs = convert(&insn->dest);
2467
2468 loadImm(newDefs[0], 0u);
2469 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2470 break;
2471 }
2472 default:
2473 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2474 return false;
2475 }
2476
2477 return true;
2478 }
2479
2480 bool
2481 Converter::visit(nir_jump_instr *insn)
2482 {
2483 switch (insn->type) {
2484 case nir_jump_return:
2485 // TODO: this only works in the main function
2486 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2487 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2488 break;
2489 case nir_jump_break:
2490 case nir_jump_continue: {
2491 bool isBreak = insn->type == nir_jump_break;
2492 nir_block *block = insn->instr.block;
2493 assert(!block->successors[1]);
2494 BasicBlock *target = convert(block->successors[0]);
2495 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2496 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2497 break;
2498 }
2499 default:
2500 ERROR("unknown nir_jump_type %u\n", insn->type);
2501 return false;
2502 }
2503
2504 return true;
2505 }
2506
2507 Value*
2508 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2509 {
2510 Value *val;
2511
2512 if (immInsertPos)
2513 setPosition(immInsertPos, true);
2514 else
2515 setPosition(bb, false);
2516
2517 switch (insn->def.bit_size) {
2518 case 64:
2519 val = loadImm(getSSA(8), insn->value.u64[idx]);
2520 break;
2521 case 32:
2522 val = loadImm(getSSA(4), insn->value.u32[idx]);
2523 break;
2524 case 16:
2525 val = loadImm(getSSA(2), insn->value.u16[idx]);
2526 break;
2527 case 8:
2528 val = loadImm(getSSA(1), insn->value.u8[idx]);
2529 break;
2530 default:
2531 unreachable("unhandled bit size!\n");
2532 }
2533 setPosition(bb, true);
2534 return val;
2535 }
2536
2537 bool
2538 Converter::visit(nir_load_const_instr *insn)
2539 {
2540 assert(insn->def.bit_size <= 64);
2541 immediates[insn->def.index] = insn;
2542 return true;
2543 }
2544
2545 #define DEFAULT_CHECKS \
2546 if (insn->dest.dest.ssa.num_components > 1) { \
2547 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2548 return false; \
2549 } \
2550 if (insn->dest.write_mask != 1) { \
2551 ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
2552 return false; \
2553 }
2554 bool
2555 Converter::visit(nir_alu_instr *insn)
2556 {
2557 const nir_op op = insn->op;
2558 const nir_op_info &info = nir_op_infos[op];
2559 DataType dType = getDType(insn);
2560 const std::vector<DataType> sTypes = getSTypes(insn);
2561
2562 Instruction *oldPos = this->bb->getExit();
2563
2564 switch (op) {
2565 case nir_op_fabs:
2566 case nir_op_iabs:
2567 case nir_op_fadd:
2568 case nir_op_iadd:
2569 case nir_op_fand:
2570 case nir_op_iand:
2571 case nir_op_fceil:
2572 case nir_op_fcos:
2573 case nir_op_fddx:
2574 case nir_op_fddx_coarse:
2575 case nir_op_fddx_fine:
2576 case nir_op_fddy:
2577 case nir_op_fddy_coarse:
2578 case nir_op_fddy_fine:
2579 case nir_op_fdiv:
2580 case nir_op_idiv:
2581 case nir_op_udiv:
2582 case nir_op_fexp2:
2583 case nir_op_ffloor:
2584 case nir_op_ffma:
2585 case nir_op_flog2:
2586 case nir_op_fmax:
2587 case nir_op_imax:
2588 case nir_op_umax:
2589 case nir_op_fmin:
2590 case nir_op_imin:
2591 case nir_op_umin:
2592 case nir_op_fmod:
2593 case nir_op_imod:
2594 case nir_op_umod:
2595 case nir_op_fmul:
2596 case nir_op_imul:
2597 case nir_op_imul_high:
2598 case nir_op_umul_high:
2599 case nir_op_fneg:
2600 case nir_op_ineg:
2601 case nir_op_fnot:
2602 case nir_op_inot:
2603 case nir_op_for:
2604 case nir_op_ior:
2605 case nir_op_pack_64_2x32_split:
2606 case nir_op_fpow:
2607 case nir_op_frcp:
2608 case nir_op_frem:
2609 case nir_op_irem:
2610 case nir_op_frsq:
2611 case nir_op_fsat:
2612 case nir_op_ishr:
2613 case nir_op_ushr:
2614 case nir_op_fsin:
2615 case nir_op_fsqrt:
2616 case nir_op_fsub:
2617 case nir_op_isub:
2618 case nir_op_ftrunc:
2619 case nir_op_ishl:
2620 case nir_op_fxor:
2621 case nir_op_ixor: {
2622 DEFAULT_CHECKS;
2623 LValues &newDefs = convert(&insn->dest);
2624 operation preOp = preOperationNeeded(op);
2625 if (preOp != OP_NOP) {
2626 assert(info.num_inputs < 2);
2627 Value *tmp = getSSA(typeSizeof(dType));
2628 Instruction *i0 = mkOp(preOp, dType, tmp);
2629 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2630 if (info.num_inputs) {
2631 i0->setSrc(0, getSrc(&insn->src[0]));
2632 i1->setSrc(0, tmp);
2633 }
2634 i1->subOp = getSubOp(op);
2635 } else {
2636 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2637 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2638 i->setSrc(s, getSrc(&insn->src[s]));
2639 }
2640 i->subOp = getSubOp(op);
2641 }
2642 break;
2643 }
2644 case nir_op_ifind_msb:
2645 case nir_op_ufind_msb: {
2646 DEFAULT_CHECKS;
2647 LValues &newDefs = convert(&insn->dest);
2648 dType = sTypes[0];
2649 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2650 break;
2651 }
2652 case nir_op_fround_even: {
2653 DEFAULT_CHECKS;
2654 LValues &newDefs = convert(&insn->dest);
2655 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2656 break;
2657 }
2658 // convert instructions
2659 case nir_op_f2f32:
2660 case nir_op_f2i32:
2661 case nir_op_f2u32:
2662 case nir_op_i2f32:
2663 case nir_op_i2i32:
2664 case nir_op_u2f32:
2665 case nir_op_u2u32:
2666 case nir_op_f2f64:
2667 case nir_op_f2i64:
2668 case nir_op_f2u64:
2669 case nir_op_i2f64:
2670 case nir_op_i2i64:
2671 case nir_op_u2f64:
2672 case nir_op_u2u64: {
2673 DEFAULT_CHECKS;
2674 LValues &newDefs = convert(&insn->dest);
2675 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2676 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2677 i->rnd = ROUND_Z;
2678 i->sType = sTypes[0];
2679 break;
2680 }
2681 // compare instructions
2682 case nir_op_feq32:
2683 case nir_op_ieq32:
2684 case nir_op_fge32:
2685 case nir_op_ige32:
2686 case nir_op_uge32:
2687 case nir_op_flt32:
2688 case nir_op_ilt32:
2689 case nir_op_ult32:
2690 case nir_op_fne32:
2691 case nir_op_ine32: {
2692 DEFAULT_CHECKS;
2693 LValues &newDefs = convert(&insn->dest);
2694 Instruction *i = mkCmp(getOperation(op),
2695 getCondCode(op),
2696 dType,
2697 newDefs[0],
2698 dType,
2699 getSrc(&insn->src[0]),
2700 getSrc(&insn->src[1]));
2701 if (info.num_inputs == 3)
2702 i->setSrc(2, getSrc(&insn->src[2]));
2703 i->sType = sTypes[0];
2704 break;
2705 }
2706 // these are weird ALU ops and need special handling, because
2707 // 1. they are always component based
2708 // 2. they basically just merge multiple values into one data type
2709 case nir_op_imov:
2710 case nir_op_fmov:
2711 if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
2712 nir_reg_dest& reg = insn->dest.dest.reg;
2713 uint32_t goffset = regToLmemOffset[reg.reg->index];
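// NIR register arrays are spilled to local memory; regToLmemOffset
// holds the l[] base address assigned to each array register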
2714 uint8_t comps = reg.reg->num_components;
2715 uint8_t size = reg.reg->bit_size / 8;
2716 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2717 uint32_t aoffset = csize * reg.base_offset;
2718 Value *indirect = NULL;
2719
2720 if (reg.indirect)
2721 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
2722 getSrc(reg.indirect, 0), mkImm(csize));
2723
2724 for (uint8_t i = 0u; i < comps; ++i) {
2725 if (!((1u << i) & insn->dest.write_mask))
2726 continue;
2727
2728 Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
2729 mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
2730 }
2731 break;
2732 } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
2733 LValues &newDefs = convert(&insn->dest);
2734 nir_reg_src& reg = insn->src[0].src.reg;
2735 uint32_t goffset = regToLmemOffset[reg.reg->index];
2736 // uint8_t comps = reg.reg->num_components;
2737 uint8_t size = reg.reg->bit_size / 8;
2738 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2739 uint32_t aoffset = csize * reg.base_offset;
2740 Value *indirect = NULL;
2741
2742 if (reg.indirect)
2743 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));
2744
2745 for (uint8_t i = 0u; i < newDefs.size(); ++i)
2746 loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
2747
2748 break;
2749 } else {
2750 LValues &newDefs = convert(&insn->dest);
2751 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2752 mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
2753 }
2754 }
2755 break;
2756 case nir_op_vec2:
2757 case nir_op_vec3:
2758 case nir_op_vec4: {
2759 LValues &newDefs = convert(&insn->dest);
2760 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2761 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2762 }
2763 break;
2764 }
2765 // (un)pack
2766 case nir_op_pack_64_2x32: {
2767 LValues &newDefs = convert(&insn->dest);
2768 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2769 merge->setSrc(0, getSrc(&insn->src[0], 0));
2770 merge->setSrc(1, getSrc(&insn->src[0], 1));
2771 break;
2772 }
2773 case nir_op_pack_half_2x16_split: {
2774 LValues &newDefs = convert(&insn->dest);
2775 Value *tmpH = getSSA();
2776 Value *tmpL = getSSA();
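// convert both f32 halves to f16, then INSBF (width 16 at bit 16)
// packs the high half above the low one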
2777
2778 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2779 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2780 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2781 break;
2782 }
2783 case nir_op_unpack_half_2x16_split_x:
2784 case nir_op_unpack_half_2x16_split_y: {
2785 LValues &newDefs = convert(&insn->dest);
2786 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2787 if (op == nir_op_unpack_half_2x16_split_y)
2788 cvt->subOp = 1;
2789 break;
2790 }
2791 case nir_op_unpack_64_2x32: {
2792 LValues &newDefs = convert(&insn->dest);
2793 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2794 break;
2795 }
2796 case nir_op_unpack_64_2x32_split_x: {
2797 LValues &newDefs = convert(&insn->dest);
2798 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2799 break;
2800 }
2801 case nir_op_unpack_64_2x32_split_y: {
2802 LValues &newDefs = convert(&insn->dest);
2803 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2804 break;
2805 }
2806 // special instructions
2807 case nir_op_fsign:
2808 case nir_op_isign: {
2809 DEFAULT_CHECKS;
2810 DataType iType;
2811 if (::isFloatType(dType))
2812 iType = TYPE_F32;
2813 else
2814 iType = TYPE_S32;
2815
2816 LValues &newDefs = convert(&insn->dest);
2817 LValue *val0 = getScratch();
2818 LValue *val1 = getScratch();
2819 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2820 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2821
2822 if (dType == TYPE_F64) {
2823 mkOp2(OP_SUB, iType, val0, val0, val1);
2824 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2825 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2826 mkOp2(OP_SUB, iType, val0, val1, val0);
2827 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2828 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2829 } else if (::isFloatType(dType))
2830 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2831 else
2832 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2833 break;
2834 }
2835 case nir_op_fcsel:
2836 case nir_op_b32csel: {
2837 DEFAULT_CHECKS;
2838 LValues &newDefs = convert(&insn->dest);
2839 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2840 break;
2841 }
2842 case nir_op_ibitfield_extract:
2843 case nir_op_ubitfield_extract: {
2844 DEFAULT_CHECKS;
2845 Value *tmp = getSSA();
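// EXTBF expects the field encoded as (width << 8) | offset; the INSBF
// below builds exactly that word from the separate offset and bits
// sources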
2846 LValues &newDefs = convert(&insn->dest);
2847 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2848 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2849 break;
2850 }
2851 case nir_op_bfm: {
2852 DEFAULT_CHECKS;
2853 LValues &newDefs = convert(&insn->dest);
2854 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2855 break;
2856 }
2857 case nir_op_bitfield_insert: {
2858 DEFAULT_CHECKS;
2859 LValues &newDefs = convert(&insn->dest);
2860 LValue *temp = getSSA();
2861 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2862 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2863 break;
2864 }
2865 case nir_op_bit_count: {
2866 DEFAULT_CHECKS;
2867 LValues &newDefs = convert(&insn->dest);
2868 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2869 break;
2870 }
2871 case nir_op_bitfield_reverse: {
2872 DEFAULT_CHECKS;
2873 LValues &newDefs = convert(&insn->dest);
2874 mkOp2(OP_EXTBF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2875 break;
2876 }
2877 case nir_op_find_lsb: {
2878 DEFAULT_CHECKS;
2879 LValues &newDefs = convert(&insn->dest);
2880 Value *tmp = getSSA();
2881 mkOp2(OP_EXTBF, TYPE_U32, tmp, getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2882 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2883 break;
2884 }
2885 // boolean conversions
2886 case nir_op_b2f32: {
2887 DEFAULT_CHECKS;
2888 LValues &newDefs = convert(&insn->dest);
2889 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2890 break;
2891 }
2892 case nir_op_b2f64: {
2893 DEFAULT_CHECKS;
2894 LValues &newDefs = convert(&insn->dest);
2895 Value *tmp = getSSA(4);
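// NIR booleans are 0 or ~0: ANDing with 0x3ff00000, the high word of
// the double 1.0, and merging in a zero low word yields 0.0 or 1.0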
2896 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2897 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2898 break;
2899 }
2900 case nir_op_f2b32:
2901 case nir_op_i2b32: {
2902 DEFAULT_CHECKS;
2903 LValues &newDefs = convert(&insn->dest);
2904 Value *src1;
2905 if (typeSizeof(sTypes[0]) == 8) {
2906 src1 = loadImm(getSSA(8), 0.0);
2907 } else {
2908 src1 = zero;
2909 }
2910 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2911 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2912 break;
2913 }
2914 case nir_op_b2i32: {
2915 DEFAULT_CHECKS;
2916 LValues &newDefs = convert(&insn->dest);
2917 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2918 break;
2919 }
2920 case nir_op_b2i64: {
2921 DEFAULT_CHECKS;
2922 LValues &newDefs = convert(&insn->dest);
2923 LValue *def = getScratch();
2924 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2925 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2926 break;
2927 }
2928 default:
2929 ERROR("unknown nir_op %s\n", info.name);
2930 return false;
2931 }
2932
2933 if (!oldPos) {
2934 oldPos = this->bb->getEntry();
2935 // the case above might not have emitted any instruction at all
2936 if (unlikely(!oldPos))
2937 return true;
2938 oldPos->precise = insn->exact;
2939 }
2940
2941 while (oldPos->next) {
2942 oldPos = oldPos->next;
2943 oldPos->precise = insn->exact;
2944 }
2945 oldPos->saturate = insn->dest.saturate;
2946
2947 return true;
2948 }
2949 #undef DEFAULT_CHECKS
2950
2951 bool
2952 Converter::visit(nir_ssa_undef_instr *insn)
2953 {
2954 LValues &newDefs = convert(&insn->def);
2955 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2956 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2957 }
2958 return true;
2959 }
2960
2961 #define CASE_SAMPLER(ty) \
2962 case GLSL_SAMPLER_DIM_ ## ty : \
2963 if (isArray && !isShadow) \
2964 return TEX_TARGET_ ## ty ## _ARRAY; \
2965 else if (!isArray && isShadow) \
2966 return TEX_TARGET_## ty ## _SHADOW; \
2967 else if (isArray && isShadow) \
2968 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2969 else \
2970 return TEX_TARGET_ ## ty
2971
2972 TexTarget
2973 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2974 {
2975 switch (dim) {
2976 CASE_SAMPLER(1D);
2977 CASE_SAMPLER(2D);
2978 CASE_SAMPLER(CUBE);
2979 case GLSL_SAMPLER_DIM_3D:
2980 return TEX_TARGET_3D;
2981 case GLSL_SAMPLER_DIM_MS:
2982 if (isArray)
2983 return TEX_TARGET_2D_MS_ARRAY;
2984 return TEX_TARGET_2D_MS;
2985 case GLSL_SAMPLER_DIM_RECT:
2986 if (isShadow)
2987 return TEX_TARGET_RECT_SHADOW;
2988 return TEX_TARGET_RECT;
2989 case GLSL_SAMPLER_DIM_BUF:
2990 return TEX_TARGET_BUFFER;
2991 case GLSL_SAMPLER_DIM_EXTERNAL:
2992 return TEX_TARGET_2D;
2993 default:
2994 ERROR("unknown glsl_sampler_dim %u\n", dim);
2995 assert(false);
2996 return TEX_TARGET_COUNT;
2997 }
2998 }
2999 #undef CASE_SAMPLER
3000
3001 Value*
3002 Converter::applyProjection(Value *src, Value *proj)
3003 {
3004 if (!proj)
3005 return src;
3006 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
3007 }
3008
3009 unsigned int
3010 Converter::getNIRArgCount(TexInstruction::Target& target)
3011 {
3012 unsigned int result = target.getArgCount();
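// NIR's coord source carries fewer components than codegen's arg count
// for cube arrays and MS targets (the sample index arrives as a
// separate source)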
3013 if (target.isCube() && target.isArray())
3014 result--;
3015 if (target.isMS())
3016 result--;
3017 return result;
3018 }
3019
3020 uint16_t
3021 Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
3022 {
3023 typedef std::pair<uint32_t,Value*> DerefPair;
3024 std::list<DerefPair> derefs;
3025
3026 uint16_t result = 0;
3027 while (deref->deref_type != nir_deref_type_var) {
3028 switch (deref->deref_type) {
3029 case nir_deref_type_array: {
3030 Value *indirect;
3031 uint8_t size = type_size(deref->type);
3032 result += size * getIndirect(&deref->arr.index, 0, indirect);
3033
3034 if (indirect) {
3035 derefs.push_front(std::make_pair(size, indirect));
3036 }
3037
3038 break;
3039 }
3040 case nir_deref_type_struct: {
3041 result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
3042 break;
3043 }
3044 case nir_deref_type_var:
3045 default:
3046 unreachable("nir_deref_type_var reached in handleDeref!");
3047 break;
3048 }
3049 deref = nir_deref_instr_parent(deref);
3050 }
3051
3052 indirect = NULL;
3053 for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
3054 Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
3055 if (indirect)
3056 indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
3057 else
3058 indirect = offset;
3059 }
3060
3061 tex = nir_deref_instr_get_variable(deref);
3062 assert(tex);
3063
3064 return result + tex->data.driver_location;
3065 }
3066
3067 CacheMode
3068 Converter::getCacheModeFromVar(const nir_variable *var)
3069 {
3070 if (var->data.image.access == ACCESS_VOLATILE)
3071 return CACHE_CV;
3072 if (var->data.image.access == ACCESS_COHERENT)
3073 return CACHE_CG;
3074 return CACHE_CA;
3075 }
3076
3077 bool
3078 Converter::visit(nir_tex_instr *insn)
3079 {
3080 switch (insn->op) {
3081 case nir_texop_lod:
3082 case nir_texop_query_levels:
3083 case nir_texop_tex:
3084 case nir_texop_texture_samples:
3085 case nir_texop_tg4:
3086 case nir_texop_txb:
3087 case nir_texop_txd:
3088 case nir_texop_txf:
3089 case nir_texop_txf_ms:
3090 case nir_texop_txl:
3091 case nir_texop_txs: {
3092 LValues &newDefs = convert(&insn->dest);
3093 std::vector<Value*> srcs;
3094 std::vector<Value*> defs;
3095 std::vector<nir_src*> offsets;
3096 uint8_t mask = 0;
3097 bool lz = false;
3098 Value *proj = NULL;
3099 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
3100 operation op = getOperation(insn->op);
3101
3102 int r, s;
3103 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
3104 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
3105 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
3106 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
3107 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
3108 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
3109 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
3110 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
3111 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
3112 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
3113 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
3114
3115 if (projIdx != -1)
3116 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
3117
3118 srcs.resize(insn->coord_components);
3119 for (uint8_t i = 0u; i < insn->coord_components; ++i)
3120 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
3121
3122 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
3123 if (insn->coord_components) {
3124 uint32_t argCount = target.getArgCount();
3125
3126 if (target.isMS())
3127 argCount -= 1;
3128
3129 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
3130 srcs.push_back(getSSA());
3131 }
3132
3133 if (insn->op == nir_texop_texture_samples)
3134 srcs.push_back(zero);
3135 else if (!insn->num_srcs)
3136 srcs.push_back(loadImm(NULL, 0));
3137 if (biasIdx != -1)
3138 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
3139 if (lodIdx != -1)
3140 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3141 else if (op == OP_TXF)
3142 lz = true;
3143 if (msIdx != -1)
3144 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3145 if (offsetIdx != -1)
3146 offsets.push_back(&insn->src[offsetIdx].src);
3147 if (compIdx != -1)
3148 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3149 if (texOffIdx != -1) {
3150 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3151 texOffIdx = srcs.size() - 1;
3152 }
3153 if (sampOffIdx != -1) {
3154 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3155 sampOffIdx = srcs.size() - 1;
3156 }
3157
3158 r = insn->texture_index;
3159 s = insn->sampler_index;
3160
3161 defs.resize(newDefs.size());
3162 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3163 defs[d] = newDefs[d];
3164 mask |= 1 << d;
3165 }
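// implicit LOD via derivatives only exists in fragment shaders, and
// texel fetches from MS surfaces have no mip levels, so force level
// zero in both cases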
3166 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3167 lz = true;
3168
3169 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3170 texi->tex.levelZero = lz;
3171 texi->tex.mask = mask;
3172
3173 if (texOffIdx != -1)
3174 texi->tex.rIndirectSrc = texOffIdx;
3175 if (sampOffIdx != -1)
3176 texi->tex.sIndirectSrc = sampOffIdx;
3177
3178 switch (insn->op) {
3179 case nir_texop_tg4:
3180 if (!target.isShadow())
3181 texi->tex.gatherComp = insn->component;
3182 break;
3183 case nir_texop_txs:
3184 texi->tex.query = TXQ_DIMS;
3185 break;
3186 case nir_texop_texture_samples:
3187 texi->tex.mask = 0x4;
3188 texi->tex.query = TXQ_TYPE;
3189 break;
3190 case nir_texop_query_levels:
3191 texi->tex.mask = 0x8;
3192 texi->tex.query = TXQ_DIMS;
3193 break;
3194 default:
3195 break;
3196 }
3197
3198 texi->tex.useOffsets = offsets.size();
3199 if (texi->tex.useOffsets) {
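// codegen always consumes three offset components; dimensions the
// target lacks reuse the last valid component of the offset source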
3200 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3201 for (uint32_t c = 0u; c < 3; ++c) {
3202 uint8_t s2 = std::min(c, target.getDim() - 1);
3203 texi->offset[s][c].set(getSrc(offsets[s], s2));
3204 texi->offset[s][c].setInsn(texi);
3205 }
3206 }
3207 }
3208
3209 if (op == OP_TXG && offsetIdx == -1) {
3210 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
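// textureGatherOffsets-style immediate offsets land in tg4_offsets:
// emit all four (x, y) pairs, with the immediate loads positioned in
// front of the tex instruction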
3211 texi->tex.useOffsets = 4;
3212 setPosition(texi, false);
3213 for (uint8_t i = 0; i < 4; ++i) {
3214 for (uint8_t j = 0; j < 2; ++j) {
3215 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3216 texi->offset[i][j].setInsn(texi);
3217 }
3218 }
3219 setPosition(texi, true);
3220 }
3221 }
3222
3223 if (ddxIdx != -1 && ddyIdx != -1) {
3224 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3225 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3226 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3227 }
3228 }
3229
3230 break;
3231 }
3232 default:
3233 ERROR("unknown nir_texop %u\n", insn->op);
3234 return false;
3235 }
3236 return true;
3237 }
3238
3239 bool
3240 Converter::visit(nir_deref_instr *deref)
3241 {
3242 // we just ignore those, because image intrinsics are the only place where
3243 // we should end up with deref sources, and those handlers backtrack to the
3244 // nir_variable themselves. This code just exists to reject deref types we
3245 // don't expect to see.
3246 switch (deref->deref_type) {
3247 case nir_deref_type_array:
3248 case nir_deref_type_struct:
3249 case nir_deref_type_var:
3250 break;
3251 default:
3252 ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
3253 return false;
3254 }
3255 return true;
3256 }
3257
3258 bool
3259 Converter::run()
3260 {
3261 bool progress;
3262
3263 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3264 nir_print_shader(nir, stderr);
3265
3266 struct nir_lower_subgroups_options subgroup_options = {
3267 .subgroup_size = 32,
3268 .ballot_bit_size = 32,
3269 };
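// lower everything to scalar SSA up front: the ALU visitor only
// handles scalar ops (cf. the DEFAULT_CHECKS macro)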
3270
3271 NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
3272 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3273 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3274 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3275 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3276 NIR_PASS_V(nir, nir_lower_alu_to_scalar);
3277 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3278
3279 do {
3280 progress = false;
3281 NIR_PASS(progress, nir, nir_copy_prop);
3282 NIR_PASS(progress, nir, nir_opt_remove_phis);
3283 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3284 NIR_PASS(progress, nir, nir_opt_cse);
3285 NIR_PASS(progress, nir, nir_opt_algebraic);
3286 NIR_PASS(progress, nir, nir_opt_constant_folding);
3287 NIR_PASS(progress, nir, nir_copy_prop);
3288 NIR_PASS(progress, nir, nir_opt_dce);
3289 NIR_PASS(progress, nir, nir_opt_dead_cf);
3290 } while (progress);
3291
3292 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3293 NIR_PASS_V(nir, nir_lower_locals_to_regs);
3294 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
3295 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3296
3297 // Garbage collect dead instructions
3298 nir_sweep(nir);
3299
3300 if (!parseNIR()) {
3301 ERROR("Couldn't prase NIR!\n");
3302 return false;
3303 }
3304
3305 if (!assignSlots()) {
3306 ERROR("Couldn't assign slots!\n");
3307 return false;
3308 }
3309
3310 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3311 nir_print_shader(nir, stderr);
3312
3313 nir_foreach_function(function, nir) {
3314 if (!visit(function))
3315 return false;
3316 }
3317
3318 return true;
3319 }
3320
3321 } // unnamed namespace
3322
3323 namespace nv50_ir {
3324
3325 bool
3326 Program::makeFromNIR(struct nv50_ir_prog_info *info)
3327 {
3328 nir_shader *nir = (nir_shader*)info->bin.source;
3329 Converter converter(this, nir, info);
3330 bool result = converter.run();
3331 if (!result)
3332 return result;
3333 LoweringHelper lowering;
3334 lowering.run(this);
3335 tlsSize = info->bin.tlsSpace;
3336 return result;
3337 }
3338
3339 } // namespace nv50_ir