nv50/ir/nir: handle user clip planes for each emitted vertex
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
1 /*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Karol Herbst <kherbst@redhat.com>
23 */
24
25 #include "compiler/nir/nir.h"
26
27 #include "util/u_debug.h"
28
29 #include "codegen/nv50_ir.h"
30 #include "codegen/nv50_ir_from_common.h"
31 #include "codegen/nv50_ir_lowering_helper.h"
32 #include "codegen/nv50_ir_util.h"
33
34 #if __cplusplus >= 201103L
35 #include <unordered_map>
36 #else
37 #include <tr1/unordered_map>
38 #endif
39 #include <list>
40 #include <vector>
41
42 namespace {
43
44 #if __cplusplus >= 201103L
45 using std::hash;
46 using std::unordered_map;
47 #else
48 using std::tr1::hash;
49 using std::tr1::unordered_map;
50 #endif
51
52 using namespace nv50_ir;
53
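// size of a type in vec4 attribute slots; this matches the type_size
// callback NIR's IO lowering helpers expect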
54 int
55 type_size(const struct glsl_type *type)
56 {
57 return glsl_count_attribute_slots(type, false);
58 }
59
60 class Converter : public ConverterCommon
61 {
62 public:
63 Converter(Program *, nir_shader *, nv50_ir_prog_info *);
64
65 bool run();
66 private:
67 typedef std::vector<LValue*> LValues;
68 typedef unordered_map<unsigned, LValues> NirDefMap;
69 typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
70 typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
71
72 TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
73 LValues& convert(nir_alu_dest *);
74 BasicBlock* convert(nir_block *);
75 LValues& convert(nir_dest *);
76 SVSemantic convert(nir_intrinsic_op);
77 LValues& convert(nir_register *);
78 LValues& convert(nir_ssa_def *);
79
80 ImgFormat convertGLImgFormat(GLuint);
81
82 Value* getSrc(nir_alu_src *, uint8_t component = 0);
83 Value* getSrc(nir_register *, uint8_t);
84 Value* getSrc(nir_src *, uint8_t, bool indirect = false);
85 Value* getSrc(nir_ssa_def *, uint8_t);
86
87 // The returned value is the constant part of the given source (either the
88 // nir_src or the selected source component of an intrinsic). Even though
89 // this is mostly an optimization to be able to skip indirects in a few
90 // cases, sometimes we require immediate values or have to set some fields
91 // on instructions (e.g. tex) in order for codegen to consume those.
92 // If the found value has no constant part, the Value is returned
93 // through the Value parameter instead.
94 uint32_t getIndirect(nir_src *, uint8_t, Value *&);
95 uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&);
96
97 uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
98
99 void setInterpolate(nv50_ir_varying *,
100 uint8_t,
101 bool centroid,
102 unsigned semantics);
103
104 Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
105 uint8_t c, Value *indirect0 = NULL,
106 Value *indirect1 = NULL, bool patch = false);
107 void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
108 Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
109 Value *indirect1 = NULL);
110
111 bool isFloatType(nir_alu_type);
112 bool isSignedType(nir_alu_type);
113 bool isResultFloat(nir_op);
114 bool isResultSigned(nir_op);
115
116 DataType getDType(nir_alu_instr *);
117 DataType getDType(nir_intrinsic_instr *);
118 DataType getDType(nir_intrinsic_instr *, bool isSigned);
119 DataType getDType(nir_op, uint8_t);
120
121 std::vector<DataType> getSTypes(nir_alu_instr *);
122 DataType getSType(nir_src &, bool isFloat, bool isSigned);
123
124 operation getOperation(nir_intrinsic_op);
125 operation getOperation(nir_op);
126 operation getOperation(nir_texop);
127 operation preOperationNeeded(nir_op);
128
129 int getSubOp(nir_intrinsic_op);
130 int getSubOp(nir_op);
131
132 CondCode getCondCode(nir_op);
133
134 bool assignSlots();
135 bool parseNIR();
136
137 bool visit(nir_alu_instr *);
138 bool visit(nir_block *);
139 bool visit(nir_cf_node *);
140 bool visit(nir_deref_instr *);
141 bool visit(nir_function *);
142 bool visit(nir_if *);
143 bool visit(nir_instr *);
144 bool visit(nir_intrinsic_instr *);
145 bool visit(nir_jump_instr *);
146 bool visit(nir_load_const_instr*);
147 bool visit(nir_loop *);
148 bool visit(nir_ssa_undef_instr *);
149 bool visit(nir_tex_instr *);
150
151 // tex stuff
152 Value* applyProjection(Value *src, Value *proj);
153 unsigned int getNIRArgCount(TexInstruction::Target&);
154
155 // image stuff
156 uint16_t handleDeref(nir_deref_instr *, Value * & indirect, const nir_variable * &);
157 CacheMode getCacheModeFromVar(const nir_variable *);
158
159 nir_shader *nir;
160
161 NirDefMap ssaDefs;
162 NirDefMap regDefs;
163 NirArrayLMemOffsets regToLmemOffset;
164 NirBlockMap blocks;
165 unsigned int curLoopDepth;
166
167 BasicBlock *exit;
168 Value *zero;
169
170 int clipVertexOutput;
171
172 union {
173 struct {
174 Value *position;
175 } fp;
176 };
177 };
178
179 Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
180 : ConverterCommon(prog, info),
181 nir(nir),
182 curLoopDepth(0),
183 clipVertexOutput(-1)
184 {
185 zero = mkImm((uint32_t)0);
186 }
187
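// returns the BasicBlock for a nir_block, creating and caching it on
// first use so forward branches can reference not-yet-visited blocks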
188 BasicBlock *
189 Converter::convert(nir_block *block)
190 {
191 NirBlockMap::iterator it = blocks.find(block->index);
192 if (it != blocks.end())
193 return it->second;
194
195 BasicBlock *bb = new BasicBlock(func);
196 blocks[block->index] = bb;
197 return bb;
198 }
199
200 bool
201 Converter::isFloatType(nir_alu_type type)
202 {
203 return nir_alu_type_get_base_type(type) == nir_type_float;
204 }
205
206 bool
207 Converter::isSignedType(nir_alu_type type)
208 {
209 return nir_alu_type_get_base_type(type) == nir_type_int;
210 }
211
212 bool
213 Converter::isResultFloat(nir_op op)
214 {
215 const nir_op_info &info = nir_op_infos[op];
216 if (info.output_type != nir_type_invalid)
217 return isFloatType(info.output_type);
218
219 ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
220 assert(false);
221 return true;
222 }
223
224 bool
225 Converter::isResultSigned(nir_op op)
226 {
227 switch (op) {
228 // there is no umul and we get wrong results if we treat all muls as signed
229 case nir_op_imul:
230 case nir_op_inot:
231 return false;
232 default:
233 const nir_op_info &info = nir_op_infos[op];
234 if (info.output_type != nir_type_invalid)
235 return isSignedType(info.output_type);
236 ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
237 assert(false);
238 return true;
239 }
240 }
241
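// the destination type of an ALU instruction combines the op's output
// base type with the bit size of its (SSA or register) destination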
242 DataType
243 Converter::getDType(nir_alu_instr *insn)
244 {
245 if (insn->dest.dest.is_ssa)
246 return getDType(insn->op, insn->dest.dest.ssa.bit_size);
247 else
248 return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
249 }
250
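// only the signed atomic min/max intrinsics yield a signed result;
// every other intrinsic destination is treated as unsigned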
251 DataType
252 Converter::getDType(nir_intrinsic_instr *insn)
253 {
254 bool isSigned;
255 switch (insn->intrinsic) {
256 case nir_intrinsic_shared_atomic_imax:
257 case nir_intrinsic_shared_atomic_imin:
258 case nir_intrinsic_ssbo_atomic_imax:
259 case nir_intrinsic_ssbo_atomic_imin:
260 isSigned = true;
261 break;
262 default:
263 isSigned = false;
264 break;
265 }
266
267 return getDType(insn, isSigned);
268 }
269
270 DataType
271 Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
272 {
273 if (insn->dest.is_ssa)
274 return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
275 else
276 return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
277 }
278
279 DataType
280 Converter::getDType(nir_op op, uint8_t bitSize)
281 {
282 DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
283 if (ty == TYPE_NONE) {
284 ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
285 assert(false);
286 }
287 return ty;
288 }
289
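// gathers the source type of each ALU input from the nir_op_info table
// and the bit size of the corresponding source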
290 std::vector<DataType>
291 Converter::getSTypes(nir_alu_instr *insn)
292 {
293 const nir_op_info &info = nir_op_infos[insn->op];
294 std::vector<DataType> res(info.num_inputs);
295
296 for (uint8_t i = 0; i < info.num_inputs; ++i) {
297 if (info.input_types[i] != nir_type_invalid) {
298 res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
299 } else {
300 ERROR("getSType not implemented for %s idx %u\n", info.name, i);
301 assert(false);
302 res[i] = TYPE_NONE;
303 break;
304 }
305 }
306
307 return res;
308 }
309
310 DataType
311 Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
312 {
313 uint8_t bitSize;
314 if (src.is_ssa)
315 bitSize = src.ssa->bit_size;
316 else
317 bitSize = src.reg.reg->bit_size;
318
319 DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
320 if (ty == TYPE_NONE) {
321 const char *str;
322 if (isFloat)
323 str = "float";
324 else if (isSigned)
325 str = "int";
326 else
327 str = "uint";
328 ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
329 assert(false);
330 }
331 return ty;
332 }
333
334 operation
335 Converter::getOperation(nir_op op)
336 {
337 switch (op) {
338 // basic ops with float and int variants
339 case nir_op_fabs:
340 case nir_op_iabs:
341 return OP_ABS;
342 case nir_op_fadd:
343 case nir_op_iadd:
344 return OP_ADD;
345 case nir_op_fand:
346 case nir_op_iand:
347 return OP_AND;
348 case nir_op_ifind_msb:
349 case nir_op_ufind_msb:
350 return OP_BFIND;
351 case nir_op_fceil:
352 return OP_CEIL;
353 case nir_op_fcos:
354 return OP_COS;
355 case nir_op_f2f32:
356 case nir_op_f2f64:
357 case nir_op_f2i32:
358 case nir_op_f2i64:
359 case nir_op_f2u32:
360 case nir_op_f2u64:
361 case nir_op_i2f32:
362 case nir_op_i2f64:
363 case nir_op_i2i32:
364 case nir_op_i2i64:
365 case nir_op_u2f32:
366 case nir_op_u2f64:
367 case nir_op_u2u32:
368 case nir_op_u2u64:
369 return OP_CVT;
370 case nir_op_fddx:
371 case nir_op_fddx_coarse:
372 case nir_op_fddx_fine:
373 return OP_DFDX;
374 case nir_op_fddy:
375 case nir_op_fddy_coarse:
376 case nir_op_fddy_fine:
377 return OP_DFDY;
378 case nir_op_fdiv:
379 case nir_op_idiv:
380 case nir_op_udiv:
381 return OP_DIV;
382 case nir_op_fexp2:
383 return OP_EX2;
384 case nir_op_ffloor:
385 return OP_FLOOR;
386 case nir_op_ffma:
387 return OP_FMA;
388 case nir_op_flog2:
389 return OP_LG2;
390 case nir_op_fmax:
391 case nir_op_imax:
392 case nir_op_umax:
393 return OP_MAX;
394 case nir_op_pack_64_2x32_split:
395 return OP_MERGE;
396 case nir_op_fmin:
397 case nir_op_imin:
398 case nir_op_umin:
399 return OP_MIN;
400 case nir_op_fmod:
401 case nir_op_imod:
402 case nir_op_umod:
403 case nir_op_frem:
404 case nir_op_irem:
405 return OP_MOD;
406 case nir_op_fmul:
407 case nir_op_imul:
408 case nir_op_imul_high:
409 case nir_op_umul_high:
410 return OP_MUL;
411 case nir_op_fneg:
412 case nir_op_ineg:
413 return OP_NEG;
414 case nir_op_fnot:
415 case nir_op_inot:
416 return OP_NOT;
417 case nir_op_for:
418 case nir_op_ior:
419 return OP_OR;
420 case nir_op_fpow:
421 return OP_POW;
422 case nir_op_frcp:
423 return OP_RCP;
424 case nir_op_frsq:
425 return OP_RSQ;
426 case nir_op_fsat:
427 return OP_SAT;
428 case nir_op_feq32:
429 case nir_op_ieq32:
430 case nir_op_fge32:
431 case nir_op_ige32:
432 case nir_op_uge32:
433 case nir_op_flt32:
434 case nir_op_ilt32:
435 case nir_op_ult32:
436 case nir_op_fne32:
437 case nir_op_ine32:
438 return OP_SET;
439 case nir_op_ishl:
440 return OP_SHL;
441 case nir_op_ishr:
442 case nir_op_ushr:
443 return OP_SHR;
444 case nir_op_fsin:
445 return OP_SIN;
446 case nir_op_fsqrt:
447 return OP_SQRT;
448 case nir_op_fsub:
449 case nir_op_isub:
450 return OP_SUB;
451 case nir_op_ftrunc:
452 return OP_TRUNC;
453 case nir_op_fxor:
454 case nir_op_ixor:
455 return OP_XOR;
456 default:
457 ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
458 assert(false);
459 return OP_NOP;
460 }
461 }
462
463 operation
464 Converter::getOperation(nir_texop op)
465 {
466 switch (op) {
467 case nir_texop_tex:
468 return OP_TEX;
469 case nir_texop_lod:
470 return OP_TXLQ;
471 case nir_texop_txb:
472 return OP_TXB;
473 case nir_texop_txd:
474 return OP_TXD;
475 case nir_texop_txf:
476 case nir_texop_txf_ms:
477 return OP_TXF;
478 case nir_texop_tg4:
479 return OP_TXG;
480 case nir_texop_txl:
481 return OP_TXL;
482 case nir_texop_query_levels:
483 case nir_texop_texture_samples:
484 case nir_texop_txs:
485 return OP_TXQ;
486 default:
487 ERROR("couldn't get operation for nir_texop %u\n", op);
488 assert(false);
489 return OP_NOP;
490 }
491 }
492
493 operation
494 Converter::getOperation(nir_intrinsic_op op)
495 {
496 switch (op) {
497 case nir_intrinsic_emit_vertex:
498 return OP_EMIT;
499 case nir_intrinsic_end_primitive:
500 return OP_RESTART;
501 case nir_intrinsic_image_deref_atomic_add:
502 case nir_intrinsic_image_deref_atomic_and:
503 case nir_intrinsic_image_deref_atomic_comp_swap:
504 case nir_intrinsic_image_deref_atomic_exchange:
505 case nir_intrinsic_image_deref_atomic_max:
506 case nir_intrinsic_image_deref_atomic_min:
507 case nir_intrinsic_image_deref_atomic_or:
508 case nir_intrinsic_image_deref_atomic_xor:
509 return OP_SUREDP;
510 case nir_intrinsic_image_deref_load:
511 return OP_SULDP;
512 case nir_intrinsic_image_deref_samples:
513 case nir_intrinsic_image_deref_size:
514 return OP_SUQ;
515 case nir_intrinsic_image_deref_store:
516 return OP_SUSTP;
517 default:
518 ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
519 assert(false);
520 return OP_NOP;
521 }
522 }
523
524 operation
525 Converter::preOperationNeeded(nir_op op)
526 {
527 switch (op) {
528 case nir_op_fcos:
529 case nir_op_fsin:
530 return OP_PRESIN;
531 default:
532 return OP_NOP;
533 }
534 }
535
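// subop for ALU ops that need one; currently only the high-half
// multiplies do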
536 int
537 Converter::getSubOp(nir_op op)
538 {
539 switch (op) {
540 case nir_op_imul_high:
541 case nir_op_umul_high:
542 return NV50_IR_SUBOP_MUL_HIGH;
543 default:
544 return 0;
545 }
546 }
547
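// atomics, memory barriers and votes are all expressed as subops on a
// single generic operation each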
548 int
549 Converter::getSubOp(nir_intrinsic_op op)
550 {
551 switch (op) {
552 case nir_intrinsic_image_deref_atomic_add:
553 case nir_intrinsic_shared_atomic_add:
554 case nir_intrinsic_ssbo_atomic_add:
555 return NV50_IR_SUBOP_ATOM_ADD;
556 case nir_intrinsic_image_deref_atomic_and:
557 case nir_intrinsic_shared_atomic_and:
558 case nir_intrinsic_ssbo_atomic_and:
559 return NV50_IR_SUBOP_ATOM_AND;
560 case nir_intrinsic_image_deref_atomic_comp_swap:
561 case nir_intrinsic_shared_atomic_comp_swap:
562 case nir_intrinsic_ssbo_atomic_comp_swap:
563 return NV50_IR_SUBOP_ATOM_CAS;
564 case nir_intrinsic_image_deref_atomic_exchange:
565 case nir_intrinsic_shared_atomic_exchange:
566 case nir_intrinsic_ssbo_atomic_exchange:
567 return NV50_IR_SUBOP_ATOM_EXCH;
568 case nir_intrinsic_image_deref_atomic_or:
569 case nir_intrinsic_shared_atomic_or:
570 case nir_intrinsic_ssbo_atomic_or:
571 return NV50_IR_SUBOP_ATOM_OR;
572 case nir_intrinsic_image_deref_atomic_max:
573 case nir_intrinsic_shared_atomic_imax:
574 case nir_intrinsic_shared_atomic_umax:
575 case nir_intrinsic_ssbo_atomic_imax:
576 case nir_intrinsic_ssbo_atomic_umax:
577 return NV50_IR_SUBOP_ATOM_MAX;
578 case nir_intrinsic_image_deref_atomic_min:
579 case nir_intrinsic_shared_atomic_imin:
580 case nir_intrinsic_shared_atomic_umin:
581 case nir_intrinsic_ssbo_atomic_imin:
582 case nir_intrinsic_ssbo_atomic_umin:
583 return NV50_IR_SUBOP_ATOM_MIN;
584 case nir_intrinsic_image_deref_atomic_xor:
585 case nir_intrinsic_shared_atomic_xor:
586 case nir_intrinsic_ssbo_atomic_xor:
587 return NV50_IR_SUBOP_ATOM_XOR;
588
589 case nir_intrinsic_group_memory_barrier:
590 case nir_intrinsic_memory_barrier:
591 case nir_intrinsic_memory_barrier_atomic_counter:
592 case nir_intrinsic_memory_barrier_buffer:
593 case nir_intrinsic_memory_barrier_image:
594 return NV50_IR_SUBOP_MEMBAR(M, GL);
595 case nir_intrinsic_memory_barrier_shared:
596 return NV50_IR_SUBOP_MEMBAR(M, CTA);
597
598 case nir_intrinsic_vote_all:
599 return NV50_IR_SUBOP_VOTE_ALL;
600 case nir_intrinsic_vote_any:
601 return NV50_IR_SUBOP_VOTE_ANY;
602 case nir_intrinsic_vote_ieq:
603 return NV50_IR_SUBOP_VOTE_UNI;
604 default:
605 return 0;
606 }
607 }
608
609 CondCode
610 Converter::getCondCode(nir_op op)
611 {
612 switch (op) {
613 case nir_op_feq32:
614 case nir_op_ieq32:
615 return CC_EQ;
616 case nir_op_fge32:
617 case nir_op_ige32:
618 case nir_op_uge32:
619 return CC_GE;
620 case nir_op_flt32:
621 case nir_op_ilt32:
622 case nir_op_ult32:
623 return CC_LT;
624 case nir_op_fne32:
625 return CC_NEU;
626 case nir_op_ine32:
627 return CC_NE;
628 default:
629 ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
630 assert(false);
631 return CC_FL;
632 }
633 }
634
635 Converter::LValues&
636 Converter::convert(nir_alu_dest *dest)
637 {
638 return convert(&dest->dest);
639 }
640
641 Converter::LValues&
642 Converter::convert(nir_dest *dest)
643 {
644 if (dest->is_ssa)
645 return convert(&dest->ssa);
646 if (dest->reg.indirect) {
647 ERROR("no support for indirects.\n");
648 assert(false);
649 }
650 return convert(dest->reg.reg);
651 }
652
653 Converter::LValues&
654 Converter::convert(nir_register *reg)
655 {
656 NirDefMap::iterator it = regDefs.find(reg->index);
657 if (it != regDefs.end())
658 return it->second;
659
660 LValues newDef(reg->num_components);
661 for (uint8_t i = 0; i < reg->num_components; i++)
662 newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
663 return regDefs[reg->index] = newDef;
664 }
665
666 Converter::LValues&
667 Converter::convert(nir_ssa_def *def)
668 {
669 NirDefMap::iterator it = ssaDefs.find(def->index);
670 if (it != ssaDefs.end())
671 return it->second;
672
673 LValues newDef(def->num_components);
674 for (uint8_t i = 0; i < def->num_components; i++)
675 newDef[i] = getSSA(std::max(4, def->bit_size / 8));
676 return ssaDefs[def->index] = newDef;
677 }
678
679 Value*
680 Converter::getSrc(nir_alu_src *src, uint8_t component)
681 {
682 if (src->abs || src->negate) {
683 ERROR("modifiers currently not supported on nir_alu_src\n");
684 assert(false);
685 }
686 return getSrc(&src->src, src->swizzle[component]);
687 }
688
689 Value*
690 Converter::getSrc(nir_register *reg, uint8_t idx)
691 {
692 NirDefMap::iterator it = regDefs.find(reg->index);
693 if (it == regDefs.end())
694 return convert(reg)[idx];
695 return it->second[idx];
696 }
697
698 Value*
699 Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
700 {
701 if (src->is_ssa)
702 return getSrc(src->ssa, idx);
703
704 if (src->reg.indirect) {
705 if (indirect)
706 return getSrc(src->reg.indirect, idx);
707 ERROR("no support for indirects.\n");
708 assert(false);
709 return NULL;
710 }
711
712 return getSrc(src->reg.reg, idx);
713 }
714
715 Value*
716 Converter::getSrc(nir_ssa_def *src, uint8_t idx)
717 {
718 NirDefMap::iterator it = ssaDefs.find(src->index);
719 if (it == ssaDefs.end()) {
720 ERROR("SSA value %u not found\n", src->index);
721 assert(false);
722 return NULL;
723 }
724 return it->second[idx];
725 }
726
727 uint32_t
728 Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
729 {
730 nir_const_value *offset = nir_src_as_const_value(*src);
731
732 if (offset) {
733 indirect = NULL;
734 return offset->u32[0];
735 }
736
737 indirect = getSrc(src, idx, true);
738 return 0;
739 }
740
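// intrinsic variant: adds nir_intrinsic_base() to the constant part and,
// for a dynamic offset, scales the indirect value to bytes, i.e.
//   indirect = dynIdx << 4;   // 16 bytes per vec4 slot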
741 uint32_t
742 Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect)
743 {
744 int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
745 if (indirect)
746 indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
747 return idx;
748 }
749
750 static void
751 vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
752 {
753 assert(name && index);
754
755 if (slot >= VERT_ATTRIB_MAX) {
756 ERROR("invalid varying slot %u\n", slot);
757 assert(false);
758 return;
759 }
760
761 if (slot >= VERT_ATTRIB_GENERIC0 &&
762 slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
763 *name = TGSI_SEMANTIC_GENERIC;
764 *index = slot - VERT_ATTRIB_GENERIC0;
765 return;
766 }
767
768 if (slot >= VERT_ATTRIB_TEX0 &&
769 slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
770 *name = TGSI_SEMANTIC_TEXCOORD;
771 *index = slot - VERT_ATTRIB_TEX0;
772 return;
773 }
774
775 switch (slot) {
776 case VERT_ATTRIB_COLOR0:
777 *name = TGSI_SEMANTIC_COLOR;
778 *index = 0;
779 break;
780 case VERT_ATTRIB_COLOR1:
781 *name = TGSI_SEMANTIC_COLOR;
782 *index = 1;
783 break;
784 case VERT_ATTRIB_EDGEFLAG:
785 *name = TGSI_SEMANTIC_EDGEFLAG;
786 *index = 0;
787 break;
788 case VERT_ATTRIB_FOG:
789 *name = TGSI_SEMANTIC_FOG;
790 *index = 0;
791 break;
792 case VERT_ATTRIB_NORMAL:
793 *name = TGSI_SEMANTIC_NORMAL;
794 *index = 0;
795 break;
796 case VERT_ATTRIB_POS:
797 *name = TGSI_SEMANTIC_POSITION;
798 *index = 0;
799 break;
800 case VERT_ATTRIB_POINT_SIZE:
801 *name = TGSI_SEMANTIC_PSIZE;
802 *index = 0;
803 break;
804 default:
805 ERROR("unknown vert attrib slot %u\n", slot);
806 assert(false);
807 break;
808 }
809 }
810
811 static void
812 varying_slot_to_tgsi_semantic(gl_varying_slot slot, unsigned *name, unsigned *index)
813 {
814 assert(name && index);
815
816 if (slot >= VARYING_SLOT_TESS_MAX) {
817 ERROR("invalid varying slot %u\n", slot);
818 assert(false);
819 return;
820 }
821
822 if (slot >= VARYING_SLOT_PATCH0) {
823 *name = TGSI_SEMANTIC_PATCH;
824 *index = slot - VARYING_SLOT_PATCH0;
825 return;
826 }
827
828 if (slot >= VARYING_SLOT_VAR0) {
829 *name = TGSI_SEMANTIC_GENERIC;
830 *index = slot - VARYING_SLOT_VAR0;
831 return;
832 }
833
834 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
835 *name = TGSI_SEMANTIC_TEXCOORD;
836 *index = slot - VARYING_SLOT_TEX0;
837 return;
838 }
839
840 switch (slot) {
841 case VARYING_SLOT_BFC0:
842 *name = TGSI_SEMANTIC_BCOLOR;
843 *index = 0;
844 break;
845 case VARYING_SLOT_BFC1:
846 *name = TGSI_SEMANTIC_BCOLOR;
847 *index = 1;
848 break;
849 case VARYING_SLOT_CLIP_DIST0:
850 *name = TGSI_SEMANTIC_CLIPDIST;
851 *index = 0;
852 break;
853 case VARYING_SLOT_CLIP_DIST1:
854 *name = TGSI_SEMANTIC_CLIPDIST;
855 *index = 1;
856 break;
857 case VARYING_SLOT_CLIP_VERTEX:
858 *name = TGSI_SEMANTIC_CLIPVERTEX;
859 *index = 0;
860 break;
861 case VARYING_SLOT_COL0:
862 *name = TGSI_SEMANTIC_COLOR;
863 *index = 0;
864 break;
865 case VARYING_SLOT_COL1:
866 *name = TGSI_SEMANTIC_COLOR;
867 *index = 1;
868 break;
869 case VARYING_SLOT_EDGE:
870 *name = TGSI_SEMANTIC_EDGEFLAG;
871 *index = 0;
872 break;
873 case VARYING_SLOT_FACE:
874 *name = TGSI_SEMANTIC_FACE;
875 *index = 0;
876 break;
877 case VARYING_SLOT_FOGC:
878 *name = TGSI_SEMANTIC_FOG;
879 *index = 0;
880 break;
881 case VARYING_SLOT_LAYER:
882 *name = TGSI_SEMANTIC_LAYER;
883 *index = 0;
884 break;
885 case VARYING_SLOT_PNTC:
886 *name = TGSI_SEMANTIC_PCOORD;
887 *index = 0;
888 break;
889 case VARYING_SLOT_POS:
890 *name = TGSI_SEMANTIC_POSITION;
891 *index = 0;
892 break;
893 case VARYING_SLOT_PRIMITIVE_ID:
894 *name = TGSI_SEMANTIC_PRIMID;
895 *index = 0;
896 break;
897 case VARYING_SLOT_PSIZ:
898 *name = TGSI_SEMANTIC_PSIZE;
899 *index = 0;
900 break;
901 case VARYING_SLOT_TESS_LEVEL_INNER:
902 *name = TGSI_SEMANTIC_TESSINNER;
903 *index = 0;
904 break;
905 case VARYING_SLOT_TESS_LEVEL_OUTER:
906 *name = TGSI_SEMANTIC_TESSOUTER;
907 *index = 0;
908 break;
909 case VARYING_SLOT_VIEWPORT:
910 *name = TGSI_SEMANTIC_VIEWPORT_INDEX;
911 *index = 0;
912 break;
913 default:
914 ERROR("unknown varying slot %u\n", slot);
915 assert(false);
916 break;
917 }
918 }
919
920 static void
921 frag_result_to_tgsi_semantic(unsigned slot, unsigned *name, unsigned *index)
922 {
923 if (slot >= FRAG_RESULT_DATA0) {
924 *name = TGSI_SEMANTIC_COLOR;
925 *index = slot - FRAG_RESULT_COLOR - 2; // intentional: == slot - FRAG_RESULT_DATA0
926 return;
927 }
928
929 switch (slot) {
930 case FRAG_RESULT_COLOR:
931 *name = TGSI_SEMANTIC_COLOR;
932 *index = 0;
933 break;
934 case FRAG_RESULT_DEPTH:
935 *name = TGSI_SEMANTIC_POSITION;
936 *index = 0;
937 break;
938 case FRAG_RESULT_SAMPLE_MASK:
939 *name = TGSI_SEMANTIC_SAMPLEMASK;
940 *index = 0;
941 break;
942 default:
943 ERROR("unknown frag result slot %u\n", slot);
944 assert(false);
945 break;
946 }
947 }
948
949 // copy of _mesa_sysval_to_semantic
950 static void
951 system_val_to_tgsi_semantic(unsigned val, unsigned *name, unsigned *index)
952 {
953 *index = 0;
954 switch (val) {
955 // Vertex shader
956 case SYSTEM_VALUE_VERTEX_ID:
957 *name = TGSI_SEMANTIC_VERTEXID;
958 break;
959 case SYSTEM_VALUE_INSTANCE_ID:
960 *name = TGSI_SEMANTIC_INSTANCEID;
961 break;
962 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
963 *name = TGSI_SEMANTIC_VERTEXID_NOBASE;
964 break;
965 case SYSTEM_VALUE_BASE_VERTEX:
966 *name = TGSI_SEMANTIC_BASEVERTEX;
967 break;
968 case SYSTEM_VALUE_BASE_INSTANCE:
969 *name = TGSI_SEMANTIC_BASEINSTANCE;
970 break;
971 case SYSTEM_VALUE_DRAW_ID:
972 *name = TGSI_SEMANTIC_DRAWID;
973 break;
974
975 // Geometry shader
976 case SYSTEM_VALUE_INVOCATION_ID:
977 *name = TGSI_SEMANTIC_INVOCATIONID;
978 break;
979
980 // Fragment shader
981 case SYSTEM_VALUE_FRAG_COORD:
982 *name = TGSI_SEMANTIC_POSITION;
983 break;
984 case SYSTEM_VALUE_FRONT_FACE:
985 *name = TGSI_SEMANTIC_FACE;
986 break;
987 case SYSTEM_VALUE_SAMPLE_ID:
988 *name = TGSI_SEMANTIC_SAMPLEID;
989 break;
990 case SYSTEM_VALUE_SAMPLE_POS:
991 *name = TGSI_SEMANTIC_SAMPLEPOS;
992 break;
993 case SYSTEM_VALUE_SAMPLE_MASK_IN:
994 *name = TGSI_SEMANTIC_SAMPLEMASK;
995 break;
996 case SYSTEM_VALUE_HELPER_INVOCATION:
997 *name = TGSI_SEMANTIC_HELPER_INVOCATION;
998 break;
999
1000 // Tessellation shader
1001 case SYSTEM_VALUE_TESS_COORD:
1002 *name = TGSI_SEMANTIC_TESSCOORD;
1003 break;
1004 case SYSTEM_VALUE_VERTICES_IN:
1005 *name = TGSI_SEMANTIC_VERTICESIN;
1006 break;
1007 case SYSTEM_VALUE_PRIMITIVE_ID:
1008 *name = TGSI_SEMANTIC_PRIMID;
1009 break;
1010 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
1011 *name = TGSI_SEMANTIC_TESSOUTER;
1012 break;
1013 case SYSTEM_VALUE_TESS_LEVEL_INNER:
1014 *name = TGSI_SEMANTIC_TESSINNER;
1015 break;
1016
1017 // Compute shader
1018 case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
1019 *name = TGSI_SEMANTIC_THREAD_ID;
1020 break;
1021 case SYSTEM_VALUE_WORK_GROUP_ID:
1022 *name = TGSI_SEMANTIC_BLOCK_ID;
1023 break;
1024 case SYSTEM_VALUE_NUM_WORK_GROUPS:
1025 *name = TGSI_SEMANTIC_GRID_SIZE;
1026 break;
1027 case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
1028 *name = TGSI_SEMANTIC_BLOCK_SIZE;
1029 break;
1030
1031 // ARB_shader_ballot
1032 case SYSTEM_VALUE_SUBGROUP_SIZE:
1033 *name = TGSI_SEMANTIC_SUBGROUP_SIZE;
1034 break;
1035 case SYSTEM_VALUE_SUBGROUP_INVOCATION:
1036 *name = TGSI_SEMANTIC_SUBGROUP_INVOCATION;
1037 break;
1038 case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
1039 *name = TGSI_SEMANTIC_SUBGROUP_EQ_MASK;
1040 break;
1041 case SYSTEM_VALUE_SUBGROUP_GE_MASK:
1042 *name = TGSI_SEMANTIC_SUBGROUP_GE_MASK;
1043 break;
1044 case SYSTEM_VALUE_SUBGROUP_GT_MASK:
1045 *name = TGSI_SEMANTIC_SUBGROUP_GT_MASK;
1046 break;
1047 case SYSTEM_VALUE_SUBGROUP_LE_MASK:
1048 *name = TGSI_SEMANTIC_SUBGROUP_LE_MASK;
1049 break;
1050 case SYSTEM_VALUE_SUBGROUP_LT_MASK:
1051 *name = TGSI_SEMANTIC_SUBGROUP_LT_MASK;
1052 break;
1053
1054 default:
1055 ERROR("unknown system value %u\n", val);
1056 assert(false);
1057 break;
1058 }
1059 }
1060
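// INTERP_MODE_NONE leaves the decision to the API state: colors get the
// sc bit (shade-model dependent, presumably) and position is marked
// linear (no perspective division)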
1061 void
1062 Converter::setInterpolate(nv50_ir_varying *var,
1063 uint8_t mode,
1064 bool centroid,
1065 unsigned semantic)
1066 {
1067 switch (mode) {
1068 case INTERP_MODE_FLAT:
1069 var->flat = 1;
1070 break;
1071 case INTERP_MODE_NONE:
1072 if (semantic == TGSI_SEMANTIC_COLOR)
1073 var->sc = 1;
1074 else if (semantic == TGSI_SEMANTIC_POSITION)
1075 var->linear = 1;
1076 break;
1077 case INTERP_MODE_NOPERSPECTIVE:
1078 var->linear = 1;
1079 break;
1080 case INTERP_MODE_SMOOTH:
1081 break;
1082 }
1083 var->centroid = centroid;
1084 }
1085
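// number of I/O slots a variable occupies; for GS inputs and non-patch
// TCS/TES varyings the outer per-vertex array dimension does not consume
// slots, so it is divided out or stripped here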
1086 static uint16_t
1087 calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
1088 bool input, const nir_variable *var)
1089 {
1090 if (!type->is_array())
1091 return type->count_attribute_slots(false);
1092
1093 uint16_t slots;
1094 switch (stage) {
1095 case Program::TYPE_GEOMETRY:
1096 slots = type->uniform_locations();
1097 if (input)
1098 slots /= info.gs.vertices_in;
1099 break;
1100 case Program::TYPE_TESSELLATION_CONTROL:
1101 case Program::TYPE_TESSELLATION_EVAL:
1102 // remove first dimension
1103 if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
1104 slots = type->uniform_locations();
1105 else
1106 slots = type->fields.array->uniform_locations();
1107 break;
1108 default:
1109 slots = type->count_attribute_slots(false);
1110 break;
1111 }
1112
1113 return slots;
1114 }
1115
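// translates the NIR variable lists into the nv50_ir_prog_info slot
// tables: a TGSI-style semantic name/index plus component mask for every
// input, output and system value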
1116 bool Converter::assignSlots() {
1117 unsigned name;
1118 unsigned index;
1119
1120 info->io.viewportId = -1;
1121 info->numInputs = 0;
1122
1123 // we have to fixup the uniform locations for arrays
1124 unsigned numImages = 0;
1125 nir_foreach_variable(var, &nir->uniforms) {
1126 const glsl_type *type = var->type;
1127 if (!type->without_array()->is_image())
1128 continue;
1129 var->data.driver_location = numImages;
1130 numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
1131 }
1132
1133 nir_foreach_variable(var, &nir->inputs) {
1134 const glsl_type *type = var->type;
1135 int slot = var->data.location;
1136 uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
1137 uint32_t comp = type->is_array() ? type->without_array()->component_slots()
1138 : type->component_slots();
1139 uint32_t frac = var->data.location_frac;
1140 uint32_t vary = var->data.driver_location;
1141
1142 if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
1143 if (comp > 2)
1144 slots *= 2;
1145 }
1146
1147 assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);
1148
1149 switch(prog->getType()) {
1150 case Program::TYPE_FRAGMENT:
1151 varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
1152 for (uint16_t i = 0; i < slots; ++i) {
1153 setInterpolate(&info->in[vary + i], var->data.interpolation,
1154 var->data.centroid | var->data.sample, name);
1155 }
1156 break;
1157 case Program::TYPE_GEOMETRY:
1158 varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
1159 break;
1160 case Program::TYPE_TESSELLATION_CONTROL:
1161 case Program::TYPE_TESSELLATION_EVAL:
1162 varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
1163 if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
1164 info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
1165 break;
1166 case Program::TYPE_VERTEX:
1167 vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
1168 switch (name) {
1169 case TGSI_SEMANTIC_EDGEFLAG:
1170 info->io.edgeFlagIn = vary;
1171 break;
1172 default:
1173 break;
1174 }
1175 break;
1176 default:
1177 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1178 return false;
1179 }
1180
1181 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1182 info->in[vary].id = vary;
1183 info->in[vary].patch = var->data.patch;
1184 info->in[vary].sn = name;
1185 info->in[vary].si = index + i;
1186 if (glsl_base_type_is_64bit(type->without_array()->base_type))
1187 if (i & 0x1)
1188 info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
1189 else
1190 info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
1191 else
1192 info->in[vary].mask |= ((1 << comp) - 1) << frac;
1193 }
1194 info->numInputs = std::max<uint8_t>(info->numInputs, vary);
1195 }
1196
1197 info->numOutputs = 0;
1198 nir_foreach_variable(var, &nir->outputs) {
1199 const glsl_type *type = var->type;
1200 int slot = var->data.location;
1201 uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
1202 uint32_t comp = type->is_array() ? type->without_array()->component_slots()
1203 : type->component_slots();
1204 uint32_t frac = var->data.location_frac;
1205 uint32_t vary = var->data.driver_location;
1206
1207 if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
1208 if (comp > 2)
1209 slots *= 2;
1210 }
1211
1212 assert(vary < PIPE_MAX_SHADER_OUTPUTS);
1213
1214 switch(prog->getType()) {
1215 case Program::TYPE_FRAGMENT:
1216 frag_result_to_tgsi_semantic((gl_frag_result)slot, &name, &index);
1217 switch (name) {
1218 case TGSI_SEMANTIC_COLOR:
1219 if (!var->data.fb_fetch_output)
1220 info->prop.fp.numColourResults++;
1221 info->prop.fp.separateFragData = true;
1222 // sometimes we get FRAG_RESULT_DATAX with data.index 0
1223 // sometimes we get FRAG_RESULT_DATA0 with data.index X
1224 index = index == 0 ? var->data.index : index;
1225 break;
1226 case TGSI_SEMANTIC_POSITION:
1227 info->io.fragDepth = vary;
1228 info->prop.fp.writesDepth = true;
1229 break;
1230 case TGSI_SEMANTIC_SAMPLEMASK:
1231 info->io.sampleMask = vary;
1232 break;
1233 default:
1234 break;
1235 }
1236 break;
1237 case Program::TYPE_GEOMETRY:
1238 case Program::TYPE_TESSELLATION_CONTROL:
1239 case Program::TYPE_TESSELLATION_EVAL:
1240 case Program::TYPE_VERTEX:
1241 varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
1242
1243 if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
1244 name != TGSI_SEMANTIC_TESSOUTER)
1245 info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
1246
1247 switch (name) {
1248 case TGSI_SEMANTIC_CLIPDIST:
1249 info->io.genUserClip = -1;
1250 break;
1251 case TGSI_SEMANTIC_CLIPVERTEX:
1252 clipVertexOutput = vary;
1253 break;
1254 case TGSI_SEMANTIC_EDGEFLAG:
1255 info->io.edgeFlagOut = vary;
1256 break;
1257 case TGSI_SEMANTIC_POSITION:
1258 if (clipVertexOutput < 0)
1259 clipVertexOutput = vary;
1260 break;
1261 default:
1262 break;
1263 }
1264 break;
1265 default:
1266 ERROR("unknown shader type %u in assignSlots\n", prog->getType());
1267 return false;
1268 }
1269
1270 for (uint16_t i = 0u; i < slots; ++i, ++vary) {
1271 info->out[vary].id = vary;
1272 info->out[vary].patch = var->data.patch;
1273 info->out[vary].sn = name;
1274 info->out[vary].si = index + i;
1275 if (glsl_base_type_is_64bit(type->without_array()->base_type))
1276 if (i & 0x1)
1277 info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
1278 else
1279 info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
1280 else
1281 info->out[vary].mask |= ((1 << comp) - 1) << frac;
1282
1283 if (nir->info.outputs_read & 1ull << slot)
1284 info->out[vary].oread = 1;
1285 }
1286 info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
1287 }
1288
1289 info->numSysVals = 0;
1290 for (uint8_t i = 0; i < 64; ++i) {
1291 if (!(nir->info.system_values_read & 1ull << i))
1292 continue;
1293
1294 system_val_to_tgsi_semantic(i, &name, &index);
1295 info->sv[info->numSysVals].sn = name;
1296 info->sv[info->numSysVals].si = index;
1297 info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);
1298
1299 switch (i) {
1300 case SYSTEM_VALUE_INSTANCE_ID:
1301 info->io.instanceId = info->numSysVals;
1302 break;
1303 case SYSTEM_VALUE_TESS_LEVEL_INNER:
1304 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
1305 info->sv[info->numSysVals].patch = 1;
1306 break;
1307 case SYSTEM_VALUE_VERTEX_ID:
1308 info->io.vertexId = info->numSysVals;
1309 break;
1310 default:
1311 break;
1312 }
1313
1314 info->numSysVals += 1;
1315 }
1316
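// if we have to generate user clip distances ourselves, append the
// required CLIPDIST outputs (one vec4 slot per four clip planes)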
1317 if (info->io.genUserClip > 0) {
1318 info->io.clipDistances = info->io.genUserClip;
1319
1320 const unsigned int nOut = (info->io.genUserClip + 3) / 4;
1321
1322 for (unsigned int n = 0; n < nOut; ++n) {
1323 unsigned int i = info->numOutputs++;
1324 info->out[i].id = i;
1325 info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1326 info->out[i].si = n;
1327 info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
1328 }
1329 }
1330
1331 return info->assignSlots(info) == 0;
1332 }
1333
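// byte address of one component of an I/O slot; 64-bit types occupy two
// 32-bit components and may spill into the next vec4 row, e.g. a double
// in component 3 ends up at (idx + 1, slot 2)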
1334 uint32_t
1335 Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
1336 {
1337 DataType ty;
1338 int offset = nir_intrinsic_component(insn);
1339 bool input;
1340
1341 if (nir_intrinsic_infos[insn->intrinsic].has_dest)
1342 ty = getDType(insn);
1343 else
1344 ty = getSType(insn->src[0], false, false);
1345
1346 switch (insn->intrinsic) {
1347 case nir_intrinsic_load_input:
1348 case nir_intrinsic_load_interpolated_input:
1349 case nir_intrinsic_load_per_vertex_input:
1350 input = true;
1351 break;
1352 case nir_intrinsic_load_output:
1353 case nir_intrinsic_load_per_vertex_output:
1354 case nir_intrinsic_store_output:
1355 case nir_intrinsic_store_per_vertex_output:
1356 input = false;
1357 break;
1358 default:
1359 ERROR("unknown intrinsic in getSlotAddress %s\n",
1360 nir_intrinsic_infos[insn->intrinsic].name);
1361 input = false;
1362 assert(false);
1363 break;
1364 }
1365
1366 if (typeSizeof(ty) == 8) {
1367 slot *= 2;
1368 slot += offset;
1369 if (slot >= 4) {
1370 idx += 1;
1371 slot -= 4;
1372 }
1373 } else {
1374 slot += offset;
1375 }
1376
1377 assert(slot < 4);
1378 assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
1379 assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);
1380
1381 const nv50_ir_varying *vary = input ? info->in : info->out;
1382 return vary[idx].slot[slot] * 4;
1383 }
1384
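// 64-bit loads from const/buffer memory, or through an indirect address,
// are done as two 32-bit loads merged into a single 64-bit value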
1385 Instruction *
1386 Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
1387 uint32_t base, uint8_t c, Value *indirect0,
1388 Value *indirect1, bool patch)
1389 {
1390 unsigned int tySize = typeSizeof(ty);
1391
1392 if (tySize == 8 &&
1393 (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
1394 Value *lo = getSSA();
1395 Value *hi = getSSA();
1396
1397 Instruction *loi =
1398 mkLoad(TYPE_U32, lo,
1399 mkSymbol(file, i, TYPE_U32, base + c * tySize),
1400 indirect0);
1401 loi->setIndirect(0, 1, indirect1);
1402 loi->perPatch = patch;
1403
1404 Instruction *hii =
1405 mkLoad(TYPE_U32, hi,
1406 mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
1407 indirect0);
1408 hii->setIndirect(0, 1, indirect1);
1409 hii->perPatch = patch;
1410
1411 return mkOp2(OP_MERGE, ty, def, lo, hi);
1412 } else {
1413 Instruction *ld =
1414 mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
1415 ld->setIndirect(0, 1, indirect1);
1416 ld->perPatch = patch;
1417 return ld;
1418 }
1419 }
1420
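// output counterpart of loadFrom: 64-bit indirect stores are split into
// two 32-bit stores, and exported sources are copied into fresh values
// first (presumably because OP_EXPORT cannot consume them directly)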
1421 void
1422 Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
1423 DataType ty, Value *src, uint8_t idx, uint8_t c,
1424 Value *indirect0, Value *indirect1)
1425 {
1426 uint8_t size = typeSizeof(ty);
1427 uint32_t address = getSlotAddress(insn, idx, c);
1428
1429 if (size == 8 && indirect0) {
1430 Value *split[2];
1431 mkSplit(split, 4, src);
1432
1433 if (op == OP_EXPORT) {
1434 split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
1435 split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
1436 }
1437
1438 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
1439 split[0])->perPatch = info->out[idx].patch;
1440 mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
1441 split[1])->perPatch = info->out[idx].patch;
1442 } else {
1443 if (op == OP_EXPORT)
1444 src = mkMov(getSSA(size), src, ty)->getDef(0);
1445 mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
1446 src)->perPatch = info->out[idx].patch;
1447 }
1448 }
1449
1450 bool
1451 Converter::parseNIR()
1452 {
1453 info->bin.tlsSpace = 0;
1454 info->io.clipDistances = nir->info.clip_distance_array_size;
1455 info->io.cullDistances = nir->info.cull_distance_array_size;
1456
1457 switch(prog->getType()) {
1458 case Program::TYPE_COMPUTE:
1459 info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
1460 info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
1461 info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
1462 info->bin.smemSize = nir->info.cs.shared_size;
1463 break;
1464 case Program::TYPE_FRAGMENT:
1465 info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
1466 info->prop.fp.persampleInvocation =
1467 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
1468 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
1469 info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
1470 info->prop.fp.readsSampleLocations =
1471 (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
1472 info->prop.fp.usesDiscard = nir->info.fs.uses_discard;
1473 info->prop.fp.usesSampleMaskIn =
1474 !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
1475 break;
1476 case Program::TYPE_GEOMETRY:
1477 info->prop.gp.inputPrim = nir->info.gs.input_primitive;
1478 info->prop.gp.instanceCount = nir->info.gs.invocations;
1479 info->prop.gp.maxVertices = nir->info.gs.vertices_out;
1480 info->prop.gp.outputPrim = nir->info.gs.output_primitive;
1481 break;
1482 case Program::TYPE_TESSELLATION_CONTROL:
1483 case Program::TYPE_TESSELLATION_EVAL:
1484 if (nir->info.tess.primitive_mode == GL_ISOLINES)
1485 info->prop.tp.domain = GL_LINES;
1486 else
1487 info->prop.tp.domain = nir->info.tess.primitive_mode;
1488 info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
1489 info->prop.tp.outputPrim =
1490 nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
1491 info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
1492 info->prop.tp.winding = !nir->info.tess.ccw;
1493 break;
1494 case Program::TYPE_VERTEX:
1495 info->prop.vp.usesDrawParameters =
1496 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
1497 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
1498 (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
1499 break;
1500 default:
1501 break;
1502 }
1503
1504 return true;
1505 }
1506
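// emits the shader's main function: entry/exit blocks, stage specific
// setup (TCS output base, FS 1/position.w), local memory for register
// arrays, then a walk over the NIR control flow graph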
1507 bool
1508 Converter::visit(nir_function *function)
1509 {
1510 // we only support emitting the main function for now
1511 assert(!strcmp(function->name, "main"));
1512 assert(function->impl);
1513
1514 // usually the blocks will set everything up, but main is special
1515 BasicBlock *entry = new BasicBlock(prog->main);
1516 exit = new BasicBlock(prog->main);
1517 blocks[nir_start_block(function->impl)->index] = entry;
1518 prog->main->setEntry(entry);
1519 prog->main->setExit(exit);
1520
1521 setPosition(entry, true);
1522
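// with user clip planes enabled, writes to the clip vertex (or
// position) are also captured in clipVtx, so handleUserClipPlanes()
// can compute the clip distances later (for GS, at every emitted
// vertex)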
1523 if (info->io.genUserClip > 0) {
1524 for (int c = 0; c < 4; ++c)
1525 clipVtx[c] = getScratch();
1526 }
1527
1528 switch (prog->getType()) {
1529 case Program::TYPE_TESSELLATION_CONTROL:
1530 outBase = mkOp2v(
1531 OP_SUB, TYPE_U32, getSSA(),
1532 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
1533 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
1534 break;
1535 case Program::TYPE_FRAGMENT: {
1536 Symbol *sv = mkSysVal(SV_POSITION, 3);
1537 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
1538 fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
1539 break;
1540 }
1541 default:
1542 break;
1543 }
1544
1545 nir_foreach_register(reg, &function->impl->registers) {
1546 if (reg->num_array_elems) {
1547 // TODO: packed variables would be nice (replace 4 with
1548 // reg->num_components), but MemoryOpt fails on them
1549 uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
1550 regToLmemOffset[reg->index] = info->bin.tlsSpace;
1551 info->bin.tlsSpace += size;
1552 }
1553 }
1554
1555 nir_index_ssa_defs(function->impl);
1556 foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
1557 if (!visit(node))
1558 return false;
1559 }
1560
1561 bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
1562 setPosition(exit, true);
1563
1564 if ((prog->getType() == Program::TYPE_VERTEX ||
1565 prog->getType() == Program::TYPE_TESSELLATION_EVAL)
1566 && info->io.genUserClip > 0)
1567 handleUserClipPlanes();
1568
1569 // TODO: for a non-main function this needs to be an OP_RETURN
1570 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
1571 return true;
1572 }
1573
1574 bool
1575 Converter::visit(nir_cf_node *node)
1576 {
1577 switch (node->type) {
1578 case nir_cf_node_block:
1579 return visit(nir_cf_node_as_block(node));
1580 case nir_cf_node_if:
1581 return visit(nir_cf_node_as_if(node));
1582 case nir_cf_node_loop:
1583 return visit(nir_cf_node_as_loop(node));
1584 default:
1585 ERROR("unknown nir_cf_node type %u\n", node->type);
1586 return false;
1587 }
1588 }
1589
1590 bool
1591 Converter::visit(nir_block *block)
1592 {
1593 if (!block->predecessors->entries && block->instr_list.is_empty())
1594 return true;
1595
1596 BasicBlock *bb = convert(block);
1597
1598 setPosition(bb, true);
1599 nir_foreach_instr(insn, block) {
1600 if (!visit(insn))
1601 return false;
1602 }
1603 return true;
1604 }
1605
1606 bool
1607 Converter::visit(nir_if *nif)
1608 {
1609 DataType sType = getSType(nif->condition, false, false);
1610 Value *src = getSrc(&nif->condition, 0);
1611
1612 nir_block *lastThen = nir_if_last_then_block(nif);
1613 nir_block *lastElse = nir_if_last_else_block(nif);
1614
1615 assert(!lastThen->successors[1]);
1616 assert(!lastElse->successors[1]);
1617
1618 BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
1619 BasicBlock *elseBB = convert(nir_if_first_else_block(nif));
1620
1621 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
1622 bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
1623
1624 // we only insert joinats if both branches end up at the end of the if
1625 // again. the reasons for this not happening are breaks/continues/ret/...
1626 // which have their own handling
1627 if (lastThen->successors[0] == lastElse->successors[0])
1628 bb->joinAt = mkFlow(OP_JOINAT, convert(lastThen->successors[0]),
1629 CC_ALWAYS, NULL);
1630
1631 mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);
1632
1633 foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
1634 if (!visit(node))
1635 return false;
1636 }
1637 setPosition(convert(lastThen), true);
1638 if (!bb->getExit() ||
1639 !bb->getExit()->asFlow() ||
1640 bb->getExit()->asFlow()->op == OP_JOIN) {
1641 BasicBlock *tailBB = convert(lastThen->successors[0]);
1642 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1643 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1644 }
1645
1646 foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
1647 if (!visit(node))
1648 return false;
1649 }
1650 setPosition(convert(lastElse), true);
1651 if (!bb->getExit() ||
1652 !bb->getExit()->asFlow() ||
1653 bb->getExit()->asFlow()->op == OP_JOIN) {
1654 BasicBlock *tailBB = convert(lastElse->successors[0]);
1655 mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
1656 bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
1657 }
1658
1659 if (lastThen->successors[0] == lastElse->successors[0]) {
1660 setPosition(convert(lastThen->successors[0]), true);
1661 mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
1662 }
1663
1664 return true;
1665 }
1666
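// loops are bracketed by OP_PREBREAK/OP_PRECONT so codegen knows the
// break and continue targets; the back edge to the header is attached
// explicitly after the body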
1667 bool
1668 Converter::visit(nir_loop *loop)
1669 {
1670 curLoopDepth += 1;
1671 func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);
1672
1673 BasicBlock *loopBB = convert(nir_loop_first_block(loop));
1674 BasicBlock *tailBB =
1675 convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
1676 bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);
1677
1678 mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
1679 setPosition(loopBB, false);
1680 mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);
1681
1682 foreach_list_typed(nir_cf_node, node, node, &loop->body) {
1683 if (!visit(node))
1684 return false;
1685 }
1686 Instruction *insn = bb->getExit();
1687 if (bb->cfg.incidentCount() != 0) {
1688 if (!insn || !insn->asFlow()) {
1689 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
1690 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
1691 } else if (insn && insn->op == OP_BRA && !insn->getPredicate() &&
1692 tailBB->cfg.incidentCount() == 0) {
1693 // RA doesn't like having blocks around with no incident edge,
1694 // so we create a fake one to make it happy
1695 bb->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
1696 }
1697 }
1698
1699 curLoopDepth -= 1;
1700
1701 return true;
1702 }
1703
1704 bool
1705 Converter::visit(nir_instr *insn)
1706 {
1707 switch (insn->type) {
1708 case nir_instr_type_alu:
1709 return visit(nir_instr_as_alu(insn));
1710 case nir_instr_type_deref:
1711 return visit(nir_instr_as_deref(insn));
1712 case nir_instr_type_intrinsic:
1713 return visit(nir_instr_as_intrinsic(insn));
1714 case nir_instr_type_jump:
1715 return visit(nir_instr_as_jump(insn));
1716 case nir_instr_type_load_const:
1717 return visit(nir_instr_as_load_const(insn));
1718 case nir_instr_type_ssa_undef:
1719 return visit(nir_instr_as_ssa_undef(insn));
1720 case nir_instr_type_tex:
1721 return visit(nir_instr_as_tex(insn));
1722 default:
1723 ERROR("unknown nir_instr type %u\n", insn->type);
1724 return false;
1725 }
1726 return true;
1727 }
1728
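// maps system-value load intrinsics onto the hardware system value
// backing them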
1729 SVSemantic
1730 Converter::convert(nir_intrinsic_op intr)
1731 {
1732 switch (intr) {
1733 case nir_intrinsic_load_base_vertex:
1734 return SV_BASEVERTEX;
1735 case nir_intrinsic_load_base_instance:
1736 return SV_BASEINSTANCE;
1737 case nir_intrinsic_load_draw_id:
1738 return SV_DRAWID;
1739 case nir_intrinsic_load_front_face:
1740 return SV_FACE;
1741 case nir_intrinsic_load_helper_invocation:
1742 return SV_THREAD_KILL;
1743 case nir_intrinsic_load_instance_id:
1744 return SV_INSTANCE_ID;
1745 case nir_intrinsic_load_invocation_id:
1746 return SV_INVOCATION_ID;
1747 case nir_intrinsic_load_local_group_size:
1748 return SV_NTID;
1749 case nir_intrinsic_load_local_invocation_id:
1750 return SV_TID;
1751 case nir_intrinsic_load_num_work_groups:
1752 return SV_NCTAID;
1753 case nir_intrinsic_load_patch_vertices_in:
1754 return SV_VERTEX_COUNT;
1755 case nir_intrinsic_load_primitive_id:
1756 return SV_PRIMITIVE_ID;
1757 case nir_intrinsic_load_sample_id:
1758 return SV_SAMPLE_INDEX;
1759 case nir_intrinsic_load_sample_mask_in:
1760 return SV_SAMPLE_MASK;
1761 case nir_intrinsic_load_sample_pos:
1762 return SV_SAMPLE_POS;
1763 case nir_intrinsic_load_subgroup_eq_mask:
1764 return SV_LANEMASK_EQ;
1765 case nir_intrinsic_load_subgroup_ge_mask:
1766 return SV_LANEMASK_GE;
1767 case nir_intrinsic_load_subgroup_gt_mask:
1768 return SV_LANEMASK_GT;
1769 case nir_intrinsic_load_subgroup_le_mask:
1770 return SV_LANEMASK_LE;
1771 case nir_intrinsic_load_subgroup_lt_mask:
1772 return SV_LANEMASK_LT;
1773 case nir_intrinsic_load_subgroup_invocation:
1774 return SV_LANEID;
1775 case nir_intrinsic_load_tess_coord:
1776 return SV_TESS_COORD;
1777 case nir_intrinsic_load_tess_level_inner:
1778 return SV_TESS_INNER;
1779 case nir_intrinsic_load_tess_level_outer:
1780 return SV_TESS_OUTER;
1781 case nir_intrinsic_load_vertex_id:
1782 return SV_VERTEX_ID;
1783 case nir_intrinsic_load_work_group_id:
1784 return SV_CTAID;
1785 default:
1786 ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
1787 nir_intrinsic_infos[intr].name);
1788 assert(false);
1789 return SV_LAST;
1790 }
1791 }
1792
1793 ImgFormat
1794 Converter::convertGLImgFormat(GLuint format)
1795 {
1796 #define FMT_CASE(a, b) \
1797 case GL_ ## a: return nv50_ir::FMT_ ## b
1798
1799 switch (format) {
1800 FMT_CASE(NONE, NONE);
1801
1802 FMT_CASE(RGBA32F, RGBA32F);
1803 FMT_CASE(RGBA16F, RGBA16F);
1804 FMT_CASE(RG32F, RG32F);
1805 FMT_CASE(RG16F, RG16F);
1806 FMT_CASE(R11F_G11F_B10F, R11G11B10F);
1807 FMT_CASE(R32F, R32F);
1808 FMT_CASE(R16F, R16F);
1809
1810 FMT_CASE(RGBA32UI, RGBA32UI);
1811 FMT_CASE(RGBA16UI, RGBA16UI);
1812 FMT_CASE(RGB10_A2UI, RGB10A2UI);
1813 FMT_CASE(RGBA8UI, RGBA8UI);
1814 FMT_CASE(RG32UI, RG32UI);
1815 FMT_CASE(RG16UI, RG16UI);
1816 FMT_CASE(RG8UI, RG8UI);
1817 FMT_CASE(R32UI, R32UI);
1818 FMT_CASE(R16UI, R16UI);
1819 FMT_CASE(R8UI, R8UI);
1820
1821 FMT_CASE(RGBA32I, RGBA32I);
1822 FMT_CASE(RGBA16I, RGBA16I);
1823 FMT_CASE(RGBA8I, RGBA8I);
1824 FMT_CASE(RG32I, RG32I);
1825 FMT_CASE(RG16I, RG16I);
1826 FMT_CASE(RG8I, RG8I);
1827 FMT_CASE(R32I, R32I);
1828 FMT_CASE(R16I, R16I);
1829 FMT_CASE(R8I, R8I);
1830
1831 FMT_CASE(RGBA16, RGBA16);
1832 FMT_CASE(RGB10_A2, RGB10A2);
1833 FMT_CASE(RGBA8, RGBA8);
1834 FMT_CASE(RG16, RG16);
1835 FMT_CASE(RG8, RG8);
1836 FMT_CASE(R16, R16);
1837 FMT_CASE(R8, R8);
1838
1839 FMT_CASE(RGBA16_SNORM, RGBA16_SNORM);
1840 FMT_CASE(RGBA8_SNORM, RGBA8_SNORM);
1841 FMT_CASE(RG16_SNORM, RG16_SNORM);
1842 FMT_CASE(RG8_SNORM, RG8_SNORM);
1843 FMT_CASE(R16_SNORM, R16_SNORM);
1844 FMT_CASE(R8_SNORM, R8_SNORM);
1845
1846 FMT_CASE(BGRA_INTEGER, BGRA8);
1847 default:
1848 ERROR("unknown format %x\n", format);
1849 assert(false);
1850 return nv50_ir::FMT_NONE;
1851 }
1852 #undef FMT_CASE
1853 }
1854
1855 bool
1856 Converter::visit(nir_intrinsic_instr *insn)
1857 {
1858 nir_intrinsic_op op = insn->intrinsic;
1859 const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
1860
1861 switch (op) {
1862 case nir_intrinsic_load_uniform: {
1863 LValues &newDefs = convert(&insn->dest);
1864 const DataType dType = getDType(insn);
1865 Value *indirect;
1866 uint32_t coffset = getIndirect(insn, 0, 0, indirect);
1867 for (uint8_t i = 0; i < insn->num_components; ++i) {
1868 loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
1869 }
1870 break;
1871 }
1872 case nir_intrinsic_store_output:
1873 case nir_intrinsic_store_per_vertex_output: {
1874 Value *indirect;
1875 DataType dType = getSType(insn->src[0], false, false);
1876 uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
1877
1878 for (uint8_t i = 0u; i < insn->num_components; ++i) {
1879 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
1880 continue;
1881
1882 uint8_t offset = 0;
1883 Value *src = getSrc(&insn->src[0], i);
1884 switch (prog->getType()) {
1885 case Program::TYPE_FRAGMENT: {
1886 if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
1887 // TGSI uses a different interface than NIR: TGSI stores the depth
1888 // value in the z component, NIR in x
1889 offset += 2;
1890 src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
1891 }
1892 break;
1893 }
1894 case Program::TYPE_GEOMETRY:
1895 case Program::TYPE_VERTEX: {
1896 if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
1897 mkMov(clipVtx[i], src);
1898 src = clipVtx[i];
1899 }
1900 break;
1901 }
1902 default:
1903 break;
1904 }
1905
1906 storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
1907 }
1908 break;
1909 }
1910 case nir_intrinsic_load_input:
1911 case nir_intrinsic_load_interpolated_input:
1912 case nir_intrinsic_load_output: {
1913 LValues &newDefs = convert(&insn->dest);
1914
1915 // FBFetch
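// reading a fragment output is implemented as a TXF on a 2D MS array
// target, addressed by pixel position, layer and sample index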
1916 if (prog->getType() == Program::TYPE_FRAGMENT &&
1917 op == nir_intrinsic_load_output) {
1918 std::vector<Value*> defs, srcs;
1919 uint8_t mask = 0;
1920
1921 srcs.push_back(getSSA());
1922 srcs.push_back(getSSA());
1923 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
1924 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
1925 mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
1926 mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
1927
1928 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
1929 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
1930
1931 for (uint8_t i = 0u; i < insn->num_components; ++i) {
1932 defs.push_back(newDefs[i]);
1933 mask |= 1 << i;
1934 }
1935
1936 TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
1937 texi->tex.levelZero = 1;
1938 texi->tex.mask = mask;
1939 texi->tex.useOffsets = 0;
1940 texi->tex.r = 0xffff;
1941 texi->tex.s = 0xffff;
1942
1943 info->prop.fp.readsFramebuffer = true;
1944 break;
1945 }
1946
1947 const DataType dType = getDType(insn);
1948 Value *indirect;
1949 bool input = op != nir_intrinsic_load_output;
1950 operation nvirOp;
1951 uint32_t mode = 0;
1952
1953 uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
1954 nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];
1955
1956 // see load_barycentric_* handling
1957 if (prog->getType() == Program::TYPE_FRAGMENT) {
1958 mode = translateInterpMode(&vary, nvirOp);
1959 if (op == nir_intrinsic_load_interpolated_input) {
1960 ImmediateValue immMode;
1961 if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
1962 mode |= immMode.reg.data.u32;
1963 }
1964 }
1965
1966 for (uint8_t i = 0u; i < insn->num_components; ++i) {
1967 uint32_t address = getSlotAddress(insn, idx, i);
1968 Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
1969 if (prog->getType() == Program::TYPE_FRAGMENT) {
1970 int s = 1;
1971 if (typeSizeof(dType) == 8) {
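// 64-bit inputs are interpolated as two 32-bit halves which get
// merged back into a single value afterwards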
1972 Value *lo = getSSA();
1973 Value *hi = getSSA();
1974 Instruction *interp;
1975
1976 interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
1977 if (nvirOp == OP_PINTERP)
1978 interp->setSrc(s++, fp.position);
1979 if (mode & NV50_IR_INTERP_OFFSET)
1980 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1981 interp->setInterpolate(mode);
1982 interp->setIndirect(0, 0, indirect);
1983
1984 Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
1985             interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
                 s = 1; // the source indices start over on the second instruction
1986 if (nvirOp == OP_PINTERP)
1987 interp->setSrc(s++, fp.position);
1988 if (mode & NV50_IR_INTERP_OFFSET)
1989 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1990 interp->setInterpolate(mode);
1991 interp->setIndirect(0, 0, indirect);
1992
1993 mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
1994 } else {
1995 Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
1996 if (nvirOp == OP_PINTERP)
1997 interp->setSrc(s++, fp.position);
1998 if (mode & NV50_IR_INTERP_OFFSET)
1999 interp->setSrc(s++, getSrc(&insn->src[0], 0));
2000 interp->setInterpolate(mode);
2001 interp->setIndirect(0, 0, indirect);
2002 }
2003 } else {
2004 mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
2005 }
2006 }
2007 break;
2008 }
2009 case nir_intrinsic_load_barycentric_at_offset:
2010 case nir_intrinsic_load_barycentric_at_sample:
2011 case nir_intrinsic_load_barycentric_centroid:
2012 case nir_intrinsic_load_barycentric_pixel:
2013 case nir_intrinsic_load_barycentric_sample: {
2014 LValues &newDefs = convert(&insn->dest);
2015 uint32_t mode;
2016
2017 if (op == nir_intrinsic_load_barycentric_centroid ||
2018 op == nir_intrinsic_load_barycentric_sample) {
2019 mode = NV50_IR_INTERP_CENTROID;
2020 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
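// clamp the offset to what the hardware can presumably handle, convert
// both components to signed fixed point (scaled by 2^12) and pack them
// into the low and high halves of one word, i.e.
// result = (y & 0xffff) << 16 | (x & 0xffff)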
2021 Value *offs[2];
2022 for (uint8_t c = 0; c < 2; c++) {
2023 offs[c] = getScratch();
2024 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
2025 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
2026 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
2027 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
2028 }
2029 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
2030
2031 mode = NV50_IR_INTERP_OFFSET;
2032 } else if (op == nir_intrinsic_load_barycentric_pixel) {
2033 mode = NV50_IR_INTERP_DEFAULT;
2034 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
2035 info->prop.fp.readsSampleLocations = true;
2036 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
2037 mode = NV50_IR_INTERP_OFFSET;
2038 } else {
2039 unreachable("all intrinsics already handled above");
2040 }
2041
2042 loadImm(newDefs[1], mode);
2043 break;
2044 }
2045 case nir_intrinsic_discard:
2046 mkOp(OP_DISCARD, TYPE_NONE, NULL);
2047 break;
2048 case nir_intrinsic_discard_if: {
2049 Value *pred = getSSA(1, FILE_PREDICATE);
2050 if (insn->num_components > 1) {
2051 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
2052 assert(false);
2053 return false;
2054 }
2055 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2056 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
2057 break;
2058 }
2059 case nir_intrinsic_load_base_vertex:
2060 case nir_intrinsic_load_base_instance:
2061 case nir_intrinsic_load_draw_id:
2062 case nir_intrinsic_load_front_face:
2063 case nir_intrinsic_load_helper_invocation:
2064 case nir_intrinsic_load_instance_id:
2065 case nir_intrinsic_load_invocation_id:
2066 case nir_intrinsic_load_local_group_size:
2067 case nir_intrinsic_load_local_invocation_id:
2068 case nir_intrinsic_load_num_work_groups:
2069 case nir_intrinsic_load_patch_vertices_in:
2070 case nir_intrinsic_load_primitive_id:
2071 case nir_intrinsic_load_sample_id:
2072 case nir_intrinsic_load_sample_mask_in:
2073 case nir_intrinsic_load_sample_pos:
2074 case nir_intrinsic_load_subgroup_eq_mask:
2075 case nir_intrinsic_load_subgroup_ge_mask:
2076 case nir_intrinsic_load_subgroup_gt_mask:
2077 case nir_intrinsic_load_subgroup_le_mask:
2078 case nir_intrinsic_load_subgroup_lt_mask:
2079 case nir_intrinsic_load_subgroup_invocation:
2080 case nir_intrinsic_load_tess_coord:
2081 case nir_intrinsic_load_tess_level_inner:
2082 case nir_intrinsic_load_tess_level_outer:
2083 case nir_intrinsic_load_vertex_id:
2084 case nir_intrinsic_load_work_group_id: {
2085 const DataType dType = getDType(insn);
2086 SVSemantic sv = convert(op);
2087 LValues &newDefs = convert(&insn->dest);
2088
2089 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2090 Value *def;
2091 if (typeSizeof(dType) == 8)
2092 def = getSSA();
2093 else
2094 def = newDefs[i];
2095
2096 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
2097 loadImm(def, 0u);
2098 } else {
2099 Symbol *sym = mkSysVal(sv, i);
2100 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
2101 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
2102 rdsv->perPatch = 1;
2103 }
2104
2105 if (typeSizeof(dType) == 8)
2106 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
2107 }
2108 break;
2109 }
2110 // constants
2111 case nir_intrinsic_load_subgroup_size: {
2112 LValues &newDefs = convert(&insn->dest);
2113 loadImm(newDefs[0], 32u);
2114 break;
2115 }
2116 case nir_intrinsic_vote_all:
2117 case nir_intrinsic_vote_any:
2118 case nir_intrinsic_vote_ieq: {
2119 LValues &newDefs = convert(&insn->dest);
2120 Value *pred = getScratch(1, FILE_PREDICATE);
2121 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2122 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
2123 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
2124 break;
2125 }
2126 case nir_intrinsic_ballot: {
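// a ballot is just a VOTE ANY of each lane's predicate, which yields
// the 32-bit mask of lanes the condition holds in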
2127 LValues &newDefs = convert(&insn->dest);
2128 Value *pred = getSSA(1, FILE_PREDICATE);
2129 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2130 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
2131 break;
2132 }
2133 case nir_intrinsic_read_first_invocation:
2134 case nir_intrinsic_read_invocation: {
2135 LValues &newDefs = convert(&insn->dest);
2136 const DataType dType = getDType(insn);
2137 Value *tmp = getScratch();
2138
2139 if (op == nir_intrinsic_read_first_invocation) {
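// find the lowest active lane: VOTE ANY of an immediate 1 gives the
// mask of active lanes, and the bit-reversing EXTBF plus BFIND turn
// that into the index of its least significant set bit; the SHFL
// below then broadcasts that lane's value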
2140 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
2141 mkOp2(OP_EXTBF, TYPE_U32, tmp, tmp, mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2142 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2143 } else
2144 tmp = getSrc(&insn->src[1], 0);
2145
2146 for (uint8_t i = 0; i < insn->num_components; ++i) {
2147 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
2148 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
2149 }
2150 break;
2151 }
2152 case nir_intrinsic_load_per_vertex_input: {
2153 const DataType dType = getDType(insn);
2154 LValues &newDefs = convert(&insn->dest);
2155 Value *indirectVertex;
2156 Value *indirectOffset;
2157 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2158 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2159
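// PFETCH resolves the (base + indirect) vertex index into the
// per-vertex base address used by the input loads below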
2160 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
2161 mkImm(baseVertex), indirectVertex);
2162 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2163 uint32_t address = getSlotAddress(insn, idx, i);
2164 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
2165 indirectOffset, vtxBase, info->in[idx].patch);
2166 }
2167 break;
2168 }
2169 case nir_intrinsic_load_per_vertex_output: {
2170 const DataType dType = getDType(insn);
2171 LValues &newDefs = convert(&insn->dest);
2172 Value *indirectVertex;
2173 Value *indirectOffset;
2174 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2175 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2176 Value *vtxBase = NULL;
2177
2178 if (indirectVertex)
2179 vtxBase = indirectVertex;
2180 else
2181 vtxBase = loadImm(NULL, baseVertex);
2182
2183 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
2184
2185 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2186 uint32_t address = getSlotAddress(insn, idx, i);
2187 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
2188                  indirectOffset, vtxBase, info->out[idx].patch);
2189 }
2190 break;
2191 }
2192 case nir_intrinsic_emit_vertex:
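// user clip planes are ordinary outputs, so they have to be written
// again for every vertex we emit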
2193 if (info->io.genUserClip > 0)
2194 handleUserClipPlanes();
2195 // fallthrough
2196 case nir_intrinsic_end_primitive: {
2197 uint32_t idx = nir_intrinsic_stream_id(insn);
2198 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
2199 break;
2200 }
2201 case nir_intrinsic_load_ubo: {
2202 const DataType dType = getDType(insn);
2203 LValues &newDefs = convert(&insn->dest);
2204 Value *indirectIndex;
2205 Value *indirectOffset;
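// constbuf 0 is reserved for the regular uniforms, so gallium UBO
// bindings start at index 1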
2206 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
2207 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2208
2209 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2210 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
2211 indirectOffset, indirectIndex);
2212 }
2213 break;
2214 }
2215 case nir_intrinsic_get_buffer_size: {
2216 LValues &newDefs = convert(&insn->dest);
2217 const DataType dType = getDType(insn);
2218 Value *indirectBuffer;
2219 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2220
2221 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2222 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2223 break;
2224 }
2225 case nir_intrinsic_store_ssbo: {
2226 DataType sType = getSType(insn->src[0], false, false);
2227 Value *indirectBuffer;
2228 Value *indirectOffset;
2229 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2230 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2231
2232 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2233 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2234 continue;
2235 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2236 offset + i * typeSizeof(sType));
2237 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2238 ->setIndirect(0, 1, indirectBuffer);
2239 }
2240 info->io.globalAccess |= 0x2;
2241 break;
2242 }
2243 case nir_intrinsic_load_ssbo: {
2244 const DataType dType = getDType(insn);
2245 LValues &newDefs = convert(&insn->dest);
2246 Value *indirectBuffer;
2247 Value *indirectOffset;
2248 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2249 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2250
2251 for (uint8_t i = 0u; i < insn->num_components; ++i)
2252 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2253 indirectOffset, indirectBuffer);
2254
2255 info->io.globalAccess |= 0x1;
2256 break;
2257 }
2258 case nir_intrinsic_shared_atomic_add:
2259 case nir_intrinsic_shared_atomic_and:
2260 case nir_intrinsic_shared_atomic_comp_swap:
2261 case nir_intrinsic_shared_atomic_exchange:
2262 case nir_intrinsic_shared_atomic_or:
2263 case nir_intrinsic_shared_atomic_imax:
2264 case nir_intrinsic_shared_atomic_imin:
2265 case nir_intrinsic_shared_atomic_umax:
2266 case nir_intrinsic_shared_atomic_umin:
2267 case nir_intrinsic_shared_atomic_xor: {
2268 const DataType dType = getDType(insn);
2269 LValues &newDefs = convert(&insn->dest);
2270 Value *indirectOffset;
2271 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2272 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2273 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2274 if (op == nir_intrinsic_shared_atomic_comp_swap)
2275 atom->setSrc(2, getSrc(&insn->src[2], 0));
2276 atom->setIndirect(0, 0, indirectOffset);
2277 atom->subOp = getSubOp(op);
2278 break;
2279 }
2280 case nir_intrinsic_ssbo_atomic_add:
2281 case nir_intrinsic_ssbo_atomic_and:
2282 case nir_intrinsic_ssbo_atomic_comp_swap:
2283 case nir_intrinsic_ssbo_atomic_exchange:
2284 case nir_intrinsic_ssbo_atomic_or:
2285 case nir_intrinsic_ssbo_atomic_imax:
2286 case nir_intrinsic_ssbo_atomic_imin:
2287 case nir_intrinsic_ssbo_atomic_umax:
2288 case nir_intrinsic_ssbo_atomic_umin:
2289 case nir_intrinsic_ssbo_atomic_xor: {
2290 const DataType dType = getDType(insn);
2291 LValues &newDefs = convert(&insn->dest);
2292 Value *indirectBuffer;
2293 Value *indirectOffset;
2294 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2295 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2296
2297 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2298 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2299 getSrc(&insn->src[2], 0));
2300 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2301 atom->setSrc(2, getSrc(&insn->src[3], 0));
2302 atom->setIndirect(0, 0, indirectOffset);
2303 atom->setIndirect(0, 1, indirectBuffer);
2304 atom->subOp = getSubOp(op);
2305
2306 info->io.globalAccess |= 0x2;
2307 break;
2308 }
2309 case nir_intrinsic_image_deref_atomic_add:
2310 case nir_intrinsic_image_deref_atomic_and:
2311 case nir_intrinsic_image_deref_atomic_comp_swap:
2312 case nir_intrinsic_image_deref_atomic_exchange:
2313 case nir_intrinsic_image_deref_atomic_max:
2314 case nir_intrinsic_image_deref_atomic_min:
2315 case nir_intrinsic_image_deref_atomic_or:
2316 case nir_intrinsic_image_deref_atomic_xor:
2317 case nir_intrinsic_image_deref_load:
2318 case nir_intrinsic_image_deref_samples:
2319 case nir_intrinsic_image_deref_size:
2320 case nir_intrinsic_image_deref_store: {
2321 const nir_variable *tex;
2322 std::vector<Value*> srcs, defs;
2323 Value *indirect;
2324 DataType ty;
2325
2326 uint32_t mask = 0;
2327 nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
2328 const glsl_type *type = deref->type;
2329 TexInstruction::Target target =
2330 convert((glsl_sampler_dim)type->sampler_dimensionality,
2331 type->sampler_array, type->sampler_shadow);
2332 unsigned int argCount = getNIRArgCount(target);
2333 uint16_t location = handleDeref(deref, indirect, tex);
2334
2335 if (opInfo.has_dest) {
2336 LValues &newDefs = convert(&insn->dest);
2337 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2338 defs.push_back(newDefs[i]);
2339 mask |= 1 << i;
2340 }
2341 }
2342
2343 switch (op) {
2344 case nir_intrinsic_image_deref_atomic_add:
2345 case nir_intrinsic_image_deref_atomic_and:
2346 case nir_intrinsic_image_deref_atomic_comp_swap:
2347 case nir_intrinsic_image_deref_atomic_exchange:
2348 case nir_intrinsic_image_deref_atomic_max:
2349 case nir_intrinsic_image_deref_atomic_min:
2350 case nir_intrinsic_image_deref_atomic_or:
2351 case nir_intrinsic_image_deref_atomic_xor:
2352 ty = getDType(insn);
2353 mask = 0x1;
2354 info->io.globalAccess |= 0x2;
2355 break;
2356 case nir_intrinsic_image_deref_load:
2357 ty = TYPE_U32;
2358 info->io.globalAccess |= 0x1;
2359 break;
2360 case nir_intrinsic_image_deref_store:
2361 ty = TYPE_U32;
2362 mask = 0xf;
2363 info->io.globalAccess |= 0x2;
2364 break;
2365 case nir_intrinsic_image_deref_samples:
2366 mask = 0x8;
2367 ty = TYPE_U32;
2368 break;
2369 case nir_intrinsic_image_deref_size:
2370 ty = TYPE_U32;
2371 break;
2372 default:
2373 unreachable("unhandled image opcode");
2374 break;
2375 }
2376
2377 // coords
2378 if (opInfo.num_srcs >= 2)
2379 for (unsigned int i = 0u; i < argCount; ++i)
2380 srcs.push_back(getSrc(&insn->src[1], i));
2381
2382       // the sample index is just another src added after coords
2383 if (opInfo.num_srcs >= 3 && target.isMS())
2384 srcs.push_back(getSrc(&insn->src[2], 0));
2385
2386 if (opInfo.num_srcs >= 4) {
2387 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2388 for (uint8_t i = 0u; i < components; ++i)
2389 srcs.push_back(getSrc(&insn->src[3], i));
2390 }
2391
2392 if (opInfo.num_srcs >= 5)
2393          // 1 for atomic swap
2394 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2395 srcs.push_back(getSrc(&insn->src[4], i));
2396
2397 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2398 texi->tex.bindless = false;
2399 texi->tex.format = &nv50_ir::TexInstruction::formatTable[convertGLImgFormat(tex->data.image.format)];
2400 texi->tex.mask = mask;
2401 texi->cache = getCacheModeFromVar(tex);
2402 texi->setType(ty);
2403 texi->subOp = getSubOp(op);
2404
2405 if (indirect)
2406 texi->setIndirectR(indirect);
2407
2408 break;
2409 }
2410 case nir_intrinsic_store_shared: {
2411 DataType sType = getSType(insn->src[0], false, false);
2412 Value *indirectOffset;
2413 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2414
2415 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2416 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2417 continue;
2418 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
2419 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2420 }
2421 break;
2422 }
2423 case nir_intrinsic_load_shared: {
2424 const DataType dType = getDType(insn);
2425 LValues &newDefs = convert(&insn->dest);
2426 Value *indirectOffset;
2427 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2428
2429 for (uint8_t i = 0u; i < insn->num_components; ++i)
2430 loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
2431
2432 break;
2433 }
2434 case nir_intrinsic_barrier: {
2435 // TODO: add flag to shader_info
2436 info->numBarriers = 1;
2437 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2438 bar->fixed = 1;
2439 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2440 break;
2441 }
2442 case nir_intrinsic_group_memory_barrier:
2443 case nir_intrinsic_memory_barrier:
2444 case nir_intrinsic_memory_barrier_atomic_counter:
2445 case nir_intrinsic_memory_barrier_buffer:
2446 case nir_intrinsic_memory_barrier_image:
2447 case nir_intrinsic_memory_barrier_shared: {
2448 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2449 bar->fixed = 1;
2450 bar->subOp = getSubOp(op);
2451 break;
2452 }
2453 case nir_intrinsic_shader_clock: {
2454 const DataType dType = getDType(insn);
2455 LValues &newDefs = convert(&insn->dest);
2456
2457 loadImm(newDefs[0], 0u);
2458 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2459 break;
2460 }
2461 default:
2462 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2463 return false;
2464 }
2465
2466 return true;
2467 }
2468
2469 bool
2470 Converter::visit(nir_jump_instr *insn)
2471 {
2472 switch (insn->type) {
2473 case nir_jump_return:
2474 // TODO: this only works in the main function
2475 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2476 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2477 break;
2478 case nir_jump_break:
2479 case nir_jump_continue: {
2480 bool isBreak = insn->type == nir_jump_break;
2481 nir_block *block = insn->instr.block;
2482 assert(!block->successors[1]);
2483 BasicBlock *target = convert(block->successors[0]);
2484 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2485 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2486 break;
2487 }
2488 default:
2489 ERROR("unknown nir_jump_type %u\n", insn->type);
2490 return false;
2491 }
2492
2493 return true;
2494 }
2495
2496 bool
2497 Converter::visit(nir_load_const_instr *insn)
2498 {
2499 assert(insn->def.bit_size <= 64);
2500
2501 LValues &newDefs = convert(&insn->def);
2502 for (int i = 0; i < insn->def.num_components; i++) {
2503 switch (insn->def.bit_size) {
2504 case 64:
2505 loadImm(newDefs[i], insn->value.u64[i]);
2506 break;
2507 case 32:
2508 loadImm(newDefs[i], insn->value.u32[i]);
2509 break;
2510 case 16:
2511 loadImm(newDefs[i], insn->value.u16[i]);
2512 break;
2513 case 8:
2514 loadImm(newDefs[i], insn->value.u8[i]);
2515 break;
2516 }
2517 }
2518 return true;
2519 }
2520
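// ALU instructions are expected to have been fully scalarized by the
// nir_lower_alu_to_scalar pass in run(), so reject anything with more
// than one component or a non-trivial write mask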
2521 #define DEFAULT_CHECKS \
2522 if (insn->dest.dest.ssa.num_components > 1) { \
2523 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2524 return false; \
2525 } \
2526 if (insn->dest.write_mask != 1) { \
2527 ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
2528 return false; \
2529 }
2530 bool
2531 Converter::visit(nir_alu_instr *insn)
2532 {
2533 const nir_op op = insn->op;
2534 const nir_op_info &info = nir_op_infos[op];
2535 DataType dType = getDType(insn);
2536 const std::vector<DataType> sTypes = getSTypes(insn);
2537
2538 Instruction *oldPos = this->bb->getExit();
2539
2540 switch (op) {
2541 case nir_op_fabs:
2542 case nir_op_iabs:
2543 case nir_op_fadd:
2544 case nir_op_iadd:
2545 case nir_op_fand:
2546 case nir_op_iand:
2547 case nir_op_fceil:
2548 case nir_op_fcos:
2549 case nir_op_fddx:
2550 case nir_op_fddx_coarse:
2551 case nir_op_fddx_fine:
2552 case nir_op_fddy:
2553 case nir_op_fddy_coarse:
2554 case nir_op_fddy_fine:
2555 case nir_op_fdiv:
2556 case nir_op_idiv:
2557 case nir_op_udiv:
2558 case nir_op_fexp2:
2559 case nir_op_ffloor:
2560 case nir_op_ffma:
2561 case nir_op_flog2:
2562 case nir_op_fmax:
2563 case nir_op_imax:
2564 case nir_op_umax:
2565 case nir_op_fmin:
2566 case nir_op_imin:
2567 case nir_op_umin:
2568 case nir_op_fmod:
2569 case nir_op_imod:
2570 case nir_op_umod:
2571 case nir_op_fmul:
2572 case nir_op_imul:
2573 case nir_op_imul_high:
2574 case nir_op_umul_high:
2575 case nir_op_fneg:
2576 case nir_op_ineg:
2577 case nir_op_fnot:
2578 case nir_op_inot:
2579 case nir_op_for:
2580 case nir_op_ior:
2581 case nir_op_pack_64_2x32_split:
2582 case nir_op_fpow:
2583 case nir_op_frcp:
2584 case nir_op_frem:
2585 case nir_op_irem:
2586 case nir_op_frsq:
2587 case nir_op_fsat:
2588 case nir_op_ishr:
2589 case nir_op_ushr:
2590 case nir_op_fsin:
2591 case nir_op_fsqrt:
2592 case nir_op_fsub:
2593 case nir_op_isub:
2594 case nir_op_ftrunc:
2595 case nir_op_ishl:
2596 case nir_op_fxor:
2597 case nir_op_ixor: {
2598 DEFAULT_CHECKS;
2599 LValues &newDefs = convert(&insn->dest);
2600 operation preOp = preOperationNeeded(op);
2601 if (preOp != OP_NOP) {
2602 assert(info.num_inputs < 2);
2603 Value *tmp = getSSA(typeSizeof(dType));
2604 Instruction *i0 = mkOp(preOp, dType, tmp);
2605 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2606 if (info.num_inputs) {
2607 i0->setSrc(0, getSrc(&insn->src[0]));
2608 i1->setSrc(0, tmp);
2609 }
2610 i1->subOp = getSubOp(op);
2611 } else {
2612 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2613 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2614 i->setSrc(s, getSrc(&insn->src[s]));
2615 }
2616 i->subOp = getSubOp(op);
2617 }
2618 break;
2619 }
2620 case nir_op_ifind_msb:
2621 case nir_op_ufind_msb: {
2622 DEFAULT_CHECKS;
2623 LValues &newDefs = convert(&insn->dest);
2624 dType = sTypes[0];
2625 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2626 break;
2627 }
2628 case nir_op_fround_even: {
2629 DEFAULT_CHECKS;
2630 LValues &newDefs = convert(&insn->dest);
2631 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2632 break;
2633 }
2634 // convert instructions
2635 case nir_op_f2f32:
2636 case nir_op_f2i32:
2637 case nir_op_f2u32:
2638 case nir_op_i2f32:
2639 case nir_op_i2i32:
2640 case nir_op_u2f32:
2641 case nir_op_u2u32:
2642 case nir_op_f2f64:
2643 case nir_op_f2i64:
2644 case nir_op_f2u64:
2645 case nir_op_i2f64:
2646 case nir_op_i2i64:
2647 case nir_op_u2f64:
2648 case nir_op_u2u64: {
2649 DEFAULT_CHECKS;
2650 LValues &newDefs = convert(&insn->dest);
2651 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2652 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2653 i->rnd = ROUND_Z;
2654 i->sType = sTypes[0];
2655 break;
2656 }
2657 // compare instructions
2658 case nir_op_feq32:
2659 case nir_op_ieq32:
2660 case nir_op_fge32:
2661 case nir_op_ige32:
2662 case nir_op_uge32:
2663 case nir_op_flt32:
2664 case nir_op_ilt32:
2665 case nir_op_ult32:
2666 case nir_op_fne32:
2667 case nir_op_ine32: {
2668 DEFAULT_CHECKS;
2669 LValues &newDefs = convert(&insn->dest);
2670 Instruction *i = mkCmp(getOperation(op),
2671 getCondCode(op),
2672 dType,
2673 newDefs[0],
2674 dType,
2675 getSrc(&insn->src[0]),
2676 getSrc(&insn->src[1]));
2677 if (info.num_inputs == 3)
2678 i->setSrc(2, getSrc(&insn->src[2]));
2679 i->sType = sTypes[0];
2680 break;
2681 }
2682    // these are weird ALU ops and need special handling, because
2683    // 1. they are always component based
2684    // 2. they basically just merge multiple values into one data type
2685 case nir_op_imov:
2686 case nir_op_fmov:
2687 if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
2688 nir_reg_dest& reg = insn->dest.dest.reg;
2689 uint32_t goffset = regToLmemOffset[reg.reg->index];
2690 uint8_t comps = reg.reg->num_components;
2691 uint8_t size = reg.reg->bit_size / 8;
2692 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2693 uint32_t aoffset = csize * reg.base_offset;
2694 Value *indirect = NULL;
2695
2696 if (reg.indirect)
2697 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
2698 getSrc(reg.indirect, 0), mkImm(csize));
2699
2700 for (uint8_t i = 0u; i < comps; ++i) {
2701 if (!((1u << i) & insn->dest.write_mask))
2702 continue;
2703
2704 Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
2705 mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
2706 }
2707 break;
2708 } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
2709 LValues &newDefs = convert(&insn->dest);
2710 nir_reg_src& reg = insn->src[0].src.reg;
2711 uint32_t goffset = regToLmemOffset[reg.reg->index];
2712 // uint8_t comps = reg.reg->num_components;
2713 uint8_t size = reg.reg->bit_size / 8;
2714 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2715 uint32_t aoffset = csize * reg.base_offset;
2716 Value *indirect = NULL;
2717
2718 if (reg.indirect)
2719 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));
2720
2721 for (uint8_t i = 0u; i < newDefs.size(); ++i)
2722 loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
2723
2724 break;
2725 } else {
2726 LValues &newDefs = convert(&insn->dest);
2727 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2728 mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
2729 }
2730 }
2731 break;
2732 case nir_op_vec2:
2733 case nir_op_vec3:
2734 case nir_op_vec4: {
2735 LValues &newDefs = convert(&insn->dest);
2736 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2737 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2738 }
2739 break;
2740 }
2741 // (un)pack
2742 case nir_op_pack_64_2x32: {
2743 LValues &newDefs = convert(&insn->dest);
2744 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2745 merge->setSrc(0, getSrc(&insn->src[0], 0));
2746 merge->setSrc(1, getSrc(&insn->src[0], 1));
2747 break;
2748 }
2749 case nir_op_pack_half_2x16_split: {
2750 LValues &newDefs = convert(&insn->dest);
2751 Value *tmpH = getSSA();
2752 Value *tmpL = getSSA();
2753
2754 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2755 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2756 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2757 break;
2758 }
2759 case nir_op_unpack_half_2x16_split_x:
2760 case nir_op_unpack_half_2x16_split_y: {
2761 LValues &newDefs = convert(&insn->dest);
2762 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2763 if (op == nir_op_unpack_half_2x16_split_y)
2764 cvt->subOp = 1;
2765 break;
2766 }
2767 case nir_op_unpack_64_2x32: {
2768 LValues &newDefs = convert(&insn->dest);
2769 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2770 break;
2771 }
2772 case nir_op_unpack_64_2x32_split_x: {
2773 LValues &newDefs = convert(&insn->dest);
2774 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2775 break;
2776 }
2777 case nir_op_unpack_64_2x32_split_y: {
2778 LValues &newDefs = convert(&insn->dest);
2779 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2780 break;
2781 }
2782 // special instructions
2783 case nir_op_fsign:
2784 case nir_op_isign: {
2785 DEFAULT_CHECKS;
2786 DataType iType;
2787 if (::isFloatType(dType))
2788 iType = TYPE_F32;
2789 else
2790 iType = TYPE_S32;
2791
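// compute sign(x) as (x > 0) - (x < 0); integer SETs produce 0/-1
// rather than 0/1, hence the swapped subtraction in the integer case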
2792 LValues &newDefs = convert(&insn->dest);
2793 LValue *val0 = getScratch();
2794 LValue *val1 = getScratch();
2795 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2796 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2797
2798 if (dType == TYPE_F64) {
2799 mkOp2(OP_SUB, iType, val0, val0, val1);
2800 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2801 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2802 mkOp2(OP_SUB, iType, val0, val1, val0);
2803 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2804 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2805 } else if (::isFloatType(dType))
2806 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2807 else
2808 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2809 break;
2810 }
2811 case nir_op_fcsel:
2812 case nir_op_b32csel: {
2813 DEFAULT_CHECKS;
2814 LValues &newDefs = convert(&insn->dest);
2815 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2816 break;
2817 }
2818 case nir_op_ibitfield_extract:
2819 case nir_op_ubitfield_extract: {
2820 DEFAULT_CHECKS;
2821 Value *tmp = getSSA();
2822 LValues &newDefs = convert(&insn->dest);
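// EXTBF wants position and size packed into a single source, so
// build tmp = (bits << 8) | offset with an INSBF first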
2823 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2824 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2825 break;
2826 }
2827 case nir_op_bfm: {
2828 DEFAULT_CHECKS;
2829 LValues &newDefs = convert(&insn->dest);
2830 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2831 break;
2832 }
2833 case nir_op_bitfield_insert: {
2834 DEFAULT_CHECKS;
2835 LValues &newDefs = convert(&insn->dest);
2836 LValue *temp = getSSA();
2837 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2838 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2839 break;
2840 }
2841 case nir_op_bit_count: {
2842 DEFAULT_CHECKS;
2843 LValues &newDefs = convert(&insn->dest);
2844 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2845 break;
2846 }
2847 case nir_op_bitfield_reverse: {
2848 DEFAULT_CHECKS;
2849 LValues &newDefs = convert(&insn->dest);
2850 mkOp2(OP_EXTBF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2851 break;
2852 }
2853 case nir_op_find_lsb: {
2854 DEFAULT_CHECKS;
2855 LValues &newDefs = convert(&insn->dest);
2856 Value *tmp = getSSA();
2857 mkOp2(OP_EXTBF, TYPE_U32, tmp, getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2858 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2859 break;
2860 }
2861 // boolean conversions
2862 case nir_op_b2f32: {
2863 DEFAULT_CHECKS;
2864 LValues &newDefs = convert(&insn->dest);
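// NIR booleans are 0/~0, so ANDing with the bit pattern of 1.0f
// directly yields 0.0f or 1.0f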
2865 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2866 break;
2867 }
2868 case nir_op_b2f64: {
2869 DEFAULT_CHECKS;
2870 LValues &newDefs = convert(&insn->dest);
2871 Value *tmp = getSSA(4);
2872 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2873 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2874 break;
2875 }
2876 case nir_op_f2b32:
2877 case nir_op_i2b32: {
2878 DEFAULT_CHECKS;
2879 LValues &newDefs = convert(&insn->dest);
2880 Value *src1;
2881 if (typeSizeof(sTypes[0]) == 8) {
2882 src1 = loadImm(getSSA(8), 0.0);
2883 } else {
2884 src1 = zero;
2885 }
2886 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2887 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2888 break;
2889 }
2890 case nir_op_b2i32: {
2891 DEFAULT_CHECKS;
2892 LValues &newDefs = convert(&insn->dest);
2893 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2894 break;
2895 }
2896 case nir_op_b2i64: {
2897 DEFAULT_CHECKS;
2898 LValues &newDefs = convert(&insn->dest);
2899 LValue *def = getScratch();
2900 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2901 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2902 break;
2903 }
2904 default:
2905 ERROR("unknown nir_op %s\n", info.name);
2906 return false;
2907 }
2908
2909    if (!oldPos) {
2910       oldPos = this->bb->getEntry();
2911       // the block might be empty, in which case there is nothing to mark
2912       if (unlikely(!oldPos))
2913          return true;
2914       oldPos->precise = insn->exact;
2915    }
2916
2917 while (oldPos->next) {
2918 oldPos = oldPos->next;
2919 oldPos->precise = insn->exact;
2920 }
2921 oldPos->saturate = insn->dest.saturate;
2922
2923 return true;
2924 }
2925 #undef DEFAULT_CHECKS
2926
2927 bool
2928 Converter::visit(nir_ssa_undef_instr *insn)
2929 {
2930 LValues &newDefs = convert(&insn->def);
2931 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2932 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2933 }
2934 return true;
2935 }
2936
2937 #define CASE_SAMPLER(ty) \
2938 case GLSL_SAMPLER_DIM_ ## ty : \
2939 if (isArray && !isShadow) \
2940 return TEX_TARGET_ ## ty ## _ARRAY; \
2941 else if (!isArray && isShadow) \
2942 return TEX_TARGET_## ty ## _SHADOW; \
2943 else if (isArray && isShadow) \
2944 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2945 else \
2946 return TEX_TARGET_ ## ty
2947
2948 TexTarget
2949 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2950 {
2951 switch (dim) {
2952 CASE_SAMPLER(1D);
2953 CASE_SAMPLER(2D);
2954 CASE_SAMPLER(CUBE);
2955 case GLSL_SAMPLER_DIM_3D:
2956 return TEX_TARGET_3D;
2957 case GLSL_SAMPLER_DIM_MS:
2958 if (isArray)
2959 return TEX_TARGET_2D_MS_ARRAY;
2960 return TEX_TARGET_2D_MS;
2961 case GLSL_SAMPLER_DIM_RECT:
2962 if (isShadow)
2963 return TEX_TARGET_RECT_SHADOW;
2964 return TEX_TARGET_RECT;
2965 case GLSL_SAMPLER_DIM_BUF:
2966 return TEX_TARGET_BUFFER;
2967 case GLSL_SAMPLER_DIM_EXTERNAL:
2968 return TEX_TARGET_2D;
2969 default:
2970 ERROR("unknown glsl_sampler_dim %u\n", dim);
2971 assert(false);
2972 return TEX_TARGET_COUNT;
2973 }
2974 }
2975 #undef CASE_SAMPLER
2976
2977 Value*
2978 Converter::applyProjection(Value *src, Value *proj)
2979 {
2980 if (!proj)
2981 return src;
2982 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
2983 }
2984
2985 unsigned int
2986 Converter::getNIRArgCount(TexInstruction::Target& target)
2987 {
2988 unsigned int result = target.getArgCount();
2989 if (target.isCube() && target.isArray())
2990 result--;
2991 if (target.isMS())
2992 result--;
2993 return result;
2994 }
2995
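// Walk a deref chain back to the underlying nir_variable, summing up all
// constant offsets on the way. Indirect array indices get scaled by their
// element size and accumulated into a single indirect Value handed back
// through the out parameter; the return value is the variable's
// driver_location plus the constant part of the offset.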
2996 uint16_t
2997 Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
2998 {
2999 typedef std::pair<uint32_t,Value*> DerefPair;
3000 std::list<DerefPair> derefs;
3001
3002 uint16_t result = 0;
3003 while (deref->deref_type != nir_deref_type_var) {
3004 switch (deref->deref_type) {
3005 case nir_deref_type_array: {
3006 Value *indirect;
3007 uint8_t size = type_size(deref->type);
3008 result += size * getIndirect(&deref->arr.index, 0, indirect);
3009
3010 if (indirect) {
3011 derefs.push_front(std::make_pair(size, indirect));
3012 }
3013
3014 break;
3015 }
3016 case nir_deref_type_struct: {
3017 result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
3018 break;
3019 }
3020 case nir_deref_type_var:
3021 default:
3022 unreachable("nir_deref_type_var reached in handleDeref!");
3023 break;
3024 }
3025 deref = nir_deref_instr_parent(deref);
3026 }
3027
3028 indirect = NULL;
3029 for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
3030 Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
3031 if (indirect)
3032 indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
3033 else
3034 indirect = offset;
3035 }
3036
3037 tex = nir_deref_instr_get_variable(deref);
3038 assert(tex);
3039
3040 return result + tex->data.driver_location;
3041 }
3042
3043 CacheMode
3044 Converter::getCacheModeFromVar(const nir_variable *var)
3045 {
3046 if (var->data.image.access == ACCESS_VOLATILE)
3047 return CACHE_CV;
3048 if (var->data.image.access == ACCESS_COHERENT)
3049 return CACHE_CG;
3050 return CACHE_CA;
3051 }
3052
3053 bool
3054 Converter::visit(nir_tex_instr *insn)
3055 {
3056 switch (insn->op) {
3057 case nir_texop_lod:
3058 case nir_texop_query_levels:
3059 case nir_texop_tex:
3060 case nir_texop_texture_samples:
3061 case nir_texop_tg4:
3062 case nir_texop_txb:
3063 case nir_texop_txd:
3064 case nir_texop_txf:
3065 case nir_texop_txf_ms:
3066 case nir_texop_txl:
3067 case nir_texop_txs: {
3068 LValues &newDefs = convert(&insn->dest);
3069 std::vector<Value*> srcs;
3070 std::vector<Value*> defs;
3071 std::vector<nir_src*> offsets;
3072 uint8_t mask = 0;
3073 bool lz = false;
3074 Value *proj = NULL;
3075 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
3076 operation op = getOperation(insn->op);
3077
3078 int r, s;
3079 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
3080 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
3081 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
3082 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
3083 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
3084 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
3085 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
3086 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
3087 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
3088 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
3089 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
3090
3091 if (projIdx != -1)
3092 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
3093
3094 srcs.resize(insn->coord_components);
3095 for (uint8_t i = 0u; i < insn->coord_components; ++i)
3096 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
3097
3098       // sometimes we get fewer args than target.getArgCount(), but codegen expects the latter
3099 if (insn->coord_components) {
3100 uint32_t argCount = target.getArgCount();
3101
3102 if (target.isMS())
3103 argCount -= 1;
3104
3105 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
3106 srcs.push_back(getSSA());
3107 }
3108
3109 if (insn->op == nir_texop_texture_samples)
3110 srcs.push_back(zero);
3111 else if (!insn->num_srcs)
3112 srcs.push_back(loadImm(NULL, 0));
3113 if (biasIdx != -1)
3114 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
3115 if (lodIdx != -1)
3116 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3117 else if (op == OP_TXF)
3118 lz = true;
3119 if (msIdx != -1)
3120 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3121 if (offsetIdx != -1)
3122 offsets.push_back(&insn->src[offsetIdx].src);
3123 if (compIdx != -1)
3124 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3125 if (texOffIdx != -1) {
3126 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3127 texOffIdx = srcs.size() - 1;
3128 }
3129 if (sampOffIdx != -1) {
3130 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3131 sampOffIdx = srcs.size() - 1;
3132 }
3133
3134 r = insn->texture_index;
3135 s = insn->sampler_index;
3136
3137 defs.resize(newDefs.size());
3138 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3139 defs[d] = newDefs[d];
3140 mask |= 1 << d;
3141 }
3142 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3143 lz = true;
3144
3145 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3146 texi->tex.levelZero = lz;
3147 texi->tex.mask = mask;
3148
3149 if (texOffIdx != -1)
3150 texi->tex.rIndirectSrc = texOffIdx;
3151 if (sampOffIdx != -1)
3152 texi->tex.sIndirectSrc = sampOffIdx;
3153
3154 switch (insn->op) {
3155 case nir_texop_tg4:
3156 if (!target.isShadow())
3157 texi->tex.gatherComp = insn->component;
3158 break;
3159 case nir_texop_txs:
3160 texi->tex.query = TXQ_DIMS;
3161 break;
3162 case nir_texop_texture_samples:
3163 texi->tex.mask = 0x4;
3164 texi->tex.query = TXQ_TYPE;
3165 break;
3166 case nir_texop_query_levels:
3167 texi->tex.mask = 0x8;
3168 texi->tex.query = TXQ_DIMS;
3169 break;
3170 default:
3171 break;
3172 }
3173
3174 texi->tex.useOffsets = offsets.size();
3175 if (texi->tex.useOffsets) {
3176 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3177 for (uint32_t c = 0u; c < 3; ++c) {
3178 uint8_t s2 = std::min(c, target.getDim() - 1);
3179 texi->offset[s][c].set(getSrc(offsets[s], s2));
3180 texi->offset[s][c].setInsn(texi);
3181 }
3182 }
3183 }
3184
3185 if (ddxIdx != -1 && ddyIdx != -1) {
3186 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3187 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3188 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3189 }
3190 }
3191
3192 break;
3193 }
3194 default:
3195 ERROR("unknown nir_texop %u\n", insn->op);
3196 return false;
3197 }
3198 return true;
3199 }
3200
3201 bool
3202 Converter::visit(nir_deref_instr *deref)
3203 {
3204    // we just ignore those, because image intrinsics are the only place where
3205    // we should end up with deref sources, and those have to walk the deref
3206    // chain backwards anyway to get at the nir_variable. This code only
3207    // exists to catch deref types we cannot handle.
3208 switch (deref->deref_type) {
3209 case nir_deref_type_array:
3210 case nir_deref_type_struct:
3211 case nir_deref_type_var:
3212 break;
3213 default:
3214 ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
3215 return false;
3216 }
3217 return true;
3218 }
3219
3220 bool
3221 Converter::run()
3222 {
3223 bool progress;
3224
3225 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3226 nir_print_shader(nir, stderr);
3227
3228 struct nir_lower_subgroups_options subgroup_options = {
3229 .subgroup_size = 32,
3230 .ballot_bit_size = 32,
3231 };
3232
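// lower i/o, subgroup operations and all vector ALU ops, phis and
// load_consts into a form the scalar nv50 IR can consume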
3233 NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
3234 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3235 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3236 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3237 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3238 NIR_PASS_V(nir, nir_lower_alu_to_scalar);
3239 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3240
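// the usual NIR optimization loop, repeated until no pass makes
// progress anymore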
3241 do {
3242 progress = false;
3243 NIR_PASS(progress, nir, nir_copy_prop);
3244 NIR_PASS(progress, nir, nir_opt_remove_phis);
3245 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3246 NIR_PASS(progress, nir, nir_opt_cse);
3247 NIR_PASS(progress, nir, nir_opt_algebraic);
3248 NIR_PASS(progress, nir, nir_opt_constant_folding);
3249 NIR_PASS(progress, nir, nir_copy_prop);
3250 NIR_PASS(progress, nir, nir_opt_dce);
3251 NIR_PASS(progress, nir, nir_opt_dead_cf);
3252 } while (progress);
3253
3254 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3255 NIR_PASS_V(nir, nir_lower_locals_to_regs);
3256 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
3257 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3258
3259 // Garbage collect dead instructions
3260 nir_sweep(nir);
3261
3262 if (!parseNIR()) {
3263 ERROR("Couldn't prase NIR!\n");
3264 return false;
3265 }
3266
3267 if (!assignSlots()) {
3268 ERROR("Couldn't assign slots!\n");
3269 return false;
3270 }
3271
3272 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3273 nir_print_shader(nir, stderr);
3274
3275 nir_foreach_function(function, nir) {
3276 if (!visit(function))
3277 return false;
3278 }
3279
3280 return true;
3281 }
3282
3283 } // unnamed namespace
3284
3285 namespace nv50_ir {
3286
3287 bool
3288 Program::makeFromNIR(struct nv50_ir_prog_info *info)
3289 {
3290 nir_shader *nir = (nir_shader*)info->bin.source;
3291 Converter converter(this, nir, info);
3292 bool result = converter.run();
3293 if (!result)
3294 return result;
3295 LoweringHelper lowering;
3296 lowering.run(this);
3297 tlsSize = info->bin.tlsSpace;
3298 return result;
3299 }
3300
3301 } // namespace nv50_ir