nouveau: Reuse tgsi_get_sysval_semantic().
src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

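// Counts the vec4 attribute slots occupied by a variable of the given type.
// The signature matches NIR's IO-lowering type-size callbacks (assumption:
// that is what it gets registered as elsewhere in this file); the bindless
// flag is ignored for this counting.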
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value does not have a constant part, it is returned
   // through the Value reference parameter instead.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar; vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_deref_instr *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   // image stuff
   uint16_t handleDeref(nir_deref_instr *, Value *&indirect, const nir_variable *&);
   CacheMode getCacheModeFromVar(const nir_variable *);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirArrayLMemOffsets regToLmemOffset;
   NirBlockMap blocks;
   unsigned int curLoopDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_deref_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
   case nir_intrinsic_image_deref_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_deref_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fne32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
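   // with vec4 addressing, the indirect index counts whole vec4 slots;
   // scale it to a byte offset (16 bytes per slot) by shifting left by 4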
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid vert attrib slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

static void
varying_slot_to_tgsi_semantic(gl_varying_slot slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VARYING_SLOT_TESS_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VARYING_SLOT_PATCH0) {
      *name = TGSI_SEMANTIC_PATCH;
      *index = slot - VARYING_SLOT_PATCH0;
      return;
   }

   if (slot >= VARYING_SLOT_VAR0) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VARYING_SLOT_VAR0;
      return;
   }

   if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VARYING_SLOT_TEX0;
      return;
   }

   switch (slot) {
   case VARYING_SLOT_BFC0:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 0;
      break;
   case VARYING_SLOT_BFC1:
      *name = TGSI_SEMANTIC_BCOLOR;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_DIST0:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 0;
      break;
   case VARYING_SLOT_CLIP_DIST1:
      *name = TGSI_SEMANTIC_CLIPDIST;
      *index = 1;
      break;
   case VARYING_SLOT_CLIP_VERTEX:
      *name = TGSI_SEMANTIC_CLIPVERTEX;
      *index = 0;
      break;
   case VARYING_SLOT_COL0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VARYING_SLOT_COL1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VARYING_SLOT_EDGE:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VARYING_SLOT_FACE:
      *name = TGSI_SEMANTIC_FACE;
      *index = 0;
      break;
   case VARYING_SLOT_FOGC:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VARYING_SLOT_LAYER:
      *name = TGSI_SEMANTIC_LAYER;
      *index = 0;
      break;
   case VARYING_SLOT_PNTC:
      *name = TGSI_SEMANTIC_PCOORD;
      *index = 0;
      break;
   case VARYING_SLOT_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VARYING_SLOT_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
      *index = 0;
      break;
   case VARYING_SLOT_PSIZ:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
      *index = 0;
      break;
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
      *index = 0;
      break;
   case VARYING_SLOT_VIEWPORT:
      *name = TGSI_SEMANTIC_VIEWPORT_INDEX;
      *index = 0;
      break;
   default:
      ERROR("unknown varying slot %u\n", slot);
      assert(false);
      break;
   }
}

static void
frag_result_to_tgsi_semantic(unsigned slot, unsigned *name, unsigned *index)
{
   if (slot >= FRAG_RESULT_DATA0) {
      *name = TGSI_SEMANTIC_COLOR;
      *index = slot - FRAG_RESULT_COLOR - 2; // intentional
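      // i.e. slot - FRAG_RESULT_DATA0, relying on gl_frag_result placing
      // FRAG_RESULT_DATA0 two entries after FRAG_RESULT_COLOR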
      return;
   }

   switch (slot) {
   case FRAG_RESULT_COLOR:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case FRAG_RESULT_DEPTH:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case FRAG_RESULT_SAMPLE_MASK:
      *name = TGSI_SEMANTIC_SAMPLEMASK;
      *index = 0;
      break;
   default:
      ERROR("unknown frag result slot %u\n", slot);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->uniform_locations();
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->uniform_locations();
      else
         slots = type->fields.array->uniform_locations();
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;
   info->numOutputs = 0;

   // we have to fix up the uniform locations for image arrays
   unsigned numImages = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      const glsl_type *type = var->type;
      if (!type->without_array()->is_image())
         continue;
      var->data.driver_location = numImages;
      numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
   }

   info->numSysVals = 0;
   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
      if (!(nir->info.system_values_read & 1ull << i))
         continue;

      info->sv[info->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info->sv[info->numSysVals].si = 0;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_variable(var, &nir->inputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

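      // 64-bit types with more than two components don't fit into a single
      // vec4 slot, so they occupy twice the number of slots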
      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->in[vary].id = vary;
         info->in[vary].patch = var->data.patch;
         info->in[vary].sn = name;
         info->in[vary].si = index + i;
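         // a 64-bit component covers two 32-bit components in the mask;
         // odd slots of a doubled-up variable carry the upper halves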
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->in[vary].mask |= ((1 << comp) - 1) << frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }

   nir_foreach_variable(var, &nir->outputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         frag_result_to_tgsi_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;
            info->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         varying_slot_to_tgsi_semantic((gl_varying_slot)slot, &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->out[vary].id = vary;
         info->out[vary].patch = var->data.patch;
         info->out[vary].sn = name;
         info->out[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->out[vary].mask |= ((1 << comp) - 1) << frac;

         if (nir->info.outputs_read & 1ull << slot)
            info->out[vary].oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }

   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;

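      // append the generated clip distance outputs; up to four distances
      // pack into each vec4 output slot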
      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}

uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

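   // 64-bit values occupy two consecutive 32-bit components; overflow past
   // component 3 moves into the next vec4 row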
   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}

Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

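   // 64-bit loads from const/buffer files, and indirect 64-bit loads, are
   // split into two 32-bit loads whose results get merged below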
   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

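   // likewise, indirect 64-bit stores are split into two 32-bit halves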
   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}

bool
Converter::parseNIR()
{
   info->bin.tlsSpace = 0;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize = nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      info->prop.fp.persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.inputPrim = nir->info.gs.input_primitive;
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
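      // derive the base lane for this patch's outputs: the current lane id
      // minus the invocation id (assumption: a patch's TCS invocations run
      // in consecutive lanes, so this yields the lane of invocation 0)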
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_foreach_register(reg, &function->impl->registers) {
      if (reg->num_array_elems) {
         // TODO: packed variables would be nice, but MemoryOpt fails
         // replace 4 with reg->num_components
         uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
         regToLmemOffset[reg->index] = info->bin.tlsSpace;
         info->bin.tlsSpace += size;
      }
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

bool
Converter::visit(nir_if *nif)
{
   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   assert(!lastThen->successors[1]);
   assert(!lastElse->successors[1]);

   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   // we only insert joinats if both branches end up at the end of the if
   // again. the reason for them not doing so is breaks/continues/ret/...,
   // which have their own handling
   if (lastThen->successors[0] == lastElse->successors[0])
      bb->joinAt = mkFlow(OP_JOINAT, convert(lastThen->successors[0]),
                          CC_ALWAYS, NULL);

   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastThen), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastElse), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   if (lastThen->successors[0] == lastElse->successors[0]) {
      setPosition(convert(lastThen->successors[0]), true);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB =
      convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }
   Instruction *insn = bb->getExit();
   if (bb->cfg.incidentCount() != 0) {
      if (!insn || !insn->asFlow()) {
         mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
         bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
      } else if (insn && insn->op == OP_BRA && !insn->getPredicate() &&
                 tailBB->cfg.incidentCount() == 0) {
         // RA doesn't like having blocks around with no incident edge,
         // so we create a fake one to make it happy
         bb->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
      }
   }

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for immediate loads generated on the fly
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_deref:
      return visit(nir_instr_as_deref(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < insn->num_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // value in the z component, NIR in x
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

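         // emulate the framebuffer fetch with a texel fetch from a 2D MS
         // array texture at the fragment's integer position, layer and
         // sample index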
         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < insn->num_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         mode = translateInterpMode(&vary, nvirOp);
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode |= immMode.reg.data.u32;
         }
      }

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
1903 case nir_intrinsic_load_kernel_input: {
1904 assert(prog->getType() == Program::TYPE_COMPUTE);
1905 assert(insn->num_components == 1);
1906
1907 LValues &newDefs = convert(&insn->dest);
1908 const DataType dType = getDType(insn);
1909 Value *indirect;
1910 uint32_t idx = getIndirect(insn, 0, 0, indirect, true);
1911
1912 mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
1913 break;
1914 }
1915 case nir_intrinsic_load_barycentric_at_offset:
1916 case nir_intrinsic_load_barycentric_at_sample:
1917 case nir_intrinsic_load_barycentric_centroid:
1918 case nir_intrinsic_load_barycentric_pixel:
1919 case nir_intrinsic_load_barycentric_sample: {
1920 LValues &newDefs = convert(&insn->dest);
1921 uint32_t mode;
1922
1923 if (op == nir_intrinsic_load_barycentric_centroid ||
1924 op == nir_intrinsic_load_barycentric_sample) {
1925 mode = NV50_IR_INTERP_CENTROID;
1926 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
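// clamp the offsets to the supported range, convert them to fixed
// point (scaled by 2^12) and pack both 16-bit results into a single
// register via INSBF, as the interpolation instruction expects.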
1927 Value *offs[2];
1928 for (uint8_t c = 0; c < 2; c++) {
1929 offs[c] = getScratch();
1930 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1931 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1932 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1933 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1934 }
1935 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1936
1937 mode = NV50_IR_INTERP_OFFSET;
1938 } else if (op == nir_intrinsic_load_barycentric_pixel) {
1939 mode = NV50_IR_INTERP_DEFAULT;
1940 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
1941 info->prop.fp.readsSampleLocations = true;
1942 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
1943 mode = NV50_IR_INTERP_OFFSET;
1944 } else {
1945 unreachable("all intrinsics already handled above");
1946 }
1947
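// the second component carries the interpolation mode;
// load_interpolated_input ORs it into its own mode (see above).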
1948 loadImm(newDefs[1], mode);
1949 break;
1950 }
1951 case nir_intrinsic_discard:
1952 mkOp(OP_DISCARD, TYPE_NONE, NULL);
1953 break;
1954 case nir_intrinsic_discard_if: {
1955 Value *pred = getSSA(1, FILE_PREDICATE);
1956 if (insn->num_components > 1) {
1957 ERROR("nir_intrinsic_discard_if only supported with 1 component!\n");

1958 assert(false);
1959 return false;
1960 }
1961 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1962 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
1963 break;
1964 }
1965 case nir_intrinsic_load_base_vertex:
1966 case nir_intrinsic_load_base_instance:
1967 case nir_intrinsic_load_draw_id:
1968 case nir_intrinsic_load_front_face:
1969 case nir_intrinsic_load_helper_invocation:
1970 case nir_intrinsic_load_instance_id:
1971 case nir_intrinsic_load_invocation_id:
1972 case nir_intrinsic_load_local_group_size:
1973 case nir_intrinsic_load_local_invocation_id:
1974 case nir_intrinsic_load_num_work_groups:
1975 case nir_intrinsic_load_patch_vertices_in:
1976 case nir_intrinsic_load_primitive_id:
1977 case nir_intrinsic_load_sample_id:
1978 case nir_intrinsic_load_sample_mask_in:
1979 case nir_intrinsic_load_sample_pos:
1980 case nir_intrinsic_load_subgroup_eq_mask:
1981 case nir_intrinsic_load_subgroup_ge_mask:
1982 case nir_intrinsic_load_subgroup_gt_mask:
1983 case nir_intrinsic_load_subgroup_le_mask:
1984 case nir_intrinsic_load_subgroup_lt_mask:
1985 case nir_intrinsic_load_subgroup_invocation:
1986 case nir_intrinsic_load_tess_coord:
1987 case nir_intrinsic_load_tess_level_inner:
1988 case nir_intrinsic_load_tess_level_outer:
1989 case nir_intrinsic_load_vertex_id:
1990 case nir_intrinsic_load_work_group_id: {
1991 const DataType dType = getDType(insn);
1992 SVSemantic sv = convert(op);
1993 LValues &newDefs = convert(&insn->dest);
1994
1995 for (uint8_t i = 0u; i < insn->num_components; ++i) {
1996 Value *def;
1997 if (typeSizeof(dType) == 8)
1998 def = getSSA();
1999 else
2000 def = newDefs[i];
2001
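// a workgroup dimension of 1 makes the thread id along that axis a
// constant 0, so fold it to an immediate.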
2002 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
2003 loadImm(def, 0u);
2004 } else {
2005 Symbol *sym = mkSysVal(sv, i);
2006 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
2007 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
2008 rdsv->perPatch = 1;
2009 }
2010
2011 if (typeSizeof(dType) == 8)
2012 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
2013 }
2014 break;
2015 }
2016 // constants
2017 case nir_intrinsic_load_subgroup_size: {
2018 LValues &newDefs = convert(&insn->dest);
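// warps are 32 threads wide on NVIDIA hardware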
2019 loadImm(newDefs[0], 32u);
2020 break;
2021 }
2022 case nir_intrinsic_vote_all:
2023 case nir_intrinsic_vote_any:
2024 case nir_intrinsic_vote_ieq: {
2025 LValues &newDefs = convert(&insn->dest);
2026 Value *pred = getScratch(1, FILE_PREDICATE);
2027 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2028 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
2029 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
2030 break;
2031 }
2032 case nir_intrinsic_ballot: {
2033 LValues &newDefs = convert(&insn->dest);
2034 Value *pred = getSSA(1, FILE_PREDICATE);
2035 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
2036 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
2037 break;
2038 }
2039 case nir_intrinsic_read_first_invocation:
2040 case nir_intrinsic_read_invocation: {
2041 LValues &newDefs = convert(&insn->dest);
2042 const DataType dType = getDType(insn);
2043 Value *tmp = getScratch();
2044
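// find the first active lane: VOTE ANY yields the mask of active
// threads, which gets bit-reversed so that BFIND (in shift-amount
// mode) returns the index of the lowest set bit of the original mask.
// SHFL then broadcasts that lane's value.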
2045 if (op == nir_intrinsic_read_first_invocation) {
2046 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
2047 mkOp2(OP_EXTBF, TYPE_U32, tmp, tmp, mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2048 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2049 } else
2050 tmp = getSrc(&insn->src[1], 0);
2051
2052 for (uint8_t i = 0; i < insn->num_components; ++i) {
2053 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
2054 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
2055 }
2056 break;
2057 }
2058 case nir_intrinsic_load_per_vertex_input: {
2059 const DataType dType = getDType(insn);
2060 LValues &newDefs = convert(&insn->dest);
2061 Value *indirectVertex;
2062 Value *indirectOffset;
2063 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2064 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2065
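// OP_PFETCH resolves the (base + indirect) vertex index into the
// address used to fetch that vertex's attributes.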
2066 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
2067 mkImm(baseVertex), indirectVertex);
2068 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2069 uint32_t address = getSlotAddress(insn, idx, i);
2070 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
2071 indirectOffset, vtxBase, info->in[idx].patch);
2072 }
2073 break;
2074 }
2075 case nir_intrinsic_load_per_vertex_output: {
2076 const DataType dType = getDType(insn);
2077 LValues &newDefs = convert(&insn->dest);
2078 Value *indirectVertex;
2079 Value *indirectOffset;
2080 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
2081 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
2082 Value *vtxBase = NULL;
2083
2084 if (indirectVertex)
2085 vtxBase = indirectVertex;
2086 else
2087 vtxBase = loadImm(NULL, baseVertex);
2088
2089 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
2090
2091 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2092 uint32_t address = getSlotAddress(insn, idx, i);
2093 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
2094 indirectOffset, vtxBase, info->in[idx].patch);
2095 }
2096 break;
2097 }
2098 case nir_intrinsic_emit_vertex:
2099 if (info->io.genUserClip > 0)
2100 handleUserClipPlanes();
2101 // fallthrough
2102 case nir_intrinsic_end_primitive: {
2103 uint32_t idx = nir_intrinsic_stream_id(insn);
2104 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
2105 break;
2106 }
2107 case nir_intrinsic_load_ubo: {
2108 const DataType dType = getDType(insn);
2109 LValues &newDefs = convert(&insn->dest);
2110 Value *indirectIndex;
2111 Value *indirectOffset;
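// shift user UBO bindings up by one: the first const file is reserved
// for driver-internal data.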
2112 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
2113 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2114
2115 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2116 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
2117 indirectOffset, indirectIndex);
2118 }
2119 break;
2120 }
2121 case nir_intrinsic_get_buffer_size: {
2122 LValues &newDefs = convert(&insn->dest);
2123 const DataType dType = getDType(insn);
2124 Value *indirectBuffer;
2125 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2126
2127 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2128 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2129 break;
2130 }
2131 case nir_intrinsic_store_ssbo: {
2132 DataType sType = getSType(insn->src[0], false, false);
2133 Value *indirectBuffer;
2134 Value *indirectOffset;
2135 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2136 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2137
2138 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2139 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2140 continue;
2141 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2142 offset + i * typeSizeof(sType));
2143 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2144 ->setIndirect(0, 1, indirectBuffer);
2145 }
2146 info->io.globalAccess |= 0x2;
2147 break;
2148 }
2149 case nir_intrinsic_load_ssbo: {
2150 const DataType dType = getDType(insn);
2151 LValues &newDefs = convert(&insn->dest);
2152 Value *indirectBuffer;
2153 Value *indirectOffset;
2154 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2155 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2156
2157 for (uint8_t i = 0u; i < insn->num_components; ++i)
2158 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2159 indirectOffset, indirectBuffer);
2160
2161 info->io.globalAccess |= 0x1;
2162 break;
2163 }
2164 case nir_intrinsic_shared_atomic_add:
2165 case nir_intrinsic_shared_atomic_and:
2166 case nir_intrinsic_shared_atomic_comp_swap:
2167 case nir_intrinsic_shared_atomic_exchange:
2168 case nir_intrinsic_shared_atomic_or:
2169 case nir_intrinsic_shared_atomic_imax:
2170 case nir_intrinsic_shared_atomic_imin:
2171 case nir_intrinsic_shared_atomic_umax:
2172 case nir_intrinsic_shared_atomic_umin:
2173 case nir_intrinsic_shared_atomic_xor: {
2174 const DataType dType = getDType(insn);
2175 LValues &newDefs = convert(&insn->dest);
2176 Value *indirectOffset;
2177 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2178 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2179 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2180 if (op == nir_intrinsic_shared_atomic_comp_swap)
2181 atom->setSrc(2, getSrc(&insn->src[2], 0));
2182 atom->setIndirect(0, 0, indirectOffset);
2183 atom->subOp = getSubOp(op);
2184 break;
2185 }
2186 case nir_intrinsic_ssbo_atomic_add:
2187 case nir_intrinsic_ssbo_atomic_and:
2188 case nir_intrinsic_ssbo_atomic_comp_swap:
2189 case nir_intrinsic_ssbo_atomic_exchange:
2190 case nir_intrinsic_ssbo_atomic_or:
2191 case nir_intrinsic_ssbo_atomic_imax:
2192 case nir_intrinsic_ssbo_atomic_imin:
2193 case nir_intrinsic_ssbo_atomic_umax:
2194 case nir_intrinsic_ssbo_atomic_umin:
2195 case nir_intrinsic_ssbo_atomic_xor: {
2196 const DataType dType = getDType(insn);
2197 LValues &newDefs = convert(&insn->dest);
2198 Value *indirectBuffer;
2199 Value *indirectOffset;
2200 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2201 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2202
2203 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2204 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2205 getSrc(&insn->src[2], 0));
2206 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2207 atom->setSrc(2, getSrc(&insn->src[3], 0));
2208 atom->setIndirect(0, 0, indirectOffset);
2209 atom->setIndirect(0, 1, indirectBuffer);
2210 atom->subOp = getSubOp(op);
2211
2212 info->io.globalAccess |= 0x2;
2213 break;
2214 }
2215 case nir_intrinsic_global_atomic_add:
2216 case nir_intrinsic_global_atomic_and:
2217 case nir_intrinsic_global_atomic_comp_swap:
2218 case nir_intrinsic_global_atomic_exchange:
2219 case nir_intrinsic_global_atomic_or:
2220 case nir_intrinsic_global_atomic_imax:
2221 case nir_intrinsic_global_atomic_imin:
2222 case nir_intrinsic_global_atomic_umax:
2223 case nir_intrinsic_global_atomic_umin:
2224 case nir_intrinsic_global_atomic_xor: {
2225 const DataType dType = getDType(insn);
2226 LValues &newDefs = convert(&insn->dest);
2227 Value *address;
2228 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2229
2230 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2231 Instruction *atom =
2232 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2233 atom->setIndirect(0, 0, address);
2234 atom->subOp = getSubOp(op);
2235
2236 info->io.globalAccess |= 0x2;
2237 break;
2238 }
2239 case nir_intrinsic_bindless_image_atomic_add:
2240 case nir_intrinsic_bindless_image_atomic_and:
2241 case nir_intrinsic_bindless_image_atomic_comp_swap:
2242 case nir_intrinsic_bindless_image_atomic_exchange:
2243 case nir_intrinsic_bindless_image_atomic_imax:
2244 case nir_intrinsic_bindless_image_atomic_umax:
2245 case nir_intrinsic_bindless_image_atomic_imin:
2246 case nir_intrinsic_bindless_image_atomic_umin:
2247 case nir_intrinsic_bindless_image_atomic_or:
2248 case nir_intrinsic_bindless_image_atomic_xor:
2249 case nir_intrinsic_bindless_image_load:
2250 case nir_intrinsic_bindless_image_samples:
2251 case nir_intrinsic_bindless_image_size:
2252 case nir_intrinsic_bindless_image_store: {
2253 std::vector<Value*> srcs, defs;
2254 Value *indirect = getSrc(&insn->src[0], 0);
2255 DataType ty;
2256
2257 uint32_t mask = 0;
2258 TexInstruction::Target target =
2259 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2260 unsigned int argCount = getNIRArgCount(target);
2261 uint16_t location = 0;
2262
2263 if (opInfo.has_dest) {
2264 LValues &newDefs = convert(&insn->dest);
2265 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2266 defs.push_back(newDefs[i]);
2267 mask |= 1 << i;
2268 }
2269 }
2270
2271 switch (op) {
2272 case nir_intrinsic_bindless_image_atomic_add:
2273 case nir_intrinsic_bindless_image_atomic_and:
2274 case nir_intrinsic_bindless_image_atomic_comp_swap:
2275 case nir_intrinsic_bindless_image_atomic_exchange:
2276 case nir_intrinsic_bindless_image_atomic_imax:
2277 case nir_intrinsic_bindless_image_atomic_umax:
2278 case nir_intrinsic_bindless_image_atomic_imin:
2279 case nir_intrinsic_bindless_image_atomic_umin:
2280 case nir_intrinsic_bindless_image_atomic_or:
2281 case nir_intrinsic_bindless_image_atomic_xor:
2282 ty = getDType(insn);
2283 mask = 0x1;
2284 info->io.globalAccess |= 0x2;
2285 break;
2286 case nir_intrinsic_bindless_image_load:
2287 ty = TYPE_U32;
2288 info->io.globalAccess |= 0x1;
2289 break;
2290 case nir_intrinsic_bindless_image_store:
2291 ty = TYPE_U32;
2292 mask = 0xf;
2293 info->io.globalAccess |= 0x2;
2294 break;
2295 case nir_intrinsic_bindless_image_samples:
2296 mask = 0x8;
2297 ty = TYPE_U32;
2298 break;
2299 case nir_intrinsic_bindless_image_size:
2300 ty = TYPE_U32;
2301 break;
2302 default:
2303 unreachable("unhandled image opcode");
2304 break;
2305 }
2306
2307 // coords
2308 if (opInfo.num_srcs >= 2)
2309 for (unsigned int i = 0u; i < argCount; ++i)
2310 srcs.push_back(getSrc(&insn->src[1], i));
2311
2312 // for MS images, the sample index is just another src after the coords
2313 if (opInfo.num_srcs >= 3 && target.isMS())
2314 srcs.push_back(getSrc(&insn->src[2], 0));
2315
2316 if (opInfo.num_srcs >= 4) {
2317 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2318 for (uint8_t i = 0u; i < components; ++i)
2319 srcs.push_back(getSrc(&insn->src[3], i));
2320 }
2321
2322 if (opInfo.num_srcs >= 5)
2323 // 1 for atomic swap
2324 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2325 srcs.push_back(getSrc(&insn->src[4], i));
2326
2327 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2328 texi->tex.bindless = true;
2329 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2330 texi->tex.mask = mask;
2331 // the bindless handle is supplied through the indirect source below
2332 texi->cache = convert(nir_intrinsic_access(insn));
2333 texi->setType(ty);
2334 texi->subOp = getSubOp(op);
2335
2336 if (indirect)
2337 texi->setIndirectR(indirect);
2338
2339 break;
2340 }
2341 case nir_intrinsic_image_deref_atomic_add:
2342 case nir_intrinsic_image_deref_atomic_and:
2343 case nir_intrinsic_image_deref_atomic_comp_swap:
2344 case nir_intrinsic_image_deref_atomic_exchange:
2345 case nir_intrinsic_image_deref_atomic_imax:
2346 case nir_intrinsic_image_deref_atomic_umax:
2347 case nir_intrinsic_image_deref_atomic_imin:
2348 case nir_intrinsic_image_deref_atomic_umin:
2349 case nir_intrinsic_image_deref_atomic_or:
2350 case nir_intrinsic_image_deref_atomic_xor:
2351 case nir_intrinsic_image_deref_load:
2352 case nir_intrinsic_image_deref_samples:
2353 case nir_intrinsic_image_deref_size:
2354 case nir_intrinsic_image_deref_store: {
2355 const nir_variable *tex;
2356 std::vector<Value*> srcs, defs;
2357 Value *indirect;
2358 DataType ty;
2359
2360 uint32_t mask = 0;
2361 nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
2362 const glsl_type *type = deref->type;
2363 TexInstruction::Target target =
2364 convert((glsl_sampler_dim)type->sampler_dimensionality,
2365 type->sampler_array, type->sampler_shadow);
2366 unsigned int argCount = getNIRArgCount(target);
2367 uint16_t location = handleDeref(deref, indirect, tex);
2368
2369 if (opInfo.has_dest) {
2370 LValues &newDefs = convert(&insn->dest);
2371 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2372 defs.push_back(newDefs[i]);
2373 mask |= 1 << i;
2374 }
2375 }
2376
2377 switch (op) {
2378 case nir_intrinsic_image_deref_atomic_add:
2379 case nir_intrinsic_image_deref_atomic_and:
2380 case nir_intrinsic_image_deref_atomic_comp_swap:
2381 case nir_intrinsic_image_deref_atomic_exchange:
2382 case nir_intrinsic_image_deref_atomic_imax:
2383 case nir_intrinsic_image_deref_atomic_umax:
2384 case nir_intrinsic_image_deref_atomic_imin:
2385 case nir_intrinsic_image_deref_atomic_umin:
2386 case nir_intrinsic_image_deref_atomic_or:
2387 case nir_intrinsic_image_deref_atomic_xor:
2388 ty = getDType(insn);
2389 mask = 0x1;
2390 info->io.globalAccess |= 0x2;
2391 break;
2392 case nir_intrinsic_image_deref_load:
2393 ty = TYPE_U32;
2394 info->io.globalAccess |= 0x1;
2395 break;
2396 case nir_intrinsic_image_deref_store:
2397 ty = TYPE_U32;
2398 mask = 0xf;
2399 info->io.globalAccess |= 0x2;
2400 break;
2401 case nir_intrinsic_image_deref_samples:
2402 mask = 0x8;
2403 ty = TYPE_U32;
2404 break;
2405 case nir_intrinsic_image_deref_size:
2406 ty = TYPE_U32;
2407 break;
2408 default:
2409 unreachable("unhandled image opcode");
2410 break;
2411 }
2412
2413 // coords
2414 if (opInfo.num_srcs >= 2)
2415 for (unsigned int i = 0u; i < argCount; ++i)
2416 srcs.push_back(getSrc(&insn->src[1], i));
2417
2418 // for MS images, the sample index is just another src after the coords
2419 if (opInfo.num_srcs >= 3 && target.isMS())
2420 srcs.push_back(getSrc(&insn->src[2], 0));
2421
2422 if (opInfo.num_srcs >= 4) {
2423 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2424 for (uint8_t i = 0u; i < components; ++i)
2425 srcs.push_back(getSrc(&insn->src[3], i));
2426 }
2427
2428 if (opInfo.num_srcs >= 5)
2429 // 1 for atomic swap
2430 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2431 srcs.push_back(getSrc(&insn->src[4], i));
2432
2433 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2434 texi->tex.bindless = false;
2435 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(tex->data.image.format);
2436 texi->tex.mask = mask;
2437 texi->cache = getCacheModeFromVar(tex);
2438 texi->setType(ty);
2439 texi->subOp = getSubOp(op);
2440
2441 if (indirect)
2442 texi->setIndirectR(indirect);
2443
2444 break;
2445 }
2446 case nir_intrinsic_store_shared: {
2447 DataType sType = getSType(insn->src[0], false, false);
2448 Value *indirectOffset;
2449 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2450
2451 for (uint8_t i = 0u; i < insn->num_components; ++i) {
2452 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2453 continue;
2454 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
2455 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2456 }
2457 break;
2458 }
2459 case nir_intrinsic_load_shared: {
2460 const DataType dType = getDType(insn);
2461 LValues &newDefs = convert(&insn->dest);
2462 Value *indirectOffset;
2463 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2464
2465 for (uint8_t i = 0u; i < insn->num_components; ++i)
2466 loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);
2467
2468 break;
2469 }
2470 case nir_intrinsic_control_barrier: {
2471 // TODO: add flag to shader_info
2472 info->numBarriers = 1;
2473 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2474 bar->fixed = 1;
2475 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2476 break;
2477 }
2478 case nir_intrinsic_group_memory_barrier:
2479 case nir_intrinsic_memory_barrier:
2480 case nir_intrinsic_memory_barrier_buffer:
2481 case nir_intrinsic_memory_barrier_image:
2482 case nir_intrinsic_memory_barrier_shared: {
2483 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2484 bar->fixed = 1;
2485 bar->subOp = getSubOp(op);
2486 break;
2487 }
2488 case nir_intrinsic_memory_barrier_tcs_patch:
2489 break;
2490 case nir_intrinsic_shader_clock: {
2491 const DataType dType = getDType(insn);
2492 LValues &newDefs = convert(&insn->dest);
2493
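// SV_CLOCK only provides a 32-bit counter; the other component of the
// 2x32 result is zeroed.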
2494 loadImm(newDefs[0], 0u);
2495 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2496 break;
2497 }
2498 case nir_intrinsic_load_global: {
2499 const DataType dType = getDType(insn);
2500 LValues &newDefs = convert(&insn->dest);
2501 Value *indirectOffset;
2502 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2503
2504 for (auto i = 0u; i < insn->num_components; ++i)
2505 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2506
2507 info->io.globalAccess |= 0x1;
2508 break;
2509 }
2510 case nir_intrinsic_store_global: {
2511 DataType sType = getSType(insn->src[0], false, false);
2512
2513 for (auto i = 0u; i < insn->num_components; ++i) {
2514 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2515 continue;
2516 if (typeSizeof(sType) == 8) {
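// split 64-bit values and emit two 32-bit stores: the low word
// first, the high word at offset + 4.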
2517 Value *split[2];
2518 mkSplit(split, 4, getSrc(&insn->src[0], i));
2519
2520 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2521 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2522
2523 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2524 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2525 } else {
2526 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2527 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2528 }
2529 }
2530
2531 info->io.globalAccess |= 0x2;
2532 break;
2533 }
2534 default:
2535 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2536 return false;
2537 }
2538
2539 return true;
2540 }
2541
2542 bool
2543 Converter::visit(nir_jump_instr *insn)
2544 {
2545 switch (insn->type) {
2546 case nir_jump_return:
2547 // TODO: this only works in the main function
2548 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2549 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2550 break;
2551 case nir_jump_break:
2552 case nir_jump_continue: {
2553 bool isBreak = insn->type == nir_jump_break;
2554 nir_block *block = insn->instr.block;
2555 assert(!block->successors[1]);
2556 BasicBlock *target = convert(block->successors[0]);
2557 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2558 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2559 break;
2560 }
2561 default:
2562 ERROR("unknown nir_jump_type %u\n", insn->type);
2563 return false;
2564 }
2565
2566 return true;
2567 }
2568
2569 Value*
2570 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2571 {
2572 Value *val;
2573
2574 if (immInsertPos)
2575 setPosition(immInsertPos, true);
2576 else
2577 setPosition(bb, false);
2578
2579 switch (insn->def.bit_size) {
2580 case 64:
2581 val = loadImm(getSSA(8), insn->value[idx].u64);
2582 break;
2583 case 32:
2584 val = loadImm(getSSA(4), insn->value[idx].u32);
2585 break;
2586 case 16:
2587 val = loadImm(getSSA(2), insn->value[idx].u16);
2588 break;
2589 case 8:
2590 val = loadImm(getSSA(1), insn->value[idx].u8);
2591 break;
2592 default:
2593 unreachable("unhandled bit size!\n");
2594 }
2595 setPosition(bb, true);
2596 return val;
2597 }
2598
2599 bool
2600 Converter::visit(nir_load_const_instr *insn)
2601 {
2602 assert(insn->def.bit_size <= 64);
2603 immediates[insn->def.index] = insn;
2604 return true;
2605 }
2606
2607 #define DEFAULT_CHECKS \
2608 if (insn->dest.dest.ssa.num_components > 1) { \
2609 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2610 return false; \
2611 } \
2612 if (insn->dest.write_mask != 1) { \
2613 ERROR("nir_alu_instr only supported with a write_mask of 1!\n"); \
2614 return false; \
2615 }
2616 bool
2617 Converter::visit(nir_alu_instr *insn)
2618 {
2619 const nir_op op = insn->op;
2620 const nir_op_info &info = nir_op_infos[op];
2621 DataType dType = getDType(insn);
2622 const std::vector<DataType> sTypes = getSTypes(insn);
2623
2624 Instruction *oldPos = this->bb->getExit();
2625
2626 switch (op) {
2627 case nir_op_fabs:
2628 case nir_op_iabs:
2629 case nir_op_fadd:
2630 case nir_op_iadd:
2631 case nir_op_iand:
2632 case nir_op_fceil:
2633 case nir_op_fcos:
2634 case nir_op_fddx:
2635 case nir_op_fddx_coarse:
2636 case nir_op_fddx_fine:
2637 case nir_op_fddy:
2638 case nir_op_fddy_coarse:
2639 case nir_op_fddy_fine:
2640 case nir_op_fdiv:
2641 case nir_op_idiv:
2642 case nir_op_udiv:
2643 case nir_op_fexp2:
2644 case nir_op_ffloor:
2645 case nir_op_ffma:
2646 case nir_op_flog2:
2647 case nir_op_fmax:
2648 case nir_op_imax:
2649 case nir_op_umax:
2650 case nir_op_fmin:
2651 case nir_op_imin:
2652 case nir_op_umin:
2653 case nir_op_fmod:
2654 case nir_op_imod:
2655 case nir_op_umod:
2656 case nir_op_fmul:
2657 case nir_op_imul:
2658 case nir_op_imul_high:
2659 case nir_op_umul_high:
2660 case nir_op_fneg:
2661 case nir_op_ineg:
2662 case nir_op_inot:
2663 case nir_op_ior:
2664 case nir_op_pack_64_2x32_split:
2665 case nir_op_fpow:
2666 case nir_op_frcp:
2667 case nir_op_frem:
2668 case nir_op_irem:
2669 case nir_op_frsq:
2670 case nir_op_fsat:
2671 case nir_op_ishr:
2672 case nir_op_ushr:
2673 case nir_op_fsin:
2674 case nir_op_fsqrt:
2675 case nir_op_ftrunc:
2676 case nir_op_ishl:
2677 case nir_op_ixor: {
2678 DEFAULT_CHECKS;
2679 LValues &newDefs = convert(&insn->dest);
2680 operation preOp = preOperationNeeded(op);
2681 if (preOp != OP_NOP) {
2682 assert(info.num_inputs < 2);
2683 Value *tmp = getSSA(typeSizeof(dType));
2684 Instruction *i0 = mkOp(preOp, dType, tmp);
2685 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2686 if (info.num_inputs) {
2687 i0->setSrc(0, getSrc(&insn->src[0]));
2688 i1->setSrc(0, tmp);
2689 }
2690 i1->subOp = getSubOp(op);
2691 } else {
2692 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2693 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2694 i->setSrc(s, getSrc(&insn->src[s]));
2695 }
2696 i->subOp = getSubOp(op);
2697 }
2698 break;
2699 }
2700 case nir_op_ifind_msb:
2701 case nir_op_ufind_msb: {
2702 DEFAULT_CHECKS;
2703 LValues &newDefs = convert(&insn->dest);
2704 dType = sTypes[0];
2705 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2706 break;
2707 }
2708 case nir_op_fround_even: {
2709 DEFAULT_CHECKS;
2710 LValues &newDefs = convert(&insn->dest);
2711 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2712 break;
2713 }
2714 // convert instructions
2715 case nir_op_f2f32:
2716 case nir_op_f2i32:
2717 case nir_op_f2u32:
2718 case nir_op_i2f32:
2719 case nir_op_i2i32:
2720 case nir_op_u2f32:
2721 case nir_op_u2u32:
2722 case nir_op_f2f64:
2723 case nir_op_f2i64:
2724 case nir_op_f2u64:
2725 case nir_op_i2f64:
2726 case nir_op_i2i64:
2727 case nir_op_u2f64:
2728 case nir_op_u2u64: {
2729 DEFAULT_CHECKS;
2730 LValues &newDefs = convert(&insn->dest);
2731 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2732 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2733 i->rnd = ROUND_Z;
2734 i->sType = sTypes[0];
2735 break;
2736 }
2737 // compare instructions
2738 case nir_op_feq32:
2739 case nir_op_ieq32:
2740 case nir_op_fge32:
2741 case nir_op_ige32:
2742 case nir_op_uge32:
2743 case nir_op_flt32:
2744 case nir_op_ilt32:
2745 case nir_op_ult32:
2746 case nir_op_fne32:
2747 case nir_op_ine32: {
2748 DEFAULT_CHECKS;
2749 LValues &newDefs = convert(&insn->dest);
2750 Instruction *i = mkCmp(getOperation(op),
2751 getCondCode(op),
2752 dType,
2753 newDefs[0],
2754 dType,
2755 getSrc(&insn->src[0]),
2756 getSrc(&insn->src[1]));
2757 if (info.num_inputs == 3)
2758 i->setSrc(2, getSrc(&insn->src[2]));
2759 i->sType = sTypes[0];
2760 break;
2761 }
2762 // those are weird ALU ops and need special handling, because
2763 // 1. they are always component based
2764 // 2. they basically just merge multiple values into one data type
2765 case nir_op_mov:
2766 if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
2767 nir_reg_dest& reg = insn->dest.dest.reg;
2768 uint32_t goffset = regToLmemOffset[reg.reg->index];
2769 uint8_t comps = reg.reg->num_components;
2770 uint8_t size = reg.reg->bit_size / 8;
2771 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2772 uint32_t aoffset = csize * reg.base_offset;
2773 Value *indirect = NULL;
2774
2775 if (reg.indirect)
2776 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
2777 getSrc(reg.indirect, 0), mkImm(csize));
2778
2779 for (uint8_t i = 0u; i < comps; ++i) {
2780 if (!((1u << i) & insn->dest.write_mask))
2781 continue;
2782
2783 Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
2784 mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
2785 }
2786 break;
2787 } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
2788 LValues &newDefs = convert(&insn->dest);
2789 nir_reg_src& reg = insn->src[0].src.reg;
2790 uint32_t goffset = regToLmemOffset[reg.reg->index];
2791 // uint8_t comps = reg.reg->num_components;
2792 uint8_t size = reg.reg->bit_size / 8;
2793 uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
2794 uint32_t aoffset = csize * reg.base_offset;
2795 Value *indirect = NULL;
2796
2797 if (reg.indirect)
2798 indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));
2799
2800 for (uint8_t i = 0u; i < newDefs.size(); ++i)
2801 loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
2802
2803 break;
2804 } else {
2805 LValues &newDefs = convert(&insn->dest);
2806 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2807 mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
2808 }
2809 }
2810 break;
2811 case nir_op_vec2:
2812 case nir_op_vec3:
2813 case nir_op_vec4:
2814 case nir_op_vec8:
2815 case nir_op_vec16: {
2816 LValues &newDefs = convert(&insn->dest);
2817 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2818 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2819 }
2820 break;
2821 }
2822 // (un)pack
2823 case nir_op_pack_64_2x32: {
2824 LValues &newDefs = convert(&insn->dest);
2825 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2826 merge->setSrc(0, getSrc(&insn->src[0], 0));
2827 merge->setSrc(1, getSrc(&insn->src[0], 1));
2828 break;
2829 }
2830 case nir_op_pack_half_2x16_split: {
2831 LValues &newDefs = convert(&insn->dest);
2832 Value *tmpH = getSSA();
2833 Value *tmpL = getSSA();
2834
2835 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2836 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2837 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2838 break;
2839 }
2840 case nir_op_unpack_half_2x16_split_x:
2841 case nir_op_unpack_half_2x16_split_y: {
2842 LValues &newDefs = convert(&insn->dest);
2843 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2844 if (op == nir_op_unpack_half_2x16_split_y)
2845 cvt->subOp = 1;
2846 break;
2847 }
2848 case nir_op_unpack_64_2x32: {
2849 LValues &newDefs = convert(&insn->dest);
2850 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2851 break;
2852 }
2853 case nir_op_unpack_64_2x32_split_x: {
2854 LValues &newDefs = convert(&insn->dest);
2855 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2856 break;
2857 }
2858 case nir_op_unpack_64_2x32_split_y: {
2859 LValues &newDefs = convert(&insn->dest);
2860 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2861 break;
2862 }
2863 // special instructions
2864 case nir_op_fsign:
2865 case nir_op_isign: {
2866 DEFAULT_CHECKS;
2867 DataType iType;
2868 if (::isFloatType(dType))
2869 iType = TYPE_F32;
2870 else
2871 iType = TYPE_S32;
2872
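// compute sign(x) as (x > 0) - (x < 0) from two SET results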
2873 LValues &newDefs = convert(&insn->dest);
2874 LValue *val0 = getScratch();
2875 LValue *val1 = getScratch();
2876 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2877 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2878
2879 if (dType == TYPE_F64) {
2880 mkOp2(OP_SUB, iType, val0, val0, val1);
2881 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2882 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2883 mkOp2(OP_SUB, iType, val0, val1, val0);
2884 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2885 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2886 } else if (::isFloatType(dType))
2887 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2888 else
2889 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2890 break;
2891 }
2892 case nir_op_fcsel:
2893 case nir_op_b32csel: {
2894 DEFAULT_CHECKS;
2895 LValues &newDefs = convert(&insn->dest);
2896 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2897 break;
2898 }
2899 case nir_op_ibitfield_extract:
2900 case nir_op_ubitfield_extract: {
2901 DEFAULT_CHECKS;
2902 Value *tmp = getSSA();
2903 LValues &newDefs = convert(&insn->dest);
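// EXTBF expects (bits << 8) | offset, so pack src[2] (bit count) and
// src[1] (offset) with INSBF first.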
2904 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2905 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2906 break;
2907 }
2908 case nir_op_bfm: {
2909 DEFAULT_CHECKS;
2910 LValues &newDefs = convert(&insn->dest);
2911 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2912 break;
2913 }
2914 case nir_op_bitfield_insert: {
2915 DEFAULT_CHECKS;
2916 LValues &newDefs = convert(&insn->dest);
2917 LValue *temp = getSSA();
2918 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2919 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2920 break;
2921 }
2922 case nir_op_bit_count: {
2923 DEFAULT_CHECKS;
2924 LValues &newDefs = convert(&insn->dest);
2925 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2926 break;
2927 }
2928 case nir_op_bitfield_reverse: {
2929 DEFAULT_CHECKS;
2930 LValues &newDefs = convert(&insn->dest);
2931 mkOp2(OP_EXTBF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2932 break;
2933 }
2934 case nir_op_find_lsb: {
2935 DEFAULT_CHECKS;
2936 LValues &newDefs = convert(&insn->dest);
2937 Value *tmp = getSSA();
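// bit-reverse the value, then BFIND on the reversed word yields the
// position of the least significant set bit.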
2938 mkOp2(OP_EXTBF, TYPE_U32, tmp, getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
2939 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2940 break;
2941 }
2942 // boolean conversions
2943 case nir_op_b2f32: {
2944 DEFAULT_CHECKS;
2945 LValues &newDefs = convert(&insn->dest);
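// NIR booleans are 0/~0, so ANDing with the bit pattern of 1.0f
// yields exactly 0.0f or 1.0f.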
2946 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2947 break;
2948 }
2949 case nir_op_b2f64: {
2950 DEFAULT_CHECKS;
2951 LValues &newDefs = convert(&insn->dest);
2952 Value *tmp = getSSA(4);
2953 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2954 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2955 break;
2956 }
2957 case nir_op_f2b32:
2958 case nir_op_i2b32: {
2959 DEFAULT_CHECKS;
2960 LValues &newDefs = convert(&insn->dest);
2961 Value *src1;
2962 if (typeSizeof(sTypes[0]) == 8) {
2963 src1 = loadImm(getSSA(8), 0.0);
2964 } else {
2965 src1 = zero;
2966 }
2967 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2968 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2969 break;
2970 }
2971 case nir_op_b2i32: {
2972 DEFAULT_CHECKS;
2973 LValues &newDefs = convert(&insn->dest);
2974 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2975 break;
2976 }
2977 case nir_op_b2i64: {
2978 DEFAULT_CHECKS;
2979 LValues &newDefs = convert(&insn->dest);
2980 LValue *def = getScratch();
2981 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2982 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2983 break;
2984 }
2985 default:
2986 ERROR("unknown nir_op %s\n", info.name);
2987 return false;
2988 }
2989
2990 if (!oldPos) {
2991 oldPos = this->bb->getEntry();
2992 if (oldPos)
2993 oldPos->precise = insn->exact;
2994 }
2995 if (unlikely(!oldPos))
2996 return true;
2997
2998 while (oldPos->next) {
2999 oldPos = oldPos->next;
3000 oldPos->precise = insn->exact;
3001 }
3002 oldPos->saturate = insn->dest.saturate;
3003
3004 return true;
3005 }
3006 #undef DEFAULT_CHECKS
3007
3008 bool
3009 Converter::visit(nir_ssa_undef_instr *insn)
3010 {
3011 LValues &newDefs = convert(&insn->def);
3012 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
3013 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
3014 }
3015 return true;
3016 }
3017
3018 #define CASE_SAMPLER(ty) \
3019 case GLSL_SAMPLER_DIM_ ## ty : \
3020 if (isArray && !isShadow) \
3021 return TEX_TARGET_ ## ty ## _ARRAY; \
3022 else if (!isArray && isShadow) \
3023 return TEX_TARGET_## ty ## _SHADOW; \
3024 else if (isArray && isShadow) \
3025 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
3026 else \
3027 return TEX_TARGET_ ## ty
3028
3029 TexTarget
3030 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
3031 {
3032 switch (dim) {
3033 CASE_SAMPLER(1D);
3034 CASE_SAMPLER(2D);
3035 CASE_SAMPLER(CUBE);
3036 case GLSL_SAMPLER_DIM_3D:
3037 return TEX_TARGET_3D;
3038 case GLSL_SAMPLER_DIM_MS:
3039 if (isArray)
3040 return TEX_TARGET_2D_MS_ARRAY;
3041 return TEX_TARGET_2D_MS;
3042 case GLSL_SAMPLER_DIM_RECT:
3043 if (isShadow)
3044 return TEX_TARGET_RECT_SHADOW;
3045 return TEX_TARGET_RECT;
3046 case GLSL_SAMPLER_DIM_BUF:
3047 return TEX_TARGET_BUFFER;
3048 case GLSL_SAMPLER_DIM_EXTERNAL:
3049 return TEX_TARGET_2D;
3050 default:
3051 ERROR("unknown glsl_sampler_dim %u\n", dim);
3052 assert(false);
3053 return TEX_TARGET_COUNT;
3054 }
3055 }
3056 #undef CASE_SAMPLER
3057
3058 Value*
3059 Converter::applyProjection(Value *src, Value *proj)
3060 {
3061 if (!proj)
3062 return src;
3063 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
3064 }
3065
3066 unsigned int
3067 Converter::getNIRArgCount(TexInstruction::Target& target)
3068 {
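// NIR supplies fewer coordinate sources than codegen's argument count
// for cube arrays and MS targets, so trim those off here.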
3069 unsigned int result = target.getArgCount();
3070 if (target.isCube() && target.isArray())
3071 result--;
3072 if (target.isMS())
3073 result--;
3074 return result;
3075 }
3076
3077 uint16_t
3078 Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
3079 {
3080 typedef std::pair<uint32_t,Value*> DerefPair;
3081 std::list<DerefPair> derefs;
3082
3083 uint16_t result = 0;
3084 while (deref->deref_type != nir_deref_type_var) {
3085 switch (deref->deref_type) {
3086 case nir_deref_type_array: {
3087 Value *indirect;
3088 uint8_t size = type_size(deref->type, true);
3089 result += size * getIndirect(&deref->arr.index, 0, indirect);
3090
3091 if (indirect) {
3092 derefs.push_front(std::make_pair(size, indirect));
3093 }
3094
3095 break;
3096 }
3097 case nir_deref_type_struct: {
3098 result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
3099 break;
3100 }
3101 case nir_deref_type_var:
3102 default:
3103 unreachable("nir_deref_type_var reached in handleDeref!");
3104 break;
3105 }
3106 deref = nir_deref_instr_parent(deref);
3107 }
3108
3109 indirect = NULL;
3110 for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
3111 Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
3112 if (indirect)
3113 indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
3114 else
3115 indirect = offset;
3116 }
3117
3118 tex = nir_deref_instr_get_variable(deref);
3119 assert(tex);
3120
3121 return result + tex->data.driver_location;
3122 }
3123
3124 CacheMode
3125 Converter::convert(enum gl_access_qualifier access)
3126 {
3127 switch (access) {
3128 case ACCESS_VOLATILE:
3129 return CACHE_CV;
3130 case ACCESS_COHERENT:
3131 return CACHE_CG;
3132 default:
3133 return CACHE_CA;
3134 }
3135 }
3136
3137 CacheMode
3138 Converter::getCacheModeFromVar(const nir_variable *var)
3139 {
3140 return convert(var->data.access);
3141 }
3142
3143 bool
3144 Converter::visit(nir_tex_instr *insn)
3145 {
3146 switch (insn->op) {
3147 case nir_texop_lod:
3148 case nir_texop_query_levels:
3149 case nir_texop_tex:
3150 case nir_texop_texture_samples:
3151 case nir_texop_tg4:
3152 case nir_texop_txb:
3153 case nir_texop_txd:
3154 case nir_texop_txf:
3155 case nir_texop_txf_ms:
3156 case nir_texop_txl:
3157 case nir_texop_txs: {
3158 LValues &newDefs = convert(&insn->dest);
3159 std::vector<Value*> srcs;
3160 std::vector<Value*> defs;
3161 std::vector<nir_src*> offsets;
3162 uint8_t mask = 0;
3163 bool lz = false;
3164 Value *proj = NULL;
3165 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
3166 operation op = getOperation(insn->op);
3167
3168 int r, s;
3169 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
3170 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
3171 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
3172 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
3173 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
3174 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
3175 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
3176 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
3177 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
3178 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
3179 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
3180 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
3181 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
3182
3183 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
3184 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
3185
3186 if (projIdx != -1)
3187 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
3188
3189 srcs.resize(insn->coord_components);
3190 for (uint8_t i = 0u; i < insn->coord_components; ++i)
3191 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
3192
3193 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
3194 if (insn->coord_components) {
3195 uint32_t argCount = target.getArgCount();
3196
3197 if (target.isMS())
3198 argCount -= 1;
3199
3200 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
3201 srcs.push_back(getSSA());
3202 }
3203
3204 if (insn->op == nir_texop_texture_samples)
3205 srcs.push_back(zero);
3206 else if (!insn->num_srcs)
3207 srcs.push_back(loadImm(NULL, 0));
3208 if (biasIdx != -1)
3209 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
3210 if (lodIdx != -1)
3211 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3212 else if (op == OP_TXF)
3213 lz = true;
3214 if (msIdx != -1)
3215 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3216 if (offsetIdx != -1)
3217 offsets.push_back(&insn->src[offsetIdx].src);
3218 if (compIdx != -1)
3219 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3220 if (texOffIdx != -1) {
3221 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3222 texOffIdx = srcs.size() - 1;
3223 }
3224 if (sampOffIdx != -1) {
3225 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3226 sampOffIdx = srcs.size() - 1;
3227 }
3228 if (bindless) {
3229 // currently we only use the lower 32 bits of the 64-bit handle
3230 Value *split[2];
3231 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3232
3233 mkSplit(split, 4, handle);
3234
3235 srcs.push_back(split[0]);
3236 texOffIdx = srcs.size() - 1;
3237 }
3238
3239 r = bindless ? 0xff : insn->texture_index;
3240 s = bindless ? 0x1f : insn->sampler_index;
3241
3242 defs.resize(newDefs.size());
3243 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3244 defs[d] = newDefs[d];
3245 mask |= 1 << d;
3246 }
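// MS fetches carry no LOD, and implicit-LOD TEX is only valid in
// fragment shaders, so force level zero in those cases.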
3247 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3248 lz = true;
3249
3250 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3251 texi->tex.levelZero = lz;
3252 texi->tex.mask = mask;
3253 texi->tex.bindless = bindless;
3254
3255 if (texOffIdx != -1)
3256 texi->tex.rIndirectSrc = texOffIdx;
3257 if (sampOffIdx != -1)
3258 texi->tex.sIndirectSrc = sampOffIdx;
3259
3260 switch (insn->op) {
3261 case nir_texop_tg4:
3262 if (!target.isShadow())
3263 texi->tex.gatherComp = insn->component;
3264 break;
3265 case nir_texop_txs:
3266 texi->tex.query = TXQ_DIMS;
3267 break;
3268 case nir_texop_texture_samples:
3269 texi->tex.mask = 0x4;
3270 texi->tex.query = TXQ_TYPE;
3271 break;
3272 case nir_texop_query_levels:
3273 texi->tex.mask = 0x8;
3274 texi->tex.query = TXQ_DIMS;
3275 break;
3276 default:
3277 break;
3278 }
3279
3280 texi->tex.useOffsets = offsets.size();
3281 if (texi->tex.useOffsets) {
3282 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3283 for (uint32_t c = 0u; c < 3; ++c) {
3284 uint8_t s2 = std::min(c, target.getDim() - 1);
3285 texi->offset[s][c].set(getSrc(offsets[s], s2));
3286 texi->offset[s][c].setInsn(texi);
3287 }
3288 }
3289 }
3290
3291 if (op == OP_TXG && offsetIdx == -1) {
3292 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3293 texi->tex.useOffsets = 4;
3294 setPosition(texi, false);
3295 for (uint8_t i = 0; i < 4; ++i) {
3296 for (uint8_t j = 0; j < 2; ++j) {
3297 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3298 texi->offset[i][j].setInsn(texi);
3299 }
3300 }
3301 setPosition(texi, true);
3302 }
3303 }
3304
3305 if (ddxIdx != -1 && ddyIdx != -1) {
3306 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3307 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3308 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3309 }
3310 }
3311
3312 break;
3313 }
3314 default:
3315 ERROR("unknown nir_texop %u\n", insn->op);
3316 return false;
3317 }
3318 return true;
3319 }
3320
3321 bool
3322 Converter::visit(nir_deref_instr *deref)
3323 {
3324 // we just ignore those, because image intrinsics are the only place where
3325 // we should end up with deref sources, and those have to backtrack anyway
3326 // to get at the nir_variable. This code just exists to handle some special
3327 // cases.
3328 switch (deref->deref_type) {
3329 case nir_deref_type_array:
3330 case nir_deref_type_struct:
3331 case nir_deref_type_var:
3332 break;
3333 default:
3334 ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
3335 return false;
3336 }
3337 return true;
3338 }
3339
3340 bool
3341 Converter::run()
3342 {
3343 bool progress;
3344
3345 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3346 nir_print_shader(nir, stderr);
3347
3348 struct nir_lower_subgroups_options subgroup_options = {
3349 .subgroup_size = 32,
3350 .ballot_bit_size = 32,
3351 };
3352
3353 NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
3354 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3355 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3356 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3357 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3358 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3359 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3360
3361 do {
3362 progress = false;
3363 NIR_PASS(progress, nir, nir_copy_prop);
3364 NIR_PASS(progress, nir, nir_opt_remove_phis);
3365 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3366 NIR_PASS(progress, nir, nir_opt_cse);
3367 NIR_PASS(progress, nir, nir_opt_algebraic);
3368 NIR_PASS(progress, nir, nir_opt_constant_folding);
3369 NIR_PASS(progress, nir, nir_copy_prop);
3370 NIR_PASS(progress, nir, nir_opt_dce);
3371 NIR_PASS(progress, nir, nir_opt_dead_cf);
3372 } while (progress);
3373
3374 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3375 NIR_PASS_V(nir, nir_lower_locals_to_regs);
3376 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
3377 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3378
3379 // Garbage collect dead instructions
3380 nir_sweep(nir);
3381
3382 if (!parseNIR()) {
3383 ERROR("Couldn't parse NIR!\n");
3384 return false;
3385 }
3386
3387 if (!assignSlots()) {
3388 ERROR("Couldn't assign slots!\n");
3389 return false;
3390 }
3391
3392 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3393 nir_print_shader(nir, stderr);
3394
3395 nir_foreach_function(function, nir) {
3396 if (!visit(function))
3397 return false;
3398 }
3399
3400 return true;
3401 }
3402
3403 } // unnamed namespace
3404
3405 namespace nv50_ir {
3406
3407 bool
3408 Program::makeFromNIR(struct nv50_ir_prog_info *info)
3409 {
3410 nir_shader *nir = (nir_shader*)info->bin.source;
3411 Converter converter(this, nir, info);
3412 bool result = converter.run();
3413 if (!result)
3414 return result;
3415 LoweringHelper lowering;
3416 lowering.run(this);
3417 tlsSize = info->bin.tlsSpace;
3418 return result;
3419 }
3420
3421 } // namespace nv50_ir