nir: use enum operator helper for nir_variable_mode and nir_metadata
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

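// Returns the number of vec4 attribute slots 'type' occupies; the bindless
// flag is accepted for the NIR callback signature but not used here.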
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

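// size/align callback used when lowering function_temp variables: sizes are
// the natural vector size, but everything is aligned to 16 bytes.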
static void
function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);

   *size = comp_size * length;
   *align = 0x10;
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *, nv50_ir_prog_info_out *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or have to set some fields
   // on instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, it is returned through the
   // Value reference parameter instead.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   DataFile getFile(nir_intrinsic_op);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirBlockMap blocks;
   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info,
                     nv50_ir_prog_info_out *info_out)
   : ConverterCommon(prog, info, info_out),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

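// Get the BasicBlock corresponding to a nir_block, creating it on first use.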
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

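// Only the signed min/max atomics need a signed result type; all other
// intrinsic results are treated as unsigned here.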
DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

DataFile
Converter::getFile(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
      return FILE_MEMORY_GLOBAL;
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_scratch:
      return FILE_MEMORY_LOCAL;
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      return FILE_MEMORY_SHARED;
   case nir_intrinsic_load_kernel_input:
      return FILE_SHADER_INPUT;
   default:
      ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
      assert(false);
   }
   return FILE_NULL;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fneu32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fneu32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   assert(!reg->num_array_elems);

   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

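// Slot-based IO is addressed in vec4 units unless isScalar is set, so a
// non-constant index is shifted left by 4 to turn it into a byte offset
// (16 bytes per vec4 slot).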
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

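// Number of IO slots a variable covers. For arrayed per-vertex IO (geometry
// inputs, tessellation inputs/outputs) the outer per-vertex array dimension
// must not be counted.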
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

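// Component writemask contributed by 'type' in a given slot. 64-bit types
// take two 32-bit components each, so wide vectors span two slots: even
// slots get a full mask, odd slots whatever remains.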
static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}

bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info_out->numInputs = 0;
   info_out->numOutputs = 0;
   info_out->numSysVals = 0;

   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
      if (!(nir->info.system_values_read & 1ull << i))
         continue;

      info_out->sv[info_out->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info_out->sv[info_out->numSysVals].si = 0;
      info_out->sv[info_out->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info_out->io.instanceId = info_out->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info_out->sv[info_out->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info_out->io.vertexId = info_out->numSysVals;
         break;
      default:
         break;
      }

      info_out->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_shader_in_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info_out->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info_out->numInputs = std::max<uint8_t>(info_out->numInputs, vary);
   }

   nir_foreach_shader_out_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info_out->prop.fp.numColourResults++;
            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info_out->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info_out->io.fragDepth = vary;
            info_out->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info_out->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info_out->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info_out->numOutputs = std::max<uint8_t>(info_out->numOutputs, vary);
   }

   if (info_out->io.genUserClip > 0) {
      info_out->io.clipDistances = info_out->io.genUserClip;

      const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info_out->numOutputs++;
         info_out->out[i].id = i;
         info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info_out->out[i].si = n;
         info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info_out) == 0;
}

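// Turn a varying index plus component into a byte address using the slot
// mapping built in assignSlots. 64-bit components occupy two 32-bit slots
// and may therefore carry over into the next location.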
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info_out->in : info_out->out;
   return vary[idx].slot[slot] * 4;
}

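// 64-bit loads from const/buffer memory or through an indirect address are
// split into two 32-bit loads whose results get merged afterwards.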
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

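// Counterpart to loadFrom: 64-bit stores with an indirect address are split
// into two 32-bit stores; exports additionally copy their source into a
// fresh value first.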
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info_out->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info_out->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info_out->out[idx].patch;
   }
}

bool
Converter::parseNIR()
{
   info_out->bin.tlsSpace = nir->scratch_size;
   info_out->io.clipDistances = nir->info.clip_distance_array_size;
   info_out->io.cullDistances = nir->info.cull_distance_array_size;
   info_out->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info_out->bin.smemSize += nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info_out->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info_out->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info_out->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info_out->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info_out->prop.gp.instanceCount = nir->info.gs.invocations;
      info_out->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info_out->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info_out->prop.tp.domain = GL_LINES;
      else
         info_out->prop.tp.domain = nir->info.tess.primitive_mode;
      info_out->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info_out->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info_out->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info_out->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info_out->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info_out->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info_out->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the outermost if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

// TODO: add convergence
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on-the-fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   case nir_intrinsic_load_work_dim:
      return SV_WORK_DIM;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info_out->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // value in the z component, NIR in x
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_TESSELLATION_EVAL:
         case Program::TYPE_VERTEX: {
            if (info_out->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      // FBFetch
      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < dest_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info_out->prop.fp.readsFramebuffer = true;
         break;
      }

      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info_out->in[idx] : info_out->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode = immMode.reg.data.u32;
         }
         if (mode == NV50_IR_INTERP_DEFAULT)
            mode |= translateInterpMode(&vary, nvirOp);
         else {
            if (vary.linear) {
               nvirOp = OP_LINTERP;
               mode |= NV50_IR_INTERP_LINEAR;
            } else {
               nvirOp = OP_PINTERP;
               mode |= NV50_IR_INTERP_PERSPECTIVE;
            }
         }
      }

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
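   // The load_barycentric_* intrinsics don't access memory themselves: they
   // only produce the interpolation mode (and, for at_offset/at_sample, the
   // packed offset word) that the matching load_interpolated_input above
   // consumes. Offsets are clamped to [-0.5, 0.4375] and converted to a
   // packed fixed-point value.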
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
         Value *offs[2];
         for (uint8_t c = 0; c < 2; c++) {
            offs[c] = getScratch();
            mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
            mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
            mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
            mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
         }
         mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);

         mode = NV50_IR_INTERP_OFFSET;
      } else if (op == nir_intrinsic_load_barycentric_pixel) {
         mode = NV50_IR_INTERP_DEFAULT;
      } else if (op == nir_intrinsic_load_barycentric_at_sample) {
         info_out->prop.fp.readsSampleLocations = true;
         mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
         mode = NV50_IR_INTERP_OFFSET;
      } else {
         unreachable("all intrinsics already handled above");
      }

      loadImm(newDefs[1], mode);
      break;
   }
   case nir_intrinsic_demote:
   case nir_intrinsic_discard:
      mkOp(OP_DISCARD, TYPE_NONE, NULL);
      break;
   case nir_intrinsic_demote_if:
   case nir_intrinsic_discard_if: {
      Value *pred = getSSA(1, FILE_PREDICATE);
      if (insn->num_components > 1) {
         ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
         assert(false);
         return false;
      }
      mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_work_dim: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
      LValues &newDefs = convert(&insn->dest);

      for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
         Value *def;
         if (typeSizeof(dType) == 8)
            def = getSSA();
         else
            def = newDefs[i];

         if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
            loadImm(def, 0u);
         } else {
            Symbol *sym = mkSysVal(sv, i);
            Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
            if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
               rdsv->perPatch = 1;
         }

         if (typeSizeof(dType) == 8)
            mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
      }
      break;
   }
1887 // constants
1888 case nir_intrinsic_load_subgroup_size: {
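// a subgroup is a warp, which is always 32 threads wide on NVIDIA hardware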
1889 LValues &newDefs = convert(&insn->dest);
1890 loadImm(newDefs[0], 32u);
1891 break;
1892 }
1893 case nir_intrinsic_vote_all:
1894 case nir_intrinsic_vote_any:
1895 case nir_intrinsic_vote_ieq: {
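// Turn the 32-bit boolean into a predicate, run a warp-wide VOTE (the
// all/any/uniform flavour is selected via subOp) and widen the resulting
// predicate back into a 32-bit boolean.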
1896 LValues &newDefs = convert(&insn->dest);
1897 Value *pred = getScratch(1, FILE_PREDICATE);
1898 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1899 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1900 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1901 break;
1902 }
1903 case nir_intrinsic_ballot: {
1904 LValues &newDefs = convert(&insn->dest);
1905 Value *pred = getSSA(1, FILE_PREDICATE);
1906 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1907 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1908 break;
1909 }
1910 case nir_intrinsic_read_first_invocation:
1911 case nir_intrinsic_read_invocation: {
1912 LValues &newDefs = convert(&insn->dest);
1913 const DataType dType = getDType(insn);
1914 Value *tmp = getScratch();
1915
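// For read_first_invocation we need the lane id of the lowest active
// invocation: VOTE ANY(true) yields the active-lane mask, and bit-reversing
// it followed by BFIND in shift-amount mode gives the position of its
// lowest set bit. SHFL.IDX then broadcasts that lane's value.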
1916 if (op == nir_intrinsic_read_first_invocation) {
1917 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
1918 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
1919 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
1920 } else
1921 tmp = getSrc(&insn->src[1], 0);
1922
1923 for (uint8_t i = 0; i < dest_components; ++i) {
1924 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
1925 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
1926 }
1927 break;
1928 }
1929 case nir_intrinsic_load_per_vertex_input: {
1930 const DataType dType = getDType(insn);
1931 LValues &newDefs = convert(&insn->dest);
1932 Value *indirectVertex;
1933 Value *indirectOffset;
1934 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1935 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1936
1937 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1938 mkImm(baseVertex), indirectVertex);
1939 for (uint8_t i = 0u; i < dest_components; ++i) {
1940 uint32_t address = getSlotAddress(insn, idx, i);
1941 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
1942 indirectOffset, vtxBase, info_out->in[idx].patch);
1943 }
1944 break;
1945 }
1946 case nir_intrinsic_load_per_vertex_output: {
1947 const DataType dType = getDType(insn);
1948 LValues &newDefs = convert(&insn->dest);
1949 Value *indirectVertex;
1950 Value *indirectOffset;
1951 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1952 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1953 Value *vtxBase = NULL;
1954
1955 if (indirectVertex)
1956 vtxBase = indirectVertex;
1957 else
1958 vtxBase = loadImm(NULL, baseVertex);
1959
1960 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
1961
1962 for (uint8_t i = 0u; i < dest_components; ++i) {
1963 uint32_t address = getSlotAddress(insn, idx, i);
1964 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
1965 indirectOffset, vtxBase, info_out->in[idx].patch);
1966 }
1967 break;
1968 }
1969 case nir_intrinsic_emit_vertex: {
1970 if (info_out->io.genUserClip > 0)
1971 handleUserClipPlanes();
1972 uint32_t idx = nir_intrinsic_stream_id(insn);
1973 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1974 break;
1975 }
1976 case nir_intrinsic_end_primitive: {
1977 uint32_t idx = nir_intrinsic_stream_id(insn);
1978 if (idx)
1979 break;
1980 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1981 break;
1982 }
1983 case nir_intrinsic_load_ubo: {
1984 const DataType dType = getDType(insn);
1985 LValues &newDefs = convert(&insn->dest);
1986 Value *indirectIndex;
1987 Value *indirectOffset;
1988 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
1989 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
1990
1991 for (uint8_t i = 0u; i < dest_components; ++i) {
1992 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
1993 indirectOffset, indirectIndex);
1994 }
1995 break;
1996 }
1997 case nir_intrinsic_get_buffer_size: {
1998 LValues &newDefs = convert(&insn->dest);
1999 const DataType dType = getDType(insn);
2000 Value *indirectBuffer;
2001 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2002
2003 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2004 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2005 break;
2006 }
2007 case nir_intrinsic_store_ssbo: {
2008 DataType sType = getSType(insn->src[0], false, false);
2009 Value *indirectBuffer;
2010 Value *indirectOffset;
2011 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2012 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2013
2014 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2015 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2016 continue;
2017 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2018 offset + i * typeSizeof(sType));
2019 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2020 ->setIndirect(0, 1, indirectBuffer);
2021 }
2022 info_out->io.globalAccess |= 0x2;
2023 break;
2024 }
2025 case nir_intrinsic_load_ssbo: {
2026 const DataType dType = getDType(insn);
2027 LValues &newDefs = convert(&insn->dest);
2028 Value *indirectBuffer;
2029 Value *indirectOffset;
2030 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2031 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2032
2033 for (uint8_t i = 0u; i < dest_components; ++i)
2034 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2035 indirectOffset, indirectBuffer);
2036
2037 info_out->io.globalAccess |= 0x1;
2038 break;
2039 }
2040 case nir_intrinsic_shared_atomic_add:
2041 case nir_intrinsic_shared_atomic_and:
2042 case nir_intrinsic_shared_atomic_comp_swap:
2043 case nir_intrinsic_shared_atomic_exchange:
2044 case nir_intrinsic_shared_atomic_or:
2045 case nir_intrinsic_shared_atomic_imax:
2046 case nir_intrinsic_shared_atomic_imin:
2047 case nir_intrinsic_shared_atomic_umax:
2048 case nir_intrinsic_shared_atomic_umin:
2049 case nir_intrinsic_shared_atomic_xor: {
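// Shared-memory atomics all map onto a single OP_ATOM on FILE_MEMORY_SHARED
// with the operation encoded in subOp; comp_swap just carries its swap value
// as one extra source.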
2050 const DataType dType = getDType(insn);
2051 LValues &newDefs = convert(&insn->dest);
2052 Value *indirectOffset;
2053 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2054 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2055 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2056 if (op == nir_intrinsic_shared_atomic_comp_swap)
2057 atom->setSrc(2, getSrc(&insn->src[2], 0));
2058 atom->setIndirect(0, 0, indirectOffset);
2059 atom->subOp = getSubOp(op);
2060 break;
2061 }
2062 case nir_intrinsic_ssbo_atomic_add:
2063 case nir_intrinsic_ssbo_atomic_and:
2064 case nir_intrinsic_ssbo_atomic_comp_swap:
2065 case nir_intrinsic_ssbo_atomic_exchange:
2066 case nir_intrinsic_ssbo_atomic_or:
2067 case nir_intrinsic_ssbo_atomic_imax:
2068 case nir_intrinsic_ssbo_atomic_imin:
2069 case nir_intrinsic_ssbo_atomic_umax:
2070 case nir_intrinsic_ssbo_atomic_umin:
2071 case nir_intrinsic_ssbo_atomic_xor: {
2072 const DataType dType = getDType(insn);
2073 LValues &newDefs = convert(&insn->dest);
2074 Value *indirectBuffer;
2075 Value *indirectOffset;
2076 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2077 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2078
2079 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2080 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2081 getSrc(&insn->src[2], 0));
2082 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2083 atom->setSrc(2, getSrc(&insn->src[3], 0));
2084 atom->setIndirect(0, 0, indirectOffset);
2085 atom->setIndirect(0, 1, indirectBuffer);
2086 atom->subOp = getSubOp(op);
2087
2088 info_out->io.globalAccess |= 0x2;
2089 break;
2090 }
2091 case nir_intrinsic_global_atomic_add:
2092 case nir_intrinsic_global_atomic_and:
2093 case nir_intrinsic_global_atomic_comp_swap:
2094 case nir_intrinsic_global_atomic_exchange:
2095 case nir_intrinsic_global_atomic_or:
2096 case nir_intrinsic_global_atomic_imax:
2097 case nir_intrinsic_global_atomic_imin:
2098 case nir_intrinsic_global_atomic_umax:
2099 case nir_intrinsic_global_atomic_umin:
2100 case nir_intrinsic_global_atomic_xor: {
2101 const DataType dType = getDType(insn);
2102 LValues &newDefs = convert(&insn->dest);
2103 Value *address;
2104 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2105
2106 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2107 Instruction *atom =
2108 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2109 if (op == nir_intrinsic_global_atomic_comp_swap)
2110 atom->setSrc(2, getSrc(&insn->src[2], 0));
2111 atom->setIndirect(0, 0, address);
2112 atom->subOp = getSubOp(op);
2113
2114 info_out->io.globalAccess |= 0x2;
2115 break;
2116 }
2117 case nir_intrinsic_bindless_image_atomic_add:
2118 case nir_intrinsic_bindless_image_atomic_and:
2119 case nir_intrinsic_bindless_image_atomic_comp_swap:
2120 case nir_intrinsic_bindless_image_atomic_exchange:
2121 case nir_intrinsic_bindless_image_atomic_imax:
2122 case nir_intrinsic_bindless_image_atomic_umax:
2123 case nir_intrinsic_bindless_image_atomic_imin:
2124 case nir_intrinsic_bindless_image_atomic_umin:
2125 case nir_intrinsic_bindless_image_atomic_or:
2126 case nir_intrinsic_bindless_image_atomic_xor:
2127 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2128 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2129 case nir_intrinsic_bindless_image_load:
2130 case nir_intrinsic_bindless_image_samples:
2131 case nir_intrinsic_bindless_image_size:
2132 case nir_intrinsic_bindless_image_store:
2133 case nir_intrinsic_image_atomic_add:
2134 case nir_intrinsic_image_atomic_and:
2135 case nir_intrinsic_image_atomic_comp_swap:
2136 case nir_intrinsic_image_atomic_exchange:
2137 case nir_intrinsic_image_atomic_imax:
2138 case nir_intrinsic_image_atomic_umax:
2139 case nir_intrinsic_image_atomic_imin:
2140 case nir_intrinsic_image_atomic_umin:
2141 case nir_intrinsic_image_atomic_or:
2142 case nir_intrinsic_image_atomic_xor:
2143 case nir_intrinsic_image_atomic_inc_wrap:
2144 case nir_intrinsic_image_atomic_dec_wrap:
2145 case nir_intrinsic_image_load:
2146 case nir_intrinsic_image_samples:
2147 case nir_intrinsic_image_size:
2148 case nir_intrinsic_image_store: {
2149 std::vector<Value*> srcs, defs;
2150 Value *indirect;
2151 DataType ty;
2152
2153 uint32_t mask = 0;
2154 TexInstruction::Target target =
2155 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2156 unsigned int argCount = getNIRArgCount(target);
2157 uint16_t location = 0;
2158
2159 if (opInfo.has_dest) {
2160 LValues &newDefs = convert(&insn->dest);
2161 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2162 defs.push_back(newDefs[i]);
2163 mask |= 1 << i;
2164 }
2165 }
2166
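// lod_src is the 1-based position of the LOD operand of image load/store;
// the generic source gathering below uses it to skip that slot.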
2167 int lod_src = -1;
2168 bool bindless = false;
2169 switch (op) {
2170 case nir_intrinsic_bindless_image_atomic_add:
2171 case nir_intrinsic_bindless_image_atomic_and:
2172 case nir_intrinsic_bindless_image_atomic_comp_swap:
2173 case nir_intrinsic_bindless_image_atomic_exchange:
2174 case nir_intrinsic_bindless_image_atomic_imax:
2175 case nir_intrinsic_bindless_image_atomic_umax:
2176 case nir_intrinsic_bindless_image_atomic_imin:
2177 case nir_intrinsic_bindless_image_atomic_umin:
2178 case nir_intrinsic_bindless_image_atomic_or:
2179 case nir_intrinsic_bindless_image_atomic_xor:
2180 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2181 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2182 ty = getDType(insn);
2183 bindless = true;
2184 info_out->io.globalAccess |= 0x2;
2185 mask = 0x1;
2186 break;
2187 case nir_intrinsic_image_atomic_add:
2188 case nir_intrinsic_image_atomic_and:
2189 case nir_intrinsic_image_atomic_comp_swap:
2190 case nir_intrinsic_image_atomic_exchange:
2191 case nir_intrinsic_image_atomic_imax:
2192 case nir_intrinsic_image_atomic_umax:
2193 case nir_intrinsic_image_atomic_imin:
2194 case nir_intrinsic_image_atomic_umin:
2195 case nir_intrinsic_image_atomic_or:
2196 case nir_intrinsic_image_atomic_xor:
2197 case nir_intrinsic_image_atomic_inc_wrap:
2198 case nir_intrinsic_image_atomic_dec_wrap:
2199 ty = getDType(insn);
2200 bindless = false;
2201 info_out->io.globalAccess |= 0x2;
2202 mask = 0x1;
2203 break;
2204 case nir_intrinsic_bindless_image_load:
2205 case nir_intrinsic_image_load:
2206 ty = TYPE_U32;
2207 bindless = op == nir_intrinsic_bindless_image_load;
2208 info_out->io.globalAccess |= 0x1;
2209 lod_src = 4;
2210 break;
2211 case nir_intrinsic_bindless_image_store:
2212 case nir_intrinsic_image_store:
2213 ty = TYPE_U32;
2214 mask = 0xf;
2215 bindless = op == nir_intrinsic_bindless_image_store;
2216 info_out->io.globalAccess |= 0x2;
2217 lod_src = 5;
2219 break;
2220 case nir_intrinsic_bindless_image_samples:
2222 case nir_intrinsic_image_samples:
2223 ty = TYPE_U32;
2224 bindless = op == nir_intrinsic_bindless_image_samples;
2225 mask = 0x8;
2226 break;
2227 case nir_intrinsic_bindless_image_size:
2228 case nir_intrinsic_image_size:
2229 assert(nir_src_as_uint(insn->src[1]) == 0);
2230 ty = TYPE_U32;
2231 bindless = op == nir_intrinsic_bindless_image_size;
2232 break;
2233 default:
2234 unreachable("unhandled image opcode");
2235 break;
2236 }
2237
2238 if (bindless)
2239 indirect = getSrc(&insn->src[0], 0);
2240 else
2241 location = getIndirect(&insn->src[0], 0, indirect);
2242
2243 // coords
2244 if (opInfo.num_srcs >= 2)
2245 for (unsigned int i = 0u; i < argCount; ++i)
2246 srcs.push_back(getSrc(&insn->src[1], i));
2247
2248 // the sample index is just another src added after coords
2249 if (opInfo.num_srcs >= 3 && target.isMS())
2250 srcs.push_back(getSrc(&insn->src[2], 0));
2251
2252 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2253 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2254 for (uint8_t i = 0u; i < components; ++i)
2255 srcs.push_back(getSrc(&insn->src[3], i));
2256 }
2257
2258 if (opInfo.num_srcs >= 5 && lod_src != 5)
2259 // 1 extra source for the swap value of atomic comp_swap
2260 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2261 srcs.push_back(getSrc(&insn->src[4], i));
2262
2263 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2264 texi->tex.bindless = bindless;
2265 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2266 texi->tex.mask = mask;
2267 texi->cache = convert(nir_intrinsic_access(insn));
2268 texi->setType(ty);
2269 texi->subOp = getSubOp(op);
2270
2271 if (indirect)
2272 texi->setIndirectR(indirect);
2273
2274 break;
2275 }
2276 case nir_intrinsic_store_scratch:
2277 case nir_intrinsic_store_shared: {
2278 DataType sType = getSType(insn->src[0], false, false);
2279 Value *indirectOffset;
2280 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2281
2282 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2283 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2284 continue;
2285 Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
2286 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2287 }
2288 break;
2289 }
2290 case nir_intrinsic_load_kernel_input:
2291 case nir_intrinsic_load_scratch:
2292 case nir_intrinsic_load_shared: {
2293 const DataType dType = getDType(insn);
2294 LValues &newDefs = convert(&insn->dest);
2295 Value *indirectOffset;
2296 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2297
2298 for (uint8_t i = 0u; i < dest_components; ++i)
2299 loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);
2300
2301 break;
2302 }
2303 case nir_intrinsic_control_barrier: {
2304 // TODO: add flag to shader_info
2305 info_out->numBarriers = 1;
2306 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2307 bar->fixed = 1;
2308 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2309 break;
2310 }
2311 case nir_intrinsic_group_memory_barrier:
2312 case nir_intrinsic_memory_barrier:
2313 case nir_intrinsic_memory_barrier_buffer:
2314 case nir_intrinsic_memory_barrier_image:
2315 case nir_intrinsic_memory_barrier_shared: {
2316 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2317 bar->fixed = 1;
2318 bar->subOp = getSubOp(op);
2319 break;
2320 }
2321 case nir_intrinsic_memory_barrier_tcs_patch:
2322 break;
2323 case nir_intrinsic_shader_clock: {
2324 const DataType dType = getDType(insn);
2325 LValues &newDefs = convert(&insn->dest);
2326
2327 loadImm(newDefs[0], 0u);
2328 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2329 break;
2330 }
2331 case nir_intrinsic_load_global: {
2332 const DataType dType = getDType(insn);
2333 LValues &newDefs = convert(&insn->dest);
2334 Value *indirectOffset;
2335 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2336
2337 for (auto i = 0u; i < dest_components; ++i)
2338 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2339
2340 info_out->io.globalAccess |= 0x1;
2341 break;
2342 }
2343 case nir_intrinsic_store_global: {
2344 DataType sType = getSType(insn->src[0], false, false);
2345
2346 for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2347 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2348 continue;
2349 if (typeSizeof(sType) == 8) {
2350 Value *split[2];
2351 mkSplit(split, 4, getSrc(&insn->src[0], i));
2352
2353 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2354 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2355
2356 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2357 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2358 } else {
2359 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2360 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2361 }
2362 }
2363
2364 info_out->io.globalAccess |= 0x2;
2365 break;
2366 }
2367 default:
2368 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2369 return false;
2370 }
2371
2372 return true;
2373 }
2374
2375 bool
2376 Converter::visit(nir_jump_instr *insn)
2377 {
2378 switch (insn->type) {
2379 case nir_jump_return:
2380 // TODO: this only works in the main function
2381 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2382 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2383 break;
2384 case nir_jump_break:
2385 case nir_jump_continue: {
2386 bool isBreak = insn->type == nir_jump_break;
2387 nir_block *block = insn->instr.block;
2388 BasicBlock *target = convert(block->successors[0]);
2389 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2390 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2391 break;
2392 }
2393 default:
2394 ERROR("unknown nir_jump_type %u\n", insn->type);
2395 return false;
2396 }
2397
2398 return true;
2399 }
2400
2401 Value*
2402 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2403 {
2404 Value *val;
2405
2406 if (immInsertPos)
2407 setPosition(immInsertPos, true);
2408 else
2409 setPosition(bb, false);
2410
2411 switch (insn->def.bit_size) {
2412 case 64:
2413 val = loadImm(getSSA(8), insn->value[idx].u64);
2414 break;
2415 case 32:
2416 val = loadImm(getSSA(4), insn->value[idx].u32);
2417 break;
2418 case 16:
2419 val = loadImm(getSSA(2), insn->value[idx].u16);
2420 break;
2421 case 8:
2422 val = loadImm(getSSA(1), insn->value[idx].u8);
2423 break;
2424 default:
2425 unreachable("unhandled bit size!\n");
2426 }
2427 setPosition(bb, true);
2428 return val;
2429 }
2430
2431 bool
2432 Converter::visit(nir_load_const_instr *insn)
2433 {
2434 assert(insn->def.bit_size <= 64);
2435 immediates[insn->def.index] = insn;
2436 return true;
2437 }
2438
2439 #define DEFAULT_CHECKS \
2440 if (insn->dest.dest.ssa.num_components > 1) { \
2441 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2442 return false; \
2443 } \
2444 if (insn->dest.write_mask != 1) { \
2445 ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
2446 return false; \
2447 }
2448 bool
2449 Converter::visit(nir_alu_instr *insn)
2450 {
2451 const nir_op op = insn->op;
2452 const nir_op_info &info = nir_op_infos[op];
2453 DataType dType = getDType(insn);
2454 const std::vector<DataType> sTypes = getSTypes(insn);
2455
2456 Instruction *oldPos = this->bb->getExit();
2457
2458 switch (op) {
2459 case nir_op_fabs:
2460 case nir_op_iabs:
2461 case nir_op_fadd:
2462 case nir_op_iadd:
2463 case nir_op_iand:
2464 case nir_op_fceil:
2465 case nir_op_fcos:
2466 case nir_op_fddx:
2467 case nir_op_fddx_coarse:
2468 case nir_op_fddx_fine:
2469 case nir_op_fddy:
2470 case nir_op_fddy_coarse:
2471 case nir_op_fddy_fine:
2472 case nir_op_fdiv:
2473 case nir_op_idiv:
2474 case nir_op_udiv:
2475 case nir_op_fexp2:
2476 case nir_op_ffloor:
2477 case nir_op_ffma:
2478 case nir_op_flog2:
2479 case nir_op_fmax:
2480 case nir_op_imax:
2481 case nir_op_umax:
2482 case nir_op_fmin:
2483 case nir_op_imin:
2484 case nir_op_umin:
2485 case nir_op_fmod:
2486 case nir_op_imod:
2487 case nir_op_umod:
2488 case nir_op_fmul:
2489 case nir_op_imul:
2490 case nir_op_imul_high:
2491 case nir_op_umul_high:
2492 case nir_op_fneg:
2493 case nir_op_ineg:
2494 case nir_op_inot:
2495 case nir_op_ior:
2496 case nir_op_pack_64_2x32_split:
2497 case nir_op_fpow:
2498 case nir_op_frcp:
2499 case nir_op_frem:
2500 case nir_op_irem:
2501 case nir_op_frsq:
2502 case nir_op_fsat:
2503 case nir_op_ishr:
2504 case nir_op_ushr:
2505 case nir_op_fsin:
2506 case nir_op_fsqrt:
2507 case nir_op_ftrunc:
2508 case nir_op_ishl:
2509 case nir_op_ixor: {
2510 DEFAULT_CHECKS;
2511 LValues &newDefs = convert(&insn->dest);
2512 operation preOp = preOperationNeeded(op);
2513 if (preOp != OP_NOP) {
2514 assert(info.num_inputs < 2);
2515 Value *tmp = getSSA(typeSizeof(dType));
2516 Instruction *i0 = mkOp(preOp, dType, tmp);
2517 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2518 if (info.num_inputs) {
2519 i0->setSrc(0, getSrc(&insn->src[0]));
2520 i1->setSrc(0, tmp);
2521 }
2522 i1->subOp = getSubOp(op);
2523 } else {
2524 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2525 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2526 i->setSrc(s, getSrc(&insn->src[s]));
2527 }
2528 i->subOp = getSubOp(op);
2529 }
2530 break;
2531 }
2532 case nir_op_ifind_msb:
2533 case nir_op_ufind_msb: {
2534 DEFAULT_CHECKS;
2535 LValues &newDefs = convert(&insn->dest);
2536 dType = sTypes[0];
2537 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2538 break;
2539 }
2540 case nir_op_fround_even: {
2541 DEFAULT_CHECKS;
2542 LValues &newDefs = convert(&insn->dest);
2543 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2544 break;
2545 }
2546 // convert instructions
2547 case nir_op_f2f32:
2548 case nir_op_f2i32:
2549 case nir_op_f2u32:
2550 case nir_op_i2f32:
2551 case nir_op_i2i32:
2552 case nir_op_u2f32:
2553 case nir_op_u2u32:
2554 case nir_op_f2f64:
2555 case nir_op_f2i64:
2556 case nir_op_f2u64:
2557 case nir_op_i2f64:
2558 case nir_op_i2i64:
2559 case nir_op_u2f64:
2560 case nir_op_u2u64: {
2561 DEFAULT_CHECKS;
2562 LValues &newDefs = convert(&insn->dest);
2563 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2564 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2565 i->rnd = ROUND_Z;
2566 i->sType = sTypes[0];
2567 break;
2568 }
2569 // compare instructions
2570 case nir_op_feq32:
2571 case nir_op_ieq32:
2572 case nir_op_fge32:
2573 case nir_op_ige32:
2574 case nir_op_uge32:
2575 case nir_op_flt32:
2576 case nir_op_ilt32:
2577 case nir_op_ult32:
2578 case nir_op_fneu32:
2579 case nir_op_ine32: {
2580 DEFAULT_CHECKS;
2581 LValues &newDefs = convert(&insn->dest);
2582 Instruction *i = mkCmp(getOperation(op),
2583 getCondCode(op),
2584 dType,
2585 newDefs[0],
2586 dType,
2587 getSrc(&insn->src[0]),
2588 getSrc(&insn->src[1]));
2589 if (info.num_inputs == 3)
2590 i->setSrc(2, getSrc(&insn->src[2]));
2591 i->sType = sTypes[0];
2592 break;
2593 }
2594 case nir_op_mov:
2595 case nir_op_vec2:
2596 case nir_op_vec3:
2597 case nir_op_vec4:
2598 case nir_op_vec8:
2599 case nir_op_vec16: {
2600 LValues &newDefs = convert(&insn->dest);
2601 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2602 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2603 }
2604 break;
2605 }
2606 // (un)pack
2607 case nir_op_pack_64_2x32: {
2608 LValues &newDefs = convert(&insn->dest);
2609 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2610 merge->setSrc(0, getSrc(&insn->src[0], 0));
2611 merge->setSrc(1, getSrc(&insn->src[0], 1));
2612 break;
2613 }
2614 case nir_op_pack_half_2x16_split: {
2615 LValues &newDefs = convert(&insn->dest);
2616 Value *tmpH = getSSA();
2617 Value *tmpL = getSSA();
2618
2619 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2620 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2621 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2622 break;
2623 }
2624 case nir_op_unpack_half_2x16_split_x:
2625 case nir_op_unpack_half_2x16_split_y: {
2626 LValues &newDefs = convert(&insn->dest);
2627 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2628 if (op == nir_op_unpack_half_2x16_split_y)
2629 cvt->subOp = 1;
2630 break;
2631 }
2632 case nir_op_unpack_64_2x32: {
2633 LValues &newDefs = convert(&insn->dest);
2634 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2635 break;
2636 }
2637 case nir_op_unpack_64_2x32_split_x: {
2638 LValues &newDefs = convert(&insn->dest);
2639 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2640 break;
2641 }
2642 case nir_op_unpack_64_2x32_split_y: {
2643 LValues &newDefs = convert(&insn->dest);
2644 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2645 break;
2646 }
2647 // special instructions
2648 case nir_op_fsign:
2649 case nir_op_isign: {
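// OP_SET produces 1.0f/0.0f for float and all-ones/0 for integer results,
// so sign(x) falls out as (x > 0) - (x < 0) in the matching type, with an
// extra merge to build the 64-bit results.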
2650 DEFAULT_CHECKS;
2651 DataType iType;
2652 if (::isFloatType(dType))
2653 iType = TYPE_F32;
2654 else
2655 iType = TYPE_S32;
2656
2657 LValues &newDefs = convert(&insn->dest);
2658 LValue *val0 = getScratch();
2659 LValue *val1 = getScratch();
2660 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2661 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2662
2663 if (dType == TYPE_F64) {
2664 mkOp2(OP_SUB, iType, val0, val0, val1);
2665 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2666 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2667 mkOp2(OP_SUB, iType, val0, val1, val0);
2668 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2669 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2670 } else if (::isFloatType(dType))
2671 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2672 else
2673 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2674 break;
2675 }
2676 case nir_op_fcsel:
2677 case nir_op_b32csel: {
2678 DEFAULT_CHECKS;
2679 LValues &newDefs = convert(&insn->dest);
2680 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2681 break;
2682 }
2683 case nir_op_ibitfield_extract:
2684 case nir_op_ubitfield_extract: {
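// INSBF with 0x808 packs the bit offset into byte 0 and the field width
// into byte 1, which is the (offset, width) descriptor EXTBF expects; the
// same trick is used for nir_op_bitfield_insert below.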
2685 DEFAULT_CHECKS;
2686 Value *tmp = getSSA();
2687 LValues &newDefs = convert(&insn->dest);
2688 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2689 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2690 break;
2691 }
2692 case nir_op_bfm: {
2693 DEFAULT_CHECKS;
2694 LValues &newDefs = convert(&insn->dest);
2695 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2696 break;
2697 }
2698 case nir_op_bitfield_insert: {
2699 DEFAULT_CHECKS;
2700 LValues &newDefs = convert(&insn->dest);
2701 LValue *temp = getSSA();
2702 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2703 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2704 break;
2705 }
2706 case nir_op_bit_count: {
2707 DEFAULT_CHECKS;
2708 LValues &newDefs = convert(&insn->dest);
2709 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2710 break;
2711 }
2712 case nir_op_bitfield_reverse: {
2713 DEFAULT_CHECKS;
2714 LValues &newDefs = convert(&insn->dest);
2715 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2716 break;
2717 }
2718 case nir_op_find_lsb: {
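// BFIND in shift-amount (SAMT) mode counts from the top, so bit-reversing
// the source first yields the index of the lowest set bit.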
2719 DEFAULT_CHECKS;
2720 LValues &newDefs = convert(&insn->dest);
2721 Value *tmp = getSSA();
2722 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2723 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2724 break;
2725 }
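// The extract_* cases below build selectors for PRMT (byte permute): each
// nibble picks one byte, 0-3 from the first source and 4-7 from the second
// (a zero immediate here, so selecting from it zero-extends), and setting a
// nibble's 0x8 bit replicates the chosen byte's sign bit instead, which
// provides the sign extension for the signed variants.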
2726 case nir_op_extract_u8: {
2727 DEFAULT_CHECKS;
2728 LValues &newDefs = convert(&insn->dest);
2729 Value *prmt = getSSA();
2730 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2731 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2732 break;
2733 }
2734 case nir_op_extract_i8: {
2735 DEFAULT_CHECKS;
2736 LValues &newDefs = convert(&insn->dest);
2737 Value *prmt = getSSA();
2738 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2739 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2740 break;
2741 }
2742 case nir_op_extract_u16: {
2743 DEFAULT_CHECKS;
2744 LValues &newDefs = convert(&insn->dest);
2745 Value *prmt = getSSA();
2746 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2747 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2748 break;
2749 }
2750 case nir_op_extract_i16: {
2751 DEFAULT_CHECKS;
2752 LValues &newDefs = convert(&insn->dest);
2753 Value *prmt = getSSA();
2754 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2755 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2756 break;
2757 }
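// rotates become funnel shifts (SHF) with the same register as both inputs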
2758 case nir_op_urol: {
2759 DEFAULT_CHECKS;
2760 LValues &newDefs = convert(&insn->dest);
2761 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2762 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2763 ->subOp = NV50_IR_SUBOP_SHF_L |
2764 NV50_IR_SUBOP_SHF_W |
2765 NV50_IR_SUBOP_SHF_HI;
2766 break;
2767 }
2768 case nir_op_uror: {
2769 DEFAULT_CHECKS;
2770 LValues &newDefs = convert(&insn->dest);
2771 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2772 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2773 ->subOp = NV50_IR_SUBOP_SHF_R |
2774 NV50_IR_SUBOP_SHF_W |
2775 NV50_IR_SUBOP_SHF_LO;
2776 break;
2777 }
2778 // boolean conversions
2779 case nir_op_b2f32: {
2780 DEFAULT_CHECKS;
2781 LValues &newDefs = convert(&insn->dest);
2782 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2783 break;
2784 }
2785 case nir_op_b2f64: {
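// 0x3ff00000 is the high word of the double 1.0; the low word stays zero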
2786 DEFAULT_CHECKS;
2787 LValues &newDefs = convert(&insn->dest);
2788 Value *tmp = getSSA(4);
2789 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2790 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2791 break;
2792 }
2793 case nir_op_f2b32:
2794 case nir_op_i2b32: {
2795 DEFAULT_CHECKS;
2796 LValues &newDefs = convert(&insn->dest);
2797 Value *src1;
2798 if (typeSizeof(sTypes[0]) == 8) {
2799 src1 = loadImm(getSSA(8), 0.0);
2800 } else {
2801 src1 = zero;
2802 }
2803 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2804 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2805 break;
2806 }
2807 case nir_op_b2i32: {
2808 DEFAULT_CHECKS;
2809 LValues &newDefs = convert(&insn->dest);
2810 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2811 break;
2812 }
2813 case nir_op_b2i64: {
2814 DEFAULT_CHECKS;
2815 LValues &newDefs = convert(&insn->dest);
2816 LValue *def = getScratch();
2817 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2818 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2819 break;
2820 }
2821 default:
2822 ERROR("unknown nir_op %s\n", info.name);
2823 assert(false);
2824 return false;
2825 }
2826
2827 if (!oldPos) {
2828 oldPos = this->bb->getEntry();
2829 oldPos->precise = insn->exact;
2830 }
2831
2832 if (unlikely(!oldPos))
2833 return true;
2834
2835 while (oldPos->next) {
2836 oldPos = oldPos->next;
2837 oldPos->precise = insn->exact;
2838 }
2839 oldPos->saturate = insn->dest.saturate;
2840
2841 return true;
2842 }
2843 #undef DEFAULT_CHECKS
2844
2845 bool
2846 Converter::visit(nir_ssa_undef_instr *insn)
2847 {
2848 LValues &newDefs = convert(&insn->def);
2849 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2850 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2851 }
2852 return true;
2853 }
2854
2855 #define CASE_SAMPLER(ty) \
2856 case GLSL_SAMPLER_DIM_ ## ty : \
2857 if (isArray && !isShadow) \
2858 return TEX_TARGET_ ## ty ## _ARRAY; \
2859 else if (!isArray && isShadow) \
2860 return TEX_TARGET_## ty ## _SHADOW; \
2861 else if (isArray && isShadow) \
2862 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2863 else \
2864 return TEX_TARGET_ ## ty
2865
2866 TexTarget
2867 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2868 {
2869 switch (dim) {
2870 CASE_SAMPLER(1D);
2871 CASE_SAMPLER(2D);
2872 CASE_SAMPLER(CUBE);
2873 case GLSL_SAMPLER_DIM_3D:
2874 return TEX_TARGET_3D;
2875 case GLSL_SAMPLER_DIM_MS:
2876 if (isArray)
2877 return TEX_TARGET_2D_MS_ARRAY;
2878 return TEX_TARGET_2D_MS;
2879 case GLSL_SAMPLER_DIM_RECT:
2880 if (isShadow)
2881 return TEX_TARGET_RECT_SHADOW;
2882 return TEX_TARGET_RECT;
2883 case GLSL_SAMPLER_DIM_BUF:
2884 return TEX_TARGET_BUFFER;
2885 case GLSL_SAMPLER_DIM_EXTERNAL:
2886 return TEX_TARGET_2D;
2887 default:
2888 ERROR("unknown glsl_sampler_dim %u\n", dim);
2889 assert(false);
2890 return TEX_TARGET_COUNT;
2891 }
2892 }
2893 #undef CASE_SAMPLER
2894
2895 Value*
2896 Converter::applyProjection(Value *src, Value *proj)
2897 {
2898 if (!proj)
2899 return src;
2900 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
2901 }
2902
2903 unsigned int
2904 Converter::getNIRArgCount(TexInstruction::Target& target)
2905 {
2906 unsigned int result = target.getArgCount();
2907 if (target.isCube() && target.isArray())
2908 result--;
2909 if (target.isMS())
2910 result--;
2911 return result;
2912 }
2913
2914 CacheMode
2915 Converter::convert(enum gl_access_qualifier access)
2916 {
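// Map GL access qualifiers onto cache ops: volatile accesses bypass the
// caches (CV), coherent ones cache at L2 only (CG) and everything else
// uses the default cache-all policy (CA).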
2917 if (access & ACCESS_VOLATILE)
2918 return CACHE_CV;
2919 if (access & ACCESS_COHERENT)
2920 return CACHE_CG;
2921 return CACHE_CA;
2922 }
2923
2924 bool
2925 Converter::visit(nir_tex_instr *insn)
2926 {
2927 switch (insn->op) {
2928 case nir_texop_lod:
2929 case nir_texop_query_levels:
2930 case nir_texop_tex:
2931 case nir_texop_texture_samples:
2932 case nir_texop_tg4:
2933 case nir_texop_txb:
2934 case nir_texop_txd:
2935 case nir_texop_txf:
2936 case nir_texop_txf_ms:
2937 case nir_texop_txl:
2938 case nir_texop_txs: {
2939 LValues &newDefs = convert(&insn->dest);
2940 std::vector<Value*> srcs;
2941 std::vector<Value*> defs;
2942 std::vector<nir_src*> offsets;
2943 uint8_t mask = 0;
2944 bool lz = false;
2945 Value *proj = NULL;
2946 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
2947 operation op = getOperation(insn->op);
2948
2949 int r, s;
2950 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
2951 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
2952 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
2953 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
2954 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
2955 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
2956 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
2957 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
2958 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
2959 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
2960 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
2961 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
2962 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
2963
2964 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
2965 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
2966
2967 if (projIdx != -1)
2968 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
2969
2970 srcs.resize(insn->coord_components);
2971 for (uint8_t i = 0u; i < insn->coord_components; ++i)
2972 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
2973
2974 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
2975 if (insn->coord_components) {
2976 uint32_t argCount = target.getArgCount();
2977
2978 if (target.isMS())
2979 argCount -= 1;
2980
2981 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
2982 srcs.push_back(getSSA());
2983 }
2984
2985 if (insn->op == nir_texop_texture_samples)
2986 srcs.push_back(zero);
2987 else if (!insn->num_srcs)
2988 srcs.push_back(loadImm(NULL, 0));
2989 if (biasIdx != -1)
2990 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
2991 if (lodIdx != -1)
2992 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
2993 else if (op == OP_TXF)
2994 lz = true;
2995 if (msIdx != -1)
2996 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
2997 if (offsetIdx != -1)
2998 offsets.push_back(&insn->src[offsetIdx].src);
2999 if (compIdx != -1)
3000 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3001 if (texOffIdx != -1) {
3002 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3003 texOffIdx = srcs.size() - 1;
3004 }
3005 if (sampOffIdx != -1) {
3006 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3007 sampOffIdx = srcs.size() - 1;
3008 }
3009 if (bindless) {
3010 // currently we only use the lower 32 bits of the 64-bit handle
3011 Value *split[2];
3012 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3013
3014 mkSplit(split, 4, handle);
3015
3016 srcs.push_back(split[0]);
3017 texOffIdx = srcs.size() - 1;
3018 }
3019
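// With bindless handles the fixed texture/sampler slots are unused, so pass
// placeholder indices and feed the handle in as an indirect source instead.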
3020 r = bindless ? 0xff : insn->texture_index;
3021 s = bindless ? 0x1f : insn->sampler_index;
3022
3023 defs.resize(newDefs.size());
3024 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3025 defs[d] = newDefs[d];
3026 mask |= 1 << d;
3027 }
3028 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3029 lz = true;
3030
3031 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3032 texi->tex.levelZero = lz;
3033 texi->tex.mask = mask;
3034 texi->tex.bindless = bindless;
3035
3036 if (texOffIdx != -1)
3037 texi->tex.rIndirectSrc = texOffIdx;
3038 if (sampOffIdx != -1)
3039 texi->tex.sIndirectSrc = sampOffIdx;
3040
3041 switch (insn->op) {
3042 case nir_texop_tg4:
3043 if (!target.isShadow())
3044 texi->tex.gatherComp = insn->component;
3045 break;
3046 case nir_texop_txs:
3047 texi->tex.query = TXQ_DIMS;
3048 break;
3049 case nir_texop_texture_samples:
3050 texi->tex.mask = 0x4;
3051 texi->tex.query = TXQ_TYPE;
3052 break;
3053 case nir_texop_query_levels:
3054 texi->tex.mask = 0x8;
3055 texi->tex.query = TXQ_DIMS;
3056 break;
3057 default:
3058 break;
3059 }
3060
3061 texi->tex.useOffsets = offsets.size();
3062 if (texi->tex.useOffsets) {
3063 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3064 for (uint32_t c = 0u; c < 3; ++c) {
3065 uint8_t s2 = std::min(c, target.getDim() - 1);
3066 texi->offset[s][c].set(getSrc(offsets[s], s2));
3067 texi->offset[s][c].setInsn(texi);
3068 }
3069 }
3070 }
3071
3072 if (op == OP_TXG && offsetIdx == -1) {
3073 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3074 texi->tex.useOffsets = 4;
3075 setPosition(texi, false);
3076 for (uint8_t i = 0; i < 4; ++i) {
3077 for (uint8_t j = 0; j < 2; ++j) {
3078 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3079 texi->offset[i][j].setInsn(texi);
3080 }
3081 }
3082 setPosition(texi, true);
3083 }
3084 }
3085
3086 if (ddxIdx != -1 && ddyIdx != -1) {
3087 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3088 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3089 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3090 }
3091 }
3092
3093 break;
3094 }
3095 default:
3096 ERROR("unknown nir_texop %u\n", insn->op);
3097 return false;
3098 }
3099 return true;
3100 }
3101
3102 bool
3103 Converter::run()
3104 {
3105 bool progress;
3106
3107 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3108 nir_print_shader(nir, stderr);
3109
3110 struct nir_lower_subgroups_options subgroup_options = {
3111 .subgroup_size = 32,
3112 .ballot_bit_size = 32,
3113 };
3114
3115 /* prepare for IO lowering */
3116 NIR_PASS_V(nir, nir_opt_deref);
3117 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3118 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3119
3120 /* codegen assumes vec4 alignment for memory */
3121 NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
3122 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
3123 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3124
3125 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3126 type_size, (nir_lower_io_options)0);
3127
3128 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3129
3130 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3131 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3132 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3133
3134 /* TODO: improve this lowering/optimisation loop so that we can use
3135 * nir_opt_idiv_const effectively before this.
3136 */
3137 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_precise);
3138
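/* run the usual NIR optimisation loop until it stops making progress */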
3139 do {
3140 progress = false;
3141 NIR_PASS(progress, nir, nir_copy_prop);
3142 NIR_PASS(progress, nir, nir_opt_remove_phis);
3143 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3144 NIR_PASS(progress, nir, nir_opt_cse);
3145 NIR_PASS(progress, nir, nir_opt_algebraic);
3146 NIR_PASS(progress, nir, nir_opt_constant_folding);
3147 NIR_PASS(progress, nir, nir_copy_prop);
3148 NIR_PASS(progress, nir, nir_opt_dce);
3149 NIR_PASS(progress, nir, nir_opt_dead_cf);
3150 } while (progress);
3151
3152 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3153 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3154
3155 // Garbage collect dead instructions
3156 nir_sweep(nir);
3157
3158 if (!parseNIR()) {
3159 ERROR("Couldn't prase NIR!\n");
3160 return false;
3161 }
3162
3163 if (!assignSlots()) {
3164 ERROR("Couldn't assign slots!\n");
3165 return false;
3166 }
3167
3168 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3169 nir_print_shader(nir, stderr);
3170
3171 nir_foreach_function(function, nir) {
3172 if (!visit(function))
3173 return false;
3174 }
3175
3176 return true;
3177 }
3178
3179 } // unnamed namespace
3180
3181 namespace nv50_ir {
3182
3183 bool
3184 Program::makeFromNIR(struct nv50_ir_prog_info *info,
3185 struct nv50_ir_prog_info_out *info_out)
3186 {
3187 nir_shader *nir = (nir_shader*)info->bin.source;
3188 Converter converter(this, nir, info, info_out);
3189 bool result = converter.run();
3190 if (!result)
3191 return result;
3192 LoweringHelper lowering;
3193 lowering.run(this);
3194 tlsSize = info_out->bin.tlsSpace;
3195 return result;
3196 }
3197
3198 } // namespace nv50_ir
3199
3200 static nir_shader_compiler_options
3201 nvir_nir_shader_compiler_options(int chipset)
3202 {
3203 nir_shader_compiler_options op = {};
3204 op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
3205 op.lower_ffma = false;
3206 op.fuse_ffma = false; /* nir doesn't track mad vs fma */
3207 op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
3208 op.lower_flrp32 = true;
3209 op.lower_flrp64 = true;
3210 op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
3211 op.lower_fsat = false;
3212 op.lower_fsqrt = false; // TODO: only before gm200
3213 op.lower_sincos = false;
3214 op.lower_fmod = true;
3215 op.lower_bitfield_extract = false;
3216 op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3217 op.lower_bitfield_insert = false;
3218 op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3219 op.lower_bitfield_insert_to_bitfield_select = false;
3220 op.lower_bitfield_reverse = false;
3221 op.lower_bit_count = false;
3222 op.lower_ifind_msb = false;
3223 op.lower_find_lsb = false;
3224 op.lower_uadd_carry = true; // TODO
3225 op.lower_usub_borrow = true; // TODO
3226 op.lower_mul_high = false;
3227 op.lower_negate = false;
3228 op.lower_sub = true;
3229 op.lower_scmp = true; // TODO: not implemented yet
3230 op.lower_vector_cmp = false;
3231 op.lower_idiv = true;
3232 op.lower_bitops = false;
3233 op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
3234 op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
3235 op.lower_fdph = false;
3236 op.lower_fdot = false;
3237 op.fdot_replicates = false; // TODO
3238 op.lower_ffloor = false; // TODO
3239 op.lower_ffract = true;
3240 op.lower_fceil = false; // TODO
3241 op.lower_ftrunc = false;
3242 op.lower_ldexp = true;
3243 op.lower_pack_half_2x16 = true;
3244 op.lower_pack_unorm_2x16 = true;
3245 op.lower_pack_snorm_2x16 = true;
3246 op.lower_pack_unorm_4x8 = true;
3247 op.lower_pack_snorm_4x8 = true;
3248 op.lower_unpack_half_2x16 = true;
3249 op.lower_unpack_unorm_2x16 = true;
3250 op.lower_unpack_snorm_2x16 = true;
3251 op.lower_unpack_unorm_4x8 = true;
3252 op.lower_unpack_snorm_4x8 = true;
3253 op.lower_pack_split = false;
3254 op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
3255 op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
3256 op.lower_all_io_to_temps = false;
3257 op.lower_all_io_to_elements = false;
3258 op.vertex_id_zero_based = false;
3259 op.lower_base_vertex = false;
3260 op.lower_helper_invocation = false;
3261 op.optimize_sample_mask_in = false;
3262 op.lower_cs_local_index_from_id = true;
3263 op.lower_cs_local_id_from_index = false;
3264 op.lower_device_index_to_zero = false; // TODO
3265 op.lower_wpos_pntc = false; // TODO
3266 op.lower_hadd = true; // TODO
3267 op.lower_add_sat = true; // TODO
3268 op.vectorize_io = false;
3269 op.lower_to_scalar = false;
3270 op.unify_interfaces = false;
3271 op.use_interpolated_input_intrinsics = true;
3272 op.lower_mul_2x32_64 = true; // TODO
3273 op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
3274 op.has_imul24 = false;
3275 op.intel_vec4 = false;
3276 op.max_unroll_iterations = 32;
3277 op.lower_int64_options = (nir_lower_int64_options) (
3278 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
3279 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
3280 nir_lower_divmod64 |
3281 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
3282 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
3283 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
3284 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
3285 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
3286 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
3287 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
3288 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
3289 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
3290 ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
3291 nir_lower_ufind_msb64
3292 );
3293 op.lower_doubles_options = (nir_lower_doubles_options) (
3294 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
3295 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
3296 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
3297 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
3298 nir_lower_dmod |
3299 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
3300 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
3301 );
3302 return op;
3303 }
3304
3305 static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
3306 nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
3307 static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
3308 nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
3309 static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
3310 nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
3311
3312 const nir_shader_compiler_options *
3313 nv50_ir_nir_shader_compiler_options(int chipset)
3314 {
3315 if (chipset >= NVISA_GV100_CHIPSET)
3316 return &gv100_nir_shader_compiler_options;
3317 if (chipset >= NVISA_GM107_CHIPSET)
3318 return &gm107_nir_shader_compiler_options;
3319 return &gf100_nir_shader_compiler_options;
3320 }