nv50/ir: add nv50_ir_prog_info_out
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_tgsi.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "tgsi/tgsi_build.h"
24 #include "tgsi/tgsi_dump.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_util.h"
27
28 #include <set>
29
30 #include "codegen/nv50_ir.h"
31 #include "codegen/nv50_ir_from_common.h"
32 #include "codegen/nv50_ir_util.h"
33
34 namespace tgsi {
35
36 class Source;
37
38 static nv50_ir::operation translateOpcode(uint opcode);
39 static nv50_ir::DataFile translateFile(uint file);
40 static nv50_ir::TexTarget translateTexture(uint texTarg);
41 static nv50_ir::SVSemantic translateSysVal(uint sysval);
42 static nv50_ir::CacheMode translateCacheMode(uint qualifier);
43
44 class Instruction
45 {
46 public:
47 Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49 class SrcRegister
50 {
51 public:
52 SrcRegister(const struct tgsi_full_src_register *src)
53 : reg(src->Register),
54 fsr(src)
55 { }
56
57 SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59 SrcRegister(const struct tgsi_ind_register& ind)
60 : reg(tgsi_util_get_src_from_ind(&ind)),
61 fsr(NULL)
62 { }
63
64 struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65 {
66 struct tgsi_src_register reg;
67 memset(&reg, 0, sizeof(reg));
68 reg.Index = off.Index;
69 reg.File = off.File;
70 reg.SwizzleX = off.SwizzleX;
71 reg.SwizzleY = off.SwizzleY;
72 reg.SwizzleZ = off.SwizzleZ;
73 return reg;
74 }
75
76 SrcRegister(const struct tgsi_texture_offset& off) :
77 reg(offsetToSrc(off)),
78 fsr(NULL)
79 { }
80
81 uint getFile() const { return reg.File; }
82
83 bool is2D() const { return reg.Dimension; }
84
85 bool isIndirect(int dim) const
86 {
87 return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88 }
89
90 int getIndex(int dim) const
91 {
92 return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93 }
94
95 int getSwizzle(int chan) const
96 {
97 return tgsi_util_get_src_register_swizzle(&reg, chan);
98 }
99
100 int getArrayId() const
101 {
102 if (isIndirect(0))
103 return fsr->Indirect.ArrayID;
104 return 0;
105 }
106
107 nv50_ir::Modifier getMod(int chan) const;
108
109 SrcRegister getIndirect(int dim) const
110 {
111 assert(fsr && isIndirect(dim));
112 if (dim)
113 return SrcRegister(fsr->DimIndirect);
114 return SrcRegister(fsr->Indirect);
115 }
116
117 uint32_t getValueU32(int c, const uint32_t *data) const
118 {
119 assert(reg.File == TGSI_FILE_IMMEDIATE);
120 assert(!reg.Absolute);
121 assert(!reg.Negate);
122 return data[reg.Index * 4 + getSwizzle(c)];
123 }
124
125 private:
126 const struct tgsi_src_register reg;
127 const struct tgsi_full_src_register *fsr;
128 };
129
130 class DstRegister
131 {
132 public:
133 DstRegister(const struct tgsi_full_dst_register *dst)
134 : reg(dst->Register),
135 fdr(dst)
136 { }
137
138 DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140 uint getFile() const { return reg.File; }
141
142 bool is2D() const { return reg.Dimension; }
143
144 bool isIndirect(int dim) const
145 {
146 return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147 }
148
149 int getIndex(int dim) const
150 {
151 return (dim && fdr) ? fdr->Dimension.Index : reg.Index;
152 }
153
154 unsigned int getMask() const { return reg.WriteMask; }
155
156 bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158 SrcRegister getIndirect(int dim) const
159 {
160 assert(fdr && isIndirect(dim));
161 if (dim)
162 return SrcRegister(fdr->DimIndirect);
163 return SrcRegister(fdr->Indirect);
164 }
165
166 struct tgsi_full_src_register asSrc()
167 {
168 assert(fdr);
169 return tgsi_full_src_register_from_dst(fdr);
170 }
171
172 int getArrayId() const
173 {
174 if (isIndirect(0))
175 return fdr->Indirect.ArrayID;
176 return 0;
177 }
178
179 private:
180 const struct tgsi_dst_register reg;
181 const struct tgsi_full_dst_register *fdr;
182 };
183
184 inline uint getOpcode() const { return insn->Instruction.Opcode; }
185
186 unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
187 unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
188
189 // mask of used components of source s
190 unsigned int srcMask(unsigned int s) const;
191 unsigned int texOffsetMask() const;
192
193 SrcRegister getSrc(unsigned int s) const
194 {
195 assert(s < srcCount());
196 return SrcRegister(&insn->Src[s]);
197 }
198
199 DstRegister getDst(unsigned int d) const
200 {
201 assert(d < dstCount());
202 return DstRegister(&insn->Dst[d]);
203 }
204
205 SrcRegister getTexOffset(unsigned int i) const
206 {
207 assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
208 return SrcRegister(insn->TexOffsets[i]);
209 }
210
211 unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
212
213 bool checkDstSrcAliasing() const;
214
215 inline nv50_ir::operation getOP() const {
216 return translateOpcode(getOpcode()); }
217
218 nv50_ir::DataType inferSrcType() const;
219 nv50_ir::DataType inferDstType() const;
220
221 nv50_ir::CondCode getSetCond() const;
222
223 nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
224
225 const nv50_ir::TexInstruction::ImgFormatDesc *getImageFormat() const {
226 return nv50_ir::TexInstruction::translateImgFormat((enum pipe_format)insn->Memory.Format);
227 }
228
229 nv50_ir::TexTarget getImageTarget() const {
230 return translateTexture(insn->Memory.Texture);
231 }
232
233 nv50_ir::CacheMode getCacheMode() const {
234 if (!insn->Instruction.Memory)
235 return nv50_ir::CACHE_CA;
236 return translateCacheMode(insn->Memory.Qualifier);
237 }
238
239 inline uint getLabel() { return insn->Label.Label; }
240
241 unsigned getSaturate() const { return insn->Instruction.Saturate; }
242
243 void print() const
244 {
245 tgsi_dump_instruction(insn, 1);
246 }
247
248 private:
249 const struct tgsi_full_instruction *insn;
250 };
251
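// Returns the mask of meaningful texture-offset components for this
// instruction's texture target: .x for 1D, .xy for 2D/rect/MS, .xyz for 3D.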
252 unsigned int Instruction::texOffsetMask() const
253 {
254 const struct tgsi_instruction_texture *tex = &insn->Texture;
255 assert(insn->Instruction.Texture);
256
257 switch (tex->Texture) {
258 case TGSI_TEXTURE_BUFFER:
259 case TGSI_TEXTURE_1D:
260 case TGSI_TEXTURE_SHADOW1D:
261 case TGSI_TEXTURE_1D_ARRAY:
262 case TGSI_TEXTURE_SHADOW1D_ARRAY:
263 return 0x1;
264 case TGSI_TEXTURE_2D:
265 case TGSI_TEXTURE_SHADOW2D:
266 case TGSI_TEXTURE_2D_ARRAY:
267 case TGSI_TEXTURE_SHADOW2D_ARRAY:
268 case TGSI_TEXTURE_RECT:
269 case TGSI_TEXTURE_SHADOWRECT:
270 case TGSI_TEXTURE_2D_MSAA:
271 case TGSI_TEXTURE_2D_ARRAY_MSAA:
272 return 0x3;
273 case TGSI_TEXTURE_3D:
274 return 0x7;
275 default:
276 assert(!"Unexpected texture target");
277 return 0xf;
278 }
279 }
280
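// Compute which components of source s are actually read. The starting point
// is the destination write mask, refined per opcode: e.g. DP3 always reads
// .xyz, and scalar ops such as RCP/RSQ/EX2 only read .x.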
281 unsigned int Instruction::srcMask(unsigned int s) const
282 {
283 unsigned int mask = insn->Dst[0].Register.WriteMask;
284
285 switch (insn->Instruction.Opcode) {
286 case TGSI_OPCODE_COS:
287 case TGSI_OPCODE_SIN:
288 return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
289 case TGSI_OPCODE_DP2:
290 return 0x3;
291 case TGSI_OPCODE_DP3:
292 return 0x7;
293 case TGSI_OPCODE_DP4:
294 case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
295 return 0xf;
296 case TGSI_OPCODE_DST:
297 return mask & (s ? 0xa : 0x6);
298 case TGSI_OPCODE_EX2:
299 case TGSI_OPCODE_EXP:
300 case TGSI_OPCODE_LG2:
301 case TGSI_OPCODE_LOG:
302 case TGSI_OPCODE_POW:
303 case TGSI_OPCODE_RCP:
304 case TGSI_OPCODE_RSQ:
305 return 0x1;
306 case TGSI_OPCODE_IF:
307 case TGSI_OPCODE_UIF:
308 return 0x1;
309 case TGSI_OPCODE_LIT:
310 return 0xb;
311 case TGSI_OPCODE_TEX2:
312 case TGSI_OPCODE_TXB2:
313 case TGSI_OPCODE_TXL2:
314 return (s == 0) ? 0xf : 0x3;
315 case TGSI_OPCODE_TEX:
316 case TGSI_OPCODE_TXB:
317 case TGSI_OPCODE_TXD:
318 case TGSI_OPCODE_TXL:
319 case TGSI_OPCODE_TXP:
320 case TGSI_OPCODE_TXF:
321 case TGSI_OPCODE_TG4:
322 case TGSI_OPCODE_TEX_LZ:
323 case TGSI_OPCODE_TXF_LZ:
324 case TGSI_OPCODE_LODQ:
325 {
326 const struct tgsi_instruction_texture *tex = &insn->Texture;
327
328 assert(insn->Instruction.Texture);
329
330 mask = 0x7;
331 if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
332 insn->Instruction.Opcode != TGSI_OPCODE_TEX_LZ &&
333 insn->Instruction.Opcode != TGSI_OPCODE_TXF_LZ &&
334 insn->Instruction.Opcode != TGSI_OPCODE_TXD)
335 mask |= 0x8; /* bias, lod or proj */
336
337 switch (tex->Texture) {
338 case TGSI_TEXTURE_1D:
339 mask &= 0x9;
340 break;
341 case TGSI_TEXTURE_SHADOW1D:
342 mask &= 0xd;
343 break;
344 case TGSI_TEXTURE_1D_ARRAY:
345 case TGSI_TEXTURE_2D:
346 case TGSI_TEXTURE_RECT:
347 mask &= 0xb;
348 break;
349 case TGSI_TEXTURE_CUBE_ARRAY:
350 case TGSI_TEXTURE_SHADOW2D_ARRAY:
351 case TGSI_TEXTURE_SHADOWCUBE:
352 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
353 mask |= 0x8;
354 break;
355 default:
356 break;
357 }
358 }
359 return mask;
360 case TGSI_OPCODE_TXQ:
361 return 1;
362 case TGSI_OPCODE_D2I:
363 case TGSI_OPCODE_D2U:
364 case TGSI_OPCODE_D2F:
365 case TGSI_OPCODE_DSLT:
366 case TGSI_OPCODE_DSGE:
367 case TGSI_OPCODE_DSEQ:
368 case TGSI_OPCODE_DSNE:
369 case TGSI_OPCODE_U64SEQ:
370 case TGSI_OPCODE_U64SNE:
371 case TGSI_OPCODE_I64SLT:
372 case TGSI_OPCODE_U64SLT:
373 case TGSI_OPCODE_I64SGE:
374 case TGSI_OPCODE_U64SGE:
375 case TGSI_OPCODE_I642F:
376 case TGSI_OPCODE_U642F:
377 switch (util_bitcount(mask)) {
378 case 1: return 0x3;
379 case 2: return 0xf;
380 default:
381 assert(!"unexpected mask");
382 return 0xf;
383 }
384 case TGSI_OPCODE_I2D:
385 case TGSI_OPCODE_U2D:
386 case TGSI_OPCODE_F2D: {
387 unsigned int x = 0;
388 if ((mask & 0x3) == 0x3)
389 x |= 1;
390 if ((mask & 0xc) == 0xc)
391 x |= 2;
392 return x;
393 }
394 case TGSI_OPCODE_PK2H:
395 return 0x3;
396 case TGSI_OPCODE_UP2H:
397 return 0x1;
398 default:
399 break;
400 }
401
402 return mask;
403 }
404
405 nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
406 {
407 nv50_ir::Modifier m(0);
408
409 if (reg.Absolute)
410 m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
411 if (reg.Negate)
412 m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
413 return m;
414 }
415
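// Map TGSI register files onto nv50 IR data files; note that images and
// generic memory are both addressed through FILE_MEMORY_GLOBAL.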
416 static nv50_ir::DataFile translateFile(uint file)
417 {
418 switch (file) {
419 case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
420 case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
421 case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
422 case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
423 case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
424 case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
425 case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
426 case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
427 case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
428 case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
429 case TGSI_FILE_SAMPLER:
430 case TGSI_FILE_NULL:
431 default:
432 return nv50_ir::FILE_NULL;
433 }
434 }
435
436 static nv50_ir::SVSemantic translateSysVal(uint sysval)
437 {
438 switch (sysval) {
439 case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
440 case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
441 case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
442 case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
443 case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
444 case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
445 case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
446 case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
447 case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
448 case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
449 case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
450 case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
451 case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
452 case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
453 case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
454 case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
455 case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
456 case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
457 case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
458 case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
459 case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
460 case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
461 case TGSI_SEMANTIC_SUBGROUP_INVOCATION: return nv50_ir::SV_LANEID;
462 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK: return nv50_ir::SV_LANEMASK_EQ;
463 case TGSI_SEMANTIC_SUBGROUP_LT_MASK: return nv50_ir::SV_LANEMASK_LT;
464 case TGSI_SEMANTIC_SUBGROUP_LE_MASK: return nv50_ir::SV_LANEMASK_LE;
465 case TGSI_SEMANTIC_SUBGROUP_GT_MASK: return nv50_ir::SV_LANEMASK_GT;
466 case TGSI_SEMANTIC_SUBGROUP_GE_MASK: return nv50_ir::SV_LANEMASK_GE;
467 default:
468 assert(0);
469 return nv50_ir::SV_CLOCK;
470 }
471 }
472
473 #define NV50_IR_TEX_TARG_CASE(a, b) \
474 case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
475
476 static nv50_ir::TexTarget translateTexture(uint tex)
477 {
478 switch (tex) {
479 NV50_IR_TEX_TARG_CASE(1D, 1D);
480 NV50_IR_TEX_TARG_CASE(2D, 2D);
481 NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
482 NV50_IR_TEX_TARG_CASE(3D, 3D);
483 NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
484 NV50_IR_TEX_TARG_CASE(RECT, RECT);
485 NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
486 NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
487 NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
488 NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
489 NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
490 NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
491 NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
492 NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
493 NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
494 NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
495 NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
496 NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
497
498 case TGSI_TEXTURE_UNKNOWN:
499 default:
500 assert(!"invalid texture target");
501 return nv50_ir::TEX_TARGET_2D;
502 }
503 }
504
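// Map TGSI memory qualifiers to an IR cache mode; volatile takes precedence
// over coherent, and unqualified accesses keep the default cached mode (CA).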
505 static nv50_ir::CacheMode translateCacheMode(uint qualifier)
506 {
507 if (qualifier & TGSI_MEMORY_VOLATILE)
508 return nv50_ir::CACHE_CV;
509 if (qualifier & TGSI_MEMORY_COHERENT)
510 return nv50_ir::CACHE_CG;
511 return nv50_ir::CACHE_CA;
512 }
513
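// TGSI registers are untyped, so the operand type must be inferred from the
// opcode; anything not listed here is treated as 32-bit float.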
514 nv50_ir::DataType Instruction::inferSrcType() const
515 {
516 switch (getOpcode()) {
517 case TGSI_OPCODE_UIF:
518 case TGSI_OPCODE_AND:
519 case TGSI_OPCODE_OR:
520 case TGSI_OPCODE_XOR:
521 case TGSI_OPCODE_NOT:
522 case TGSI_OPCODE_SHL:
523 case TGSI_OPCODE_U2F:
524 case TGSI_OPCODE_U2D:
525 case TGSI_OPCODE_U2I64:
526 case TGSI_OPCODE_UADD:
527 case TGSI_OPCODE_UDIV:
528 case TGSI_OPCODE_UMOD:
529 case TGSI_OPCODE_UMAD:
530 case TGSI_OPCODE_UMUL:
531 case TGSI_OPCODE_UMUL_HI:
532 case TGSI_OPCODE_UMAX:
533 case TGSI_OPCODE_UMIN:
534 case TGSI_OPCODE_USEQ:
535 case TGSI_OPCODE_USGE:
536 case TGSI_OPCODE_USLT:
537 case TGSI_OPCODE_USNE:
538 case TGSI_OPCODE_USHR:
539 case TGSI_OPCODE_ATOMUADD:
540 case TGSI_OPCODE_ATOMXCHG:
541 case TGSI_OPCODE_ATOMCAS:
542 case TGSI_OPCODE_ATOMAND:
543 case TGSI_OPCODE_ATOMOR:
544 case TGSI_OPCODE_ATOMXOR:
545 case TGSI_OPCODE_ATOMUMIN:
546 case TGSI_OPCODE_ATOMUMAX:
547 case TGSI_OPCODE_ATOMDEC_WRAP:
548 case TGSI_OPCODE_ATOMINC_WRAP:
549 case TGSI_OPCODE_UBFE:
550 case TGSI_OPCODE_UMSB:
551 case TGSI_OPCODE_UP2H:
552 case TGSI_OPCODE_VOTE_ALL:
553 case TGSI_OPCODE_VOTE_ANY:
554 case TGSI_OPCODE_VOTE_EQ:
555 return nv50_ir::TYPE_U32;
556 case TGSI_OPCODE_I2F:
557 case TGSI_OPCODE_I2D:
558 case TGSI_OPCODE_I2I64:
559 case TGSI_OPCODE_IDIV:
560 case TGSI_OPCODE_IMUL_HI:
561 case TGSI_OPCODE_IMAX:
562 case TGSI_OPCODE_IMIN:
563 case TGSI_OPCODE_IABS:
564 case TGSI_OPCODE_INEG:
565 case TGSI_OPCODE_ISGE:
566 case TGSI_OPCODE_ISHR:
567 case TGSI_OPCODE_ISLT:
568 case TGSI_OPCODE_ISSG:
569 case TGSI_OPCODE_MOD:
570 case TGSI_OPCODE_UARL:
571 case TGSI_OPCODE_ATOMIMIN:
572 case TGSI_OPCODE_ATOMIMAX:
573 case TGSI_OPCODE_IBFE:
574 case TGSI_OPCODE_IMSB:
575 return nv50_ir::TYPE_S32;
576 case TGSI_OPCODE_D2F:
577 case TGSI_OPCODE_D2I:
578 case TGSI_OPCODE_D2U:
579 case TGSI_OPCODE_D2I64:
580 case TGSI_OPCODE_D2U64:
581 case TGSI_OPCODE_DABS:
582 case TGSI_OPCODE_DNEG:
583 case TGSI_OPCODE_DADD:
584 case TGSI_OPCODE_DMUL:
585 case TGSI_OPCODE_DDIV:
586 case TGSI_OPCODE_DMAX:
587 case TGSI_OPCODE_DMIN:
588 case TGSI_OPCODE_DSLT:
589 case TGSI_OPCODE_DSGE:
590 case TGSI_OPCODE_DSEQ:
591 case TGSI_OPCODE_DSNE:
592 case TGSI_OPCODE_DRCP:
593 case TGSI_OPCODE_DSQRT:
594 case TGSI_OPCODE_DMAD:
595 case TGSI_OPCODE_DFMA:
596 case TGSI_OPCODE_DFRAC:
597 case TGSI_OPCODE_DRSQ:
598 case TGSI_OPCODE_DTRUNC:
599 case TGSI_OPCODE_DCEIL:
600 case TGSI_OPCODE_DFLR:
601 case TGSI_OPCODE_DROUND:
602 return nv50_ir::TYPE_F64;
603 case TGSI_OPCODE_U64SEQ:
604 case TGSI_OPCODE_U64SNE:
605 case TGSI_OPCODE_U64SLT:
606 case TGSI_OPCODE_U64SGE:
607 case TGSI_OPCODE_U64MIN:
608 case TGSI_OPCODE_U64MAX:
609 case TGSI_OPCODE_U64ADD:
610 case TGSI_OPCODE_U64MUL:
611 case TGSI_OPCODE_U64SHL:
612 case TGSI_OPCODE_U64SHR:
613 case TGSI_OPCODE_U64DIV:
614 case TGSI_OPCODE_U64MOD:
615 case TGSI_OPCODE_U642F:
616 case TGSI_OPCODE_U642D:
617 return nv50_ir::TYPE_U64;
618 case TGSI_OPCODE_I64ABS:
619 case TGSI_OPCODE_I64SSG:
620 case TGSI_OPCODE_I64NEG:
621 case TGSI_OPCODE_I64SLT:
622 case TGSI_OPCODE_I64SGE:
623 case TGSI_OPCODE_I64MIN:
624 case TGSI_OPCODE_I64MAX:
625 case TGSI_OPCODE_I64SHR:
626 case TGSI_OPCODE_I64DIV:
627 case TGSI_OPCODE_I64MOD:
628 case TGSI_OPCODE_I642F:
629 case TGSI_OPCODE_I642D:
630 return nv50_ir::TYPE_S64;
631 default:
632 return nv50_ir::TYPE_F32;
633 }
634 }
635
636 nv50_ir::DataType Instruction::inferDstType() const
637 {
638 switch (getOpcode()) {
639 case TGSI_OPCODE_D2U:
640 case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
641 case TGSI_OPCODE_D2I:
642 case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
643 case TGSI_OPCODE_FSEQ:
644 case TGSI_OPCODE_FSGE:
645 case TGSI_OPCODE_FSLT:
646 case TGSI_OPCODE_FSNE:
647 case TGSI_OPCODE_DSEQ:
648 case TGSI_OPCODE_DSGE:
649 case TGSI_OPCODE_DSLT:
650 case TGSI_OPCODE_DSNE:
651 case TGSI_OPCODE_I64SLT:
652 case TGSI_OPCODE_I64SGE:
653 case TGSI_OPCODE_U64SEQ:
654 case TGSI_OPCODE_U64SNE:
655 case TGSI_OPCODE_U64SLT:
656 case TGSI_OPCODE_U64SGE:
657 case TGSI_OPCODE_PK2H:
658 return nv50_ir::TYPE_U32;
659 case TGSI_OPCODE_I2F:
660 case TGSI_OPCODE_U2F:
661 case TGSI_OPCODE_D2F:
662 case TGSI_OPCODE_I642F:
663 case TGSI_OPCODE_U642F:
664 case TGSI_OPCODE_UP2H:
665 return nv50_ir::TYPE_F32;
666 case TGSI_OPCODE_I2D:
667 case TGSI_OPCODE_U2D:
668 case TGSI_OPCODE_F2D:
669 case TGSI_OPCODE_I642D:
670 case TGSI_OPCODE_U642D:
671 return nv50_ir::TYPE_F64;
672 case TGSI_OPCODE_I2I64:
673 case TGSI_OPCODE_U2I64:
674 case TGSI_OPCODE_F2I64:
675 case TGSI_OPCODE_D2I64:
676 return nv50_ir::TYPE_S64;
677 case TGSI_OPCODE_F2U64:
678 case TGSI_OPCODE_D2U64:
679 return nv50_ir::TYPE_U64;
680 default:
681 return inferSrcType();
682 }
683 }
684
685 nv50_ir::CondCode Instruction::getSetCond() const
686 {
687 using namespace nv50_ir;
688
689 switch (getOpcode()) {
690 case TGSI_OPCODE_SLT:
691 case TGSI_OPCODE_ISLT:
692 case TGSI_OPCODE_USLT:
693 case TGSI_OPCODE_FSLT:
694 case TGSI_OPCODE_DSLT:
695 case TGSI_OPCODE_I64SLT:
696 case TGSI_OPCODE_U64SLT:
697 return CC_LT;
698 case TGSI_OPCODE_SLE:
699 return CC_LE;
700 case TGSI_OPCODE_SGE:
701 case TGSI_OPCODE_ISGE:
702 case TGSI_OPCODE_USGE:
703 case TGSI_OPCODE_FSGE:
704 case TGSI_OPCODE_DSGE:
705 case TGSI_OPCODE_I64SGE:
706 case TGSI_OPCODE_U64SGE:
707 return CC_GE;
708 case TGSI_OPCODE_SGT:
709 return CC_GT;
710 case TGSI_OPCODE_SEQ:
711 case TGSI_OPCODE_USEQ:
712 case TGSI_OPCODE_FSEQ:
713 case TGSI_OPCODE_DSEQ:
714 case TGSI_OPCODE_U64SEQ:
715 return CC_EQ;
716 case TGSI_OPCODE_SNE:
717 case TGSI_OPCODE_FSNE:
718 case TGSI_OPCODE_DSNE:
719 case TGSI_OPCODE_U64SNE:
720 return CC_NEU;
721 case TGSI_OPCODE_USNE:
722 return CC_NE;
723 default:
724 return CC_ALWAYS;
725 }
726 }
727
728 #define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
729
730 static nv50_ir::operation translateOpcode(uint opcode)
731 {
732 switch (opcode) {
733 NV50_IR_OPCODE_CASE(ARL, SHL);
734 NV50_IR_OPCODE_CASE(MOV, MOV);
735
736 NV50_IR_OPCODE_CASE(RCP, RCP);
737 NV50_IR_OPCODE_CASE(RSQ, RSQ);
738 NV50_IR_OPCODE_CASE(SQRT, SQRT);
739
740 NV50_IR_OPCODE_CASE(MUL, MUL);
741 NV50_IR_OPCODE_CASE(ADD, ADD);
742
743 NV50_IR_OPCODE_CASE(MIN, MIN);
744 NV50_IR_OPCODE_CASE(MAX, MAX);
745 NV50_IR_OPCODE_CASE(SLT, SET);
746 NV50_IR_OPCODE_CASE(SGE, SET);
747 NV50_IR_OPCODE_CASE(MAD, MAD);
748 NV50_IR_OPCODE_CASE(FMA, FMA);
749
750 NV50_IR_OPCODE_CASE(FLR, FLOOR);
751 NV50_IR_OPCODE_CASE(ROUND, CVT);
752 NV50_IR_OPCODE_CASE(EX2, EX2);
753 NV50_IR_OPCODE_CASE(LG2, LG2);
754 NV50_IR_OPCODE_CASE(POW, POW);
755
756 NV50_IR_OPCODE_CASE(COS, COS);
757 NV50_IR_OPCODE_CASE(DDX, DFDX);
758 NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
759 NV50_IR_OPCODE_CASE(DDY, DFDY);
760 NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
761 NV50_IR_OPCODE_CASE(KILL, DISCARD);
762 NV50_IR_OPCODE_CASE(DEMOTE, DISCARD);
763
764 NV50_IR_OPCODE_CASE(SEQ, SET);
765 NV50_IR_OPCODE_CASE(SGT, SET);
766 NV50_IR_OPCODE_CASE(SIN, SIN);
767 NV50_IR_OPCODE_CASE(SLE, SET);
768 NV50_IR_OPCODE_CASE(SNE, SET);
769 NV50_IR_OPCODE_CASE(TEX, TEX);
770 NV50_IR_OPCODE_CASE(TXD, TXD);
771 NV50_IR_OPCODE_CASE(TXP, TEX);
772
773 NV50_IR_OPCODE_CASE(CAL, CALL);
774 NV50_IR_OPCODE_CASE(RET, RET);
775 NV50_IR_OPCODE_CASE(CMP, SLCT);
776
777 NV50_IR_OPCODE_CASE(TXB, TXB);
778
779 NV50_IR_OPCODE_CASE(DIV, DIV);
780
781 NV50_IR_OPCODE_CASE(TXL, TXL);
782 NV50_IR_OPCODE_CASE(TEX_LZ, TXL);
783
784 NV50_IR_OPCODE_CASE(CEIL, CEIL);
785 NV50_IR_OPCODE_CASE(I2F, CVT);
786 NV50_IR_OPCODE_CASE(NOT, NOT);
787 NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
788 NV50_IR_OPCODE_CASE(SHL, SHL);
789
790 NV50_IR_OPCODE_CASE(AND, AND);
791 NV50_IR_OPCODE_CASE(OR, OR);
792 NV50_IR_OPCODE_CASE(MOD, MOD);
793 NV50_IR_OPCODE_CASE(XOR, XOR);
794 NV50_IR_OPCODE_CASE(TXF, TXF);
795 NV50_IR_OPCODE_CASE(TXF_LZ, TXF);
796 NV50_IR_OPCODE_CASE(TXQ, TXQ);
797 NV50_IR_OPCODE_CASE(TXQS, TXQ);
798 NV50_IR_OPCODE_CASE(TG4, TXG);
799 NV50_IR_OPCODE_CASE(LODQ, TXLQ);
800
801 NV50_IR_OPCODE_CASE(EMIT, EMIT);
802 NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
803
804 NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
805
806 NV50_IR_OPCODE_CASE(F2I, CVT);
807 NV50_IR_OPCODE_CASE(FSEQ, SET);
808 NV50_IR_OPCODE_CASE(FSGE, SET);
809 NV50_IR_OPCODE_CASE(FSLT, SET);
810 NV50_IR_OPCODE_CASE(FSNE, SET);
811 NV50_IR_OPCODE_CASE(IDIV, DIV);
812 NV50_IR_OPCODE_CASE(IMAX, MAX);
813 NV50_IR_OPCODE_CASE(IMIN, MIN);
814 NV50_IR_OPCODE_CASE(IABS, ABS);
815 NV50_IR_OPCODE_CASE(INEG, NEG);
816 NV50_IR_OPCODE_CASE(ISGE, SET);
817 NV50_IR_OPCODE_CASE(ISHR, SHR);
818 NV50_IR_OPCODE_CASE(ISLT, SET);
819 NV50_IR_OPCODE_CASE(F2U, CVT);
820 NV50_IR_OPCODE_CASE(U2F, CVT);
821 NV50_IR_OPCODE_CASE(UADD, ADD);
822 NV50_IR_OPCODE_CASE(UDIV, DIV);
823 NV50_IR_OPCODE_CASE(UMAD, MAD);
824 NV50_IR_OPCODE_CASE(UMAX, MAX);
825 NV50_IR_OPCODE_CASE(UMIN, MIN);
826 NV50_IR_OPCODE_CASE(UMOD, MOD);
827 NV50_IR_OPCODE_CASE(UMUL, MUL);
828 NV50_IR_OPCODE_CASE(USEQ, SET);
829 NV50_IR_OPCODE_CASE(USGE, SET);
830 NV50_IR_OPCODE_CASE(USHR, SHR);
831 NV50_IR_OPCODE_CASE(USLT, SET);
832 NV50_IR_OPCODE_CASE(USNE, SET);
833
834 NV50_IR_OPCODE_CASE(DABS, ABS);
835 NV50_IR_OPCODE_CASE(DNEG, NEG);
836 NV50_IR_OPCODE_CASE(DADD, ADD);
837 NV50_IR_OPCODE_CASE(DMUL, MUL);
838 NV50_IR_OPCODE_CASE(DDIV, DIV);
839 NV50_IR_OPCODE_CASE(DMAX, MAX);
840 NV50_IR_OPCODE_CASE(DMIN, MIN);
841 NV50_IR_OPCODE_CASE(DSLT, SET);
842 NV50_IR_OPCODE_CASE(DSGE, SET);
843 NV50_IR_OPCODE_CASE(DSEQ, SET);
844 NV50_IR_OPCODE_CASE(DSNE, SET);
845 NV50_IR_OPCODE_CASE(DRCP, RCP);
846 NV50_IR_OPCODE_CASE(DSQRT, SQRT);
847 NV50_IR_OPCODE_CASE(DMAD, MAD);
848 NV50_IR_OPCODE_CASE(DFMA, FMA);
849 NV50_IR_OPCODE_CASE(D2I, CVT);
850 NV50_IR_OPCODE_CASE(D2U, CVT);
851 NV50_IR_OPCODE_CASE(I2D, CVT);
852 NV50_IR_OPCODE_CASE(U2D, CVT);
853 NV50_IR_OPCODE_CASE(DRSQ, RSQ);
854 NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
855 NV50_IR_OPCODE_CASE(DCEIL, CEIL);
856 NV50_IR_OPCODE_CASE(DFLR, FLOOR);
857 NV50_IR_OPCODE_CASE(DROUND, CVT);
858
859 NV50_IR_OPCODE_CASE(U64SEQ, SET);
860 NV50_IR_OPCODE_CASE(U64SNE, SET);
861 NV50_IR_OPCODE_CASE(U64SLT, SET);
862 NV50_IR_OPCODE_CASE(U64SGE, SET);
863 NV50_IR_OPCODE_CASE(I64SLT, SET);
864 NV50_IR_OPCODE_CASE(I64SGE, SET);
865 NV50_IR_OPCODE_CASE(I2I64, CVT);
866 NV50_IR_OPCODE_CASE(U2I64, CVT);
867 NV50_IR_OPCODE_CASE(F2I64, CVT);
868 NV50_IR_OPCODE_CASE(F2U64, CVT);
869 NV50_IR_OPCODE_CASE(D2I64, CVT);
870 NV50_IR_OPCODE_CASE(D2U64, CVT);
871 NV50_IR_OPCODE_CASE(I642F, CVT);
872 NV50_IR_OPCODE_CASE(U642F, CVT);
873 NV50_IR_OPCODE_CASE(I642D, CVT);
874 NV50_IR_OPCODE_CASE(U642D, CVT);
875
876 NV50_IR_OPCODE_CASE(I64MIN, MIN);
877 NV50_IR_OPCODE_CASE(U64MIN, MIN);
878 NV50_IR_OPCODE_CASE(I64MAX, MAX);
879 NV50_IR_OPCODE_CASE(U64MAX, MAX);
880 NV50_IR_OPCODE_CASE(I64ABS, ABS);
881 NV50_IR_OPCODE_CASE(I64NEG, NEG);
882 NV50_IR_OPCODE_CASE(U64ADD, ADD);
883 NV50_IR_OPCODE_CASE(U64MUL, MUL);
884 NV50_IR_OPCODE_CASE(U64SHL, SHL);
885 NV50_IR_OPCODE_CASE(I64SHR, SHR);
886 NV50_IR_OPCODE_CASE(U64SHR, SHR);
887
888 NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
889 NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
890
891 NV50_IR_OPCODE_CASE(SAMPLE, TEX);
892 NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
893 NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
894 NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
895 NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
896 NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
897 NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
898 NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
899 NV50_IR_OPCODE_CASE(GATHER4, TXG);
900 NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
901
902 NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
903 NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
904 NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
905 NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
906 NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
907 NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
908 NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
909 NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
910 NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
911 NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
912 NV50_IR_OPCODE_CASE(ATOMFADD, ATOM);
913 NV50_IR_OPCODE_CASE(ATOMDEC_WRAP, ATOM);
914 NV50_IR_OPCODE_CASE(ATOMINC_WRAP, ATOM);
915
916 NV50_IR_OPCODE_CASE(TEX2, TEX);
917 NV50_IR_OPCODE_CASE(TXB2, TXB);
918 NV50_IR_OPCODE_CASE(TXL2, TXL);
919
920 NV50_IR_OPCODE_CASE(IBFE, EXTBF);
921 NV50_IR_OPCODE_CASE(UBFE, EXTBF);
922 NV50_IR_OPCODE_CASE(BFI, INSBF);
923 NV50_IR_OPCODE_CASE(BREV, EXTBF);
924 NV50_IR_OPCODE_CASE(POPC, POPCNT);
925 NV50_IR_OPCODE_CASE(LSB, BFIND);
926 NV50_IR_OPCODE_CASE(IMSB, BFIND);
927 NV50_IR_OPCODE_CASE(UMSB, BFIND);
928
929 NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
930 NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
931 NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
932
933 NV50_IR_OPCODE_CASE(BALLOT, VOTE);
934 NV50_IR_OPCODE_CASE(READ_INVOC, SHFL);
935 NV50_IR_OPCODE_CASE(READ_FIRST, SHFL);
936
937 NV50_IR_OPCODE_CASE(END, EXIT);
938
939 default:
940 return nv50_ir::OP_NOP;
941 }
942 }
943
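// Many TGSI opcodes map onto a single IR operation (e.g. all atomics become
// OP_ATOM, all votes become OP_VOTE); the subop selects the actual variant.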
944 static uint16_t opcodeToSubOp(uint opcode)
945 {
946 switch (opcode) {
947 case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
948 case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
949 case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
950 case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
951 case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
952 case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
953 case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
954 case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
955 case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
956 case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
957 case TGSI_OPCODE_ATOMFADD: return NV50_IR_SUBOP_ATOM_ADD;
958 case TGSI_OPCODE_ATOMDEC_WRAP: return NV50_IR_SUBOP_ATOM_DEC;
959 case TGSI_OPCODE_ATOMINC_WRAP: return NV50_IR_SUBOP_ATOM_INC;
960 case TGSI_OPCODE_IMUL_HI:
961 case TGSI_OPCODE_UMUL_HI:
962 return NV50_IR_SUBOP_MUL_HIGH;
963 case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
964 case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
965 case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
966 default:
967 return 0;
968 }
969 }
970
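// Returns true if the (non-indirect) destination register is also referenced
// as a source, so the converter can avoid clobbering operands before all
// reads have happened.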
971 bool Instruction::checkDstSrcAliasing() const
972 {
973 if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
974 return false;
975
976 for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
977 if (insn->Src[s].Register.File == TGSI_FILE_NULL)
978 break;
979 if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
980 insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
981 return true;
982 }
983 return false;
984 }
985
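// First pass over the TGSI tokens: collects declarations, immediates and
// per-instruction usage data into nv50_ir_prog_info_out, before the actual
// translation to nv50 IR is done by the Converter further down.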
986 class Source
987 {
988 public:
989 Source(struct nv50_ir_prog_info *, struct nv50_ir_prog_info_out *, nv50_ir::Program *);
990 ~Source();
991
992 public:
993 bool scanSource();
994 unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
995
996 public:
997 struct tgsi_shader_info scan;
998 struct tgsi_full_instruction *insns;
999 const struct tgsi_token *tokens;
1000 struct nv50_ir_prog_info *info;
1001 struct nv50_ir_prog_info_out *info_out;
1002
1003 nv50_ir::DynArray tempArrays;
1004 nv50_ir::DynArray immdArrays;
1005
1006 typedef nv50_ir::BuildUtil::Location Location;
1007 // these registers are per-subroutine, cannot be used for parameter passing
1008 std::set<Location> locals;
1009
1010 std::set<int> indirectTempArrays;
1011 std::map<int, int> indirectTempOffsets;
1012 std::map<int, std::pair<int, int> > tempArrayInfo;
1013 std::vector<int> tempArrayId;
1014
1015 int clipVertexOutput;
1016
1017 struct TextureView {
1018 uint8_t target; // TGSI_TEXTURE_*
1019 };
1020 std::vector<TextureView> textureViews;
1021
1022 /*
1023 struct Resource {
1024 uint8_t target; // TGSI_TEXTURE_*
1025 bool raw;
1026 uint8_t slot; // $surface index
1027 };
1028 std::vector<Resource> resources;
1029 */
1030
1031 struct MemoryFile {
1032 uint8_t mem_type; // TGSI_MEMORY_TYPE_*
1033 };
1034 std::vector<MemoryFile> memoryFiles;
1035
1036 std::vector<bool> bufferAtomics;
1037
1038 struct {
1039 uint16_t count; /* count of inline immediates */
1040 uint32_t *data; /* inline immediate data */
1041 } immd;
1042
1043 private:
1044 nv50_ir::Program *prog;
1045 int inferSysValDirection(unsigned sn) const;
1046 bool scanDeclaration(const struct tgsi_full_declaration *);
1047 bool scanInstruction(const struct tgsi_full_instruction *);
1048 void scanInstructionSrc(const Instruction& insn,
1049 const Instruction::SrcRegister& src,
1050 unsigned mask);
1051 void scanProperty(const struct tgsi_full_property *);
1052 void scanImmediate(const struct tgsi_full_immediate *);
1053
1054 inline bool isEdgeFlagPassthrough(const Instruction&) const;
1055 };
1056
1057 Source::Source(struct nv50_ir_prog_info *info, struct nv50_ir_prog_info_out *info_out,
1058 nv50_ir::Program *prog)
1059 : info(info), info_out(info_out), prog(prog)
1060 {
1061 tokens = (const struct tgsi_token *)info->bin.source;
1062
1063 if (info->dbgFlags & NV50_IR_DEBUG_BASIC)
1064 tgsi_dump(tokens, 0);
1065
1066 tgsi_scan_shader(tokens, &scan);
1067
1068 immd.count = 0;
1069 immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1070 }
1071
1072 Source::~Source()
1073 {
1074 if (insns)
1075 FREE(insns);
1076
1077 if (immd.data)
1078 FREE(immd.data);
1079 }
1080
1081 bool Source::scanSource()
1082 {
1083 unsigned insnCount = 0;
1084 struct tgsi_parse_context parse;
1085
1086 insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1087 sizeof(insns[0]));
1088 if (!insns)
1089 return false;
1090
1091 clipVertexOutput = -1;
1092
1093 textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1094 //resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1095 tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1096 memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1097 bufferAtomics.resize(scan.file_max[TGSI_FILE_BUFFER] + 1);
1098
1099 info_out->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1100 info_out->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1101 info_out->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1102
1103 if (info->type == PIPE_SHADER_FRAGMENT) {
1104 info_out->prop.fp.writesDepth = scan.writes_z;
1105 info_out->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1106 } else
1107 if (info->type == PIPE_SHADER_GEOMETRY) {
1108 info_out->prop.gp.instanceCount = 1; // default value
1109 }
1110
1111 info->io.viewportId = -1;
1112
1113 tgsi_parse_init(&parse, tokens);
1114 while (!tgsi_parse_end_of_tokens(&parse)) {
1115 tgsi_parse_token(&parse);
1116
1117 switch (parse.FullToken.Token.Type) {
1118 case TGSI_TOKEN_TYPE_IMMEDIATE:
1119 scanImmediate(&parse.FullToken.FullImmediate);
1120 break;
1121 case TGSI_TOKEN_TYPE_DECLARATION:
1122 scanDeclaration(&parse.FullToken.FullDeclaration);
1123 break;
1124 case TGSI_TOKEN_TYPE_INSTRUCTION:
1125 insns[insnCount++] = parse.FullToken.FullInstruction;
1126 scanInstruction(&parse.FullToken.FullInstruction);
1127 break;
1128 case TGSI_TOKEN_TYPE_PROPERTY:
1129 scanProperty(&parse.FullToken.FullProperty);
1130 break;
1131 default:
1132 INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1133 break;
1134 }
1135 }
1136 tgsi_parse_free(&parse);
1137
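// Indirectly addressed temporary arrays cannot stay in registers; give each
// one an offset in thread-local storage, one vec4 (16 bytes) per element.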
1138 if (indirectTempArrays.size()) {
1139 int tempBase = 0;
1140 for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1141 it != indirectTempArrays.end(); ++it) {
1142 std::pair<int, int>& info = tempArrayInfo[*it];
1143 indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1144 tempBase += info.second;
1145 }
1146 info_out->bin.tlsSpace += tempBase * 16;
1147 }
1148
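// For driver-generated user clip planes, append clip distance outputs here,
// packing up to four distances into each vec4 output.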
1149 if (info_out->io.genUserClip > 0) {
1150 info_out->io.clipDistances = info_out->io.genUserClip;
1151
1152 const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;
1153
1154 for (unsigned int n = 0; n < nOut; ++n) {
1155 unsigned int i = info_out->numOutputs++;
1156 info_out->out[i].id = i;
1157 info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1158 info_out->out[i].si = n;
1159 info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
1160 }
1161 }
1162
1163 return info->assignSlots(info_out) == 0;
1164 }
1165
1166 void Source::scanProperty(const struct tgsi_full_property *prop)
1167 {
1168 switch (prop->Property.PropertyName) {
1169 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1170 info_out->prop.gp.outputPrim = prop->u[0].Data;
1171 break;
1172 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1173 info_out->prop.gp.maxVertices = prop->u[0].Data;
1174 break;
1175 case TGSI_PROPERTY_GS_INVOCATIONS:
1176 info_out->prop.gp.instanceCount = prop->u[0].Data;
1177 break;
1178 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1179 info_out->prop.fp.separateFragData = true;
1180 break;
1181 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1182 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1183 case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1184 case TGSI_PROPERTY_GS_INPUT_PRIM:
1185 // we don't care
1186 break;
1187 case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1188 info_out->io.genUserClip = -1;
1189 break;
1190 case TGSI_PROPERTY_TCS_VERTICES_OUT:
1191 info_out->prop.tp.outputPatchSize = prop->u[0].Data;
1192 break;
1193 case TGSI_PROPERTY_TES_PRIM_MODE:
1194 info_out->prop.tp.domain = prop->u[0].Data;
1195 break;
1196 case TGSI_PROPERTY_TES_SPACING:
1197 info_out->prop.tp.partitioning = prop->u[0].Data;
1198 break;
1199 case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1200 info_out->prop.tp.winding = prop->u[0].Data;
1201 break;
1202 case TGSI_PROPERTY_TES_POINT_MODE:
1203 if (prop->u[0].Data)
1204 info_out->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1205 else
1206 info_out->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1207 break;
1208 case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1209 info->prop.cp.numThreads[0] = prop->u[0].Data;
1210 break;
1211 case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1212 info->prop.cp.numThreads[1] = prop->u[0].Data;
1213 break;
1214 case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1215 info->prop.cp.numThreads[2] = prop->u[0].Data;
1216 break;
1217 case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1218 info_out->io.clipDistances = prop->u[0].Data;
1219 break;
1220 case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1221 info_out->io.cullDistances = prop->u[0].Data;
1222 break;
1223 case TGSI_PROPERTY_NEXT_SHADER:
1224 /* Do not need to know the next shader stage. */
1225 break;
1226 case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1227 info_out->prop.fp.earlyFragTests = prop->u[0].Data;
1228 break;
1229 case TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE:
1230 info_out->prop.fp.postDepthCoverage = prop->u[0].Data;
1231 break;
1232 case TGSI_PROPERTY_MUL_ZERO_WINS:
1233 info->io.mul_zero_wins = prop->u[0].Data;
1234 break;
1235 case TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE:
1236 info_out->io.layer_viewport_relative = prop->u[0].Data;
1237 break;
1238 default:
1239 INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1240 break;
1241 }
1242 }
1243
1244 void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1245 {
1246 const unsigned n = immd.count++;
1247
1248 assert(n < scan.immediate_count);
1249
1250 for (int c = 0; c < 4; ++c)
1251 immd.data[n * 4 + c] = imm->u[c].Uint;
1252 }
1253
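// Decide whether a system value is read as an input in this shader stage
// (e.g. PRIMID counts as an input only for fragment shaders).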
1254 int Source::inferSysValDirection(unsigned sn) const
1255 {
1256 switch (sn) {
1257 case TGSI_SEMANTIC_INSTANCEID:
1258 case TGSI_SEMANTIC_VERTEXID:
1259 return 1;
1260 case TGSI_SEMANTIC_LAYER:
1261 #if 0
1262 case TGSI_SEMANTIC_VIEWPORTINDEX:
1263 return 0;
1264 #endif
1265 case TGSI_SEMANTIC_PRIMID:
1266 return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1267 default:
1268 return 0;
1269 }
1270 }
1271
1272 bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1273 {
1274 unsigned i, c;
1275 unsigned sn = TGSI_SEMANTIC_GENERIC;
1276 unsigned si = 0;
1277 const unsigned first = decl->Range.First, last = decl->Range.Last;
1278 const int arrayId = decl->Array.ArrayID;
1279
1280 if (decl->Declaration.Semantic) {
1281 sn = decl->Semantic.Name;
1282 si = decl->Semantic.Index;
1283 }
1284
1285 if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1286 for (i = first; i <= last; ++i) {
1287 for (c = 0; c < 4; ++c) {
1288 locals.insert(
1289 Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1290 }
1291 }
1292 }
1293
1294 switch (decl->Declaration.File) {
1295 case TGSI_FILE_INPUT:
1296 if (info->type == PIPE_SHADER_VERTEX) {
1297 // all vertex attributes are equal
1298 for (i = first; i <= last; ++i) {
1299 info_out->in[i].sn = TGSI_SEMANTIC_GENERIC;
1300 info_out->in[i].si = i;
1301 }
1302 } else {
1303 for (i = first; i <= last; ++i, ++si) {
1304 info_out->in[i].id = i;
1305 info_out->in[i].sn = sn;
1306 info_out->in[i].si = si;
1307 if (info->type == PIPE_SHADER_FRAGMENT) {
1308 // translate interpolation mode
1309 switch (decl->Interp.Interpolate) {
1310 case TGSI_INTERPOLATE_CONSTANT:
1311 info_out->in[i].flat = 1;
1312 break;
1313 case TGSI_INTERPOLATE_COLOR:
1314 info_out->in[i].sc = 1;
1315 break;
1316 case TGSI_INTERPOLATE_LINEAR:
1317 info_out->in[i].linear = 1;
1318 break;
1319 default:
1320 break;
1321 }
1322 if (decl->Interp.Location)
1323 info_out->in[i].centroid = 1;
1324 }
1325
1326 if (sn == TGSI_SEMANTIC_PATCH)
1327 info_out->in[i].patch = 1;
1328 if (sn == TGSI_SEMANTIC_PATCH)
1329 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1330 }
1331 }
1332 break;
1333 case TGSI_FILE_OUTPUT:
1334 for (i = first; i <= last; ++i, ++si) {
1335 switch (sn) {
1336 case TGSI_SEMANTIC_POSITION:
1337 if (info->type == PIPE_SHADER_FRAGMENT)
1338 info_out->io.fragDepth = i;
1339 else
1340 if (clipVertexOutput < 0)
1341 clipVertexOutput = i;
1342 break;
1343 case TGSI_SEMANTIC_COLOR:
1344 if (info->type == PIPE_SHADER_FRAGMENT)
1345 info_out->prop.fp.numColourResults++;
1346 break;
1347 case TGSI_SEMANTIC_EDGEFLAG:
1348 info_out->io.edgeFlagOut = i;
1349 break;
1350 case TGSI_SEMANTIC_CLIPVERTEX:
1351 clipVertexOutput = i;
1352 break;
1353 case TGSI_SEMANTIC_CLIPDIST:
1354 info_out->io.genUserClip = -1;
1355 break;
1356 case TGSI_SEMANTIC_SAMPLEMASK:
1357 info_out->io.sampleMask = i;
1358 break;
1359 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1360 info->io.viewportId = i;
1361 break;
1362 case TGSI_SEMANTIC_PATCH:
1363 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1364 /* fallthrough */
1365 case TGSI_SEMANTIC_TESSOUTER:
1366 case TGSI_SEMANTIC_TESSINNER:
1367 info_out->out[i].patch = 1;
1368 break;
1369 default:
1370 break;
1371 }
1372 info_out->out[i].id = i;
1373 info_out->out[i].sn = sn;
1374 info_out->out[i].si = si;
1375 }
1376 break;
1377 case TGSI_FILE_SYSTEM_VALUE:
1378 switch (sn) {
1379 case TGSI_SEMANTIC_INSTANCEID:
1380 info_out->io.instanceId = first;
1381 break;
1382 case TGSI_SEMANTIC_VERTEXID:
1383 info_out->io.vertexId = first;
1384 break;
1385 case TGSI_SEMANTIC_BASEVERTEX:
1386 case TGSI_SEMANTIC_BASEINSTANCE:
1387 case TGSI_SEMANTIC_DRAWID:
1388 info_out->prop.vp.usesDrawParameters = true;
1389 break;
1390 case TGSI_SEMANTIC_SAMPLEID:
1391 case TGSI_SEMANTIC_SAMPLEPOS:
1392 prog->persampleInvocation = true;
1393 break;
1394 case TGSI_SEMANTIC_SAMPLEMASK:
1395 info_out->prop.fp.usesSampleMaskIn = true;
1396 break;
1397 default:
1398 break;
1399 }
1400 for (i = first; i <= last; ++i, ++si) {
1401 info_out->sv[i].sn = sn;
1402 info_out->sv[i].si = si;
1403 info_out->sv[i].input = inferSysValDirection(sn);
1404
1405 switch (sn) {
1406 case TGSI_SEMANTIC_TESSOUTER:
1407 case TGSI_SEMANTIC_TESSINNER:
1408 info_out->sv[i].patch = 1;
1409 break;
1410 }
1411 }
1412 break;
1413 /*
1414 case TGSI_FILE_RESOURCE:
1415 for (i = first; i <= last; ++i) {
1416 resources[i].target = decl->Resource.Resource;
1417 resources[i].raw = decl->Resource.Raw;
1418 resources[i].slot = i;
1419 }
1420 break;
1421 */
1422 case TGSI_FILE_SAMPLER_VIEW:
1423 for (i = first; i <= last; ++i)
1424 textureViews[i].target = decl->SamplerView.Resource;
1425 break;
1426 case TGSI_FILE_MEMORY:
1427 for (i = first; i <= last; ++i)
1428 memoryFiles[i].mem_type = decl->Declaration.MemType;
1429 break;
1430 case TGSI_FILE_NULL:
1431 case TGSI_FILE_TEMPORARY:
1432 for (i = first; i <= last; ++i)
1433 tempArrayId[i] = arrayId;
1434 if (arrayId)
1435 tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1436 first, last - first + 1)));
1437 break;
1438 case TGSI_FILE_BUFFER:
1439 for (i = first; i <= last; ++i)
1440 bufferAtomics[i] = decl->Declaration.Atomic;
1441 break;
1442 case TGSI_FILE_ADDRESS:
1443 case TGSI_FILE_CONSTANT:
1444 case TGSI_FILE_IMMEDIATE:
1445 case TGSI_FILE_SAMPLER:
1446 case TGSI_FILE_IMAGE:
1447 break;
1448 default:
1449 ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1450 return false;
1451 }
1452 return true;
1453 }
1454
1455 inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1456 {
1457 return insn.getOpcode() == TGSI_OPCODE_MOV &&
1458 insn.getDst(0).getIndex(0) == info_out->io.edgeFlagOut &&
1459 insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1460 }
1461
1462 void Source::scanInstructionSrc(const Instruction& insn,
1463 const Instruction::SrcRegister& src,
1464 unsigned mask)
1465 {
1466 if (src.getFile() == TGSI_FILE_TEMPORARY) {
1467 if (src.isIndirect(0))
1468 indirectTempArrays.insert(src.getArrayId());
1469 } else
1470 if (src.getFile() == TGSI_FILE_OUTPUT) {
1471 if (src.isIndirect(0)) {
1472 // We don't know which one is accessed, just mark everything for
1473 // reading. This is an extremely unlikely occurrence.
1474 for (unsigned i = 0; i < info_out->numOutputs; ++i)
1475 info_out->out[i].oread = 1;
1476 } else {
1477 info_out->out[src.getIndex(0)].oread = 1;
1478 }
1479 }
1480 if (src.getFile() == TGSI_FILE_SYSTEM_VALUE) {
1481 if (info_out->sv[src.getIndex(0)].sn == TGSI_SEMANTIC_SAMPLEPOS)
1482 info_out->prop.fp.readsSampleLocations = true;
1483 }
1484 if (src.getFile() != TGSI_FILE_INPUT)
1485 return;
1486
1487 if (src.isIndirect(0)) {
1488 for (unsigned i = 0; i < info_out->numInputs; ++i)
1489 info_out->in[i].mask = 0xf;
1490 } else {
1491 const int i = src.getIndex(0);
1492 for (unsigned c = 0; c < 4; ++c) {
1493 if (!(mask & (1 << c)))
1494 continue;
1495 int k = src.getSwizzle(c);
1496 if (k <= TGSI_SWIZZLE_W)
1497 info_out->in[i].mask |= 1 << k;
1498 }
1499 switch (info_out->in[i].sn) {
1500 case TGSI_SEMANTIC_PSIZE:
1501 case TGSI_SEMANTIC_PRIMID:
1502 case TGSI_SEMANTIC_FOG:
1503 info_out->in[i].mask &= 0x1;
1504 break;
1505 case TGSI_SEMANTIC_PCOORD:
1506 info_out->in[i].mask &= 0x3;
1507 break;
1508 default:
1509 break;
1510 }
1511 }
1512 }
1513
1514 bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1515 {
1516 Instruction insn(inst);
1517
1518 if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1519 info_out->numBarriers = 1;
1520
1521 if (insn.getOpcode() == TGSI_OPCODE_FBFETCH)
1522 info_out->prop.fp.readsFramebuffer = true;
1523
1524 if (insn.getOpcode() == TGSI_OPCODE_INTERP_SAMPLE)
1525 info_out->prop.fp.readsSampleLocations = true;
1526
1527 if (insn.getOpcode() == TGSI_OPCODE_DEMOTE)
1528 info_out->prop.fp.usesDiscard = true;
1529
1530 if (insn.dstCount()) {
1531 Instruction::DstRegister dst = insn.getDst(0);
1532
1533 if (insn.getOpcode() == TGSI_OPCODE_STORE &&
1534 dst.getFile() != TGSI_FILE_MEMORY) {
1535 info_out->io.globalAccess |= 0x2;
1536
1537 if (dst.getFile() == TGSI_FILE_INPUT) {
1538 // TODO: Handle indirect somehow?
1539 const int i = dst.getIndex(0);
1540 info_out->in[i].mask |= 1;
1541 }
1542 }
1543
1544 if (dst.getFile() == TGSI_FILE_OUTPUT) {
1545 if (dst.isIndirect(0))
1546 for (unsigned i = 0; i < info_out->numOutputs; ++i)
1547 info_out->out[i].mask = 0xf;
1548 else
1549 info_out->out[dst.getIndex(0)].mask |= dst.getMask();
1550
1551 if (info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1552 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1553 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1554 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1555 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1556 info_out->out[dst.getIndex(0)].mask &= 1;
1557
1558 if (isEdgeFlagPassthrough(insn))
1559 info_out->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1560 } else
1561 if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1562 if (dst.isIndirect(0))
1563 indirectTempArrays.insert(dst.getArrayId());
1564 } else
1565 if (dst.getFile() == TGSI_FILE_BUFFER ||
1566 dst.getFile() == TGSI_FILE_IMAGE ||
1567 (dst.getFile() == TGSI_FILE_MEMORY &&
1568 memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1569 info_out->io.globalAccess |= 0x2;
1570 }
1571 }
1572
1573 if (insn.srcCount() && (
1574 insn.getSrc(0).getFile() != TGSI_FILE_MEMORY ||
1575 memoryFiles[insn.getSrc(0).getIndex(0)].mem_type ==
1576 TGSI_MEMORY_TYPE_GLOBAL)) {
1577 switch (insn.getOpcode()) {
1578 case TGSI_OPCODE_ATOMUADD:
1579 case TGSI_OPCODE_ATOMXCHG:
1580 case TGSI_OPCODE_ATOMCAS:
1581 case TGSI_OPCODE_ATOMAND:
1582 case TGSI_OPCODE_ATOMOR:
1583 case TGSI_OPCODE_ATOMXOR:
1584 case TGSI_OPCODE_ATOMUMIN:
1585 case TGSI_OPCODE_ATOMIMIN:
1586 case TGSI_OPCODE_ATOMUMAX:
1587 case TGSI_OPCODE_ATOMIMAX:
1588 case TGSI_OPCODE_ATOMFADD:
1589 case TGSI_OPCODE_ATOMDEC_WRAP:
1590 case TGSI_OPCODE_ATOMINC_WRAP:
1591 case TGSI_OPCODE_LOAD:
1592 info_out->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1593 0x1 : 0x2;
1594 break;
1595 }
1596 }
1597
1598
1599 for (unsigned s = 0; s < insn.srcCount(); ++s)
1600 scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1601
1602 for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1603 scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1604
1605 return true;
1606 }
1607
1608 nv50_ir::TexInstruction::Target
1609 Instruction::getTexture(const tgsi::Source *code, int s) const
1610 {
1611 // XXX: indirect access
1612 unsigned int r;
1613
1614 switch (getSrc(s).getFile()) {
1615 /*
1616 case TGSI_FILE_RESOURCE:
1617 r = getSrc(s).getIndex(0);
1618 return translateTexture(code->resources.at(r).target);
1619 */
1620 case TGSI_FILE_SAMPLER_VIEW:
1621 r = getSrc(s).getIndex(0);
1622 return translateTexture(code->textureViews.at(r).target);
1623 default:
1624 return translateTexture(insn->Texture.Texture);
1625 }
1626 }
1627
1628 } // namespace tgsi
1629
1630 namespace {
1631
1632 using namespace nv50_ir;
1633
1634 class Converter : public ConverterCommon
1635 {
1636 public:
1637 Converter(Program *, const tgsi::Source *, nv50_ir_prog_info_out *);
1638 ~Converter();
1639
1640 bool run();
1641
1642 private:
1643 Value *shiftAddress(Value *);
1644 Value *getVertexBase(int s);
1645 Value *getOutputBase(int s);
1646 DataArray *getArrayForFile(unsigned file, int idx);
1647 Value *fetchSrc(int s, int c);
1648 Value *fetchDst(int d, int c);
1649 Value *acquireDst(int d, int c);
1650 void storeDst(int d, int c, Value *);
1651
1652 Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1653 void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1654 Value *val, Value *ptr);
1655
1656 void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1657 Value *applySrcMod(Value *, int s, int c);
1658
1659 Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1660 Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1661 Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1662
1663 bool isSubGroupMask(uint8_t semantic);
1664
1665 bool handleInstruction(const struct tgsi_full_instruction *);
1666 void exportOutputs();
1667 inline bool isEndOfSubroutine(uint ip);
1668
1669 void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1670
1671 // R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1672 void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1673 void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1674 void handleTXF(Value *dst0[4], int R, int L_M);
1675 void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1676 void handleFBFETCH(Value *dst0[4]);
1677 void handleLIT(Value *dst0[4]);
1678
1679 // Symbol *getResourceBase(int r);
1680 void getImageCoords(std::vector<Value *>&, int s);
1681
1682 void handleLOAD(Value *dst0[4]);
1683 void handleSTORE();
1684 void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1685
1686 void handleINTERP(Value *dst0[4]);
1687
1688 Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1689
1690 void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1691
1692 Value *buildDot(int dim);
1693
1694 class BindArgumentsPass : public Pass {
1695 public:
1696 BindArgumentsPass(Converter &conv) : conv(conv) { }
1697
1698 private:
1699 Converter &conv;
1700 Subroutine *sub;
1701
1702 inline const Location *getValueLocation(Subroutine *, Value *);
1703
1704 template<typename T> inline void
1705 updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1706 T (Function::*proto));
1707
1708 template<typename T> inline void
1709 updatePrototype(BitSet *set, void (Function::*updateSet)(),
1710 T (Function::*proto));
1711
1712 protected:
1713 bool visit(Function *);
1714 bool visit(BasicBlock *bb) { return false; }
1715 };
1716
1717 private:
1718 const tgsi::Source *code;
1719
1720 uint ip; // instruction pointer
1721
1722 tgsi::Instruction tgsi;
1723
1724 DataType dstTy;
1725 DataType srcTy;
1726
1727 DataArray tData; // TGSI_FILE_TEMPORARY
1728 DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1729 DataArray aData; // TGSI_FILE_ADDRESS
1730 DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1731
1732 Value *zero;
1733
1734 Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1735 uint8_t vtxBaseValid;
1736
1737 Stack condBBs; // fork BB, then else clause BB
1738 Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1739 Stack loopBBs; // loop headers
1740 Stack breakBBs; // end of / after loop
1741
1742 Value *viewport;
1743 };
1744
1745 Symbol *
1746 Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1747 {
1748 const int swz = src.getSwizzle(c);
1749
1750 /* TODO: Use Array ID when it's available for the index */
1751 return makeSym(src.getFile(),
1752 src.is2D() ? src.getIndex(1) : 0,
1753 src.getIndex(0), swz,
1754 src.getIndex(0) * 16 + swz * 4);
1755 }
1756
1757 Symbol *
1758 Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1759 {
1760 /* TODO: Use Array ID when it's available for the index */
1761 return makeSym(dst.getFile(),
1762 dst.is2D() ? dst.getIndex(1) : 0,
1763 dst.getIndex(0), c,
1764 dst.getIndex(0) * 16 + c * 4);
1765 }
1766
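// Build a Symbol for a TGSI register reference: translate the file and, for
// shader inputs/outputs and system values, use the slot assignment recorded
// in info_out; otherwise fall back to the computed byte address.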
1767 Symbol *
1768 Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1769 {
1770 Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1771
1772 sym->reg.fileIndex = fileIdx;
1773
1774 if (tgsiFile == TGSI_FILE_MEMORY) {
1775 switch (code->memoryFiles[fileIdx].mem_type) {
1776 case TGSI_MEMORY_TYPE_GLOBAL:
1777 /* No-op this is the default for TGSI_FILE_MEMORY */
1778 sym->setFile(FILE_MEMORY_GLOBAL);
1779 break;
1780 case TGSI_MEMORY_TYPE_SHARED:
1781 sym->setFile(FILE_MEMORY_SHARED);
1782 break;
1783 case TGSI_MEMORY_TYPE_INPUT:
1784 assert(prog->getType() == Program::TYPE_COMPUTE);
1785 assert(idx == -1);
1786 sym->setFile(FILE_SHADER_INPUT);
1787 address += info->prop.cp.inputOffset;
1788 break;
1789 default:
1790 assert(0); /* TODO: Add support for global and private memory */
1791 }
1792 }
1793
1794 if (idx >= 0) {
1795 if (sym->reg.file == FILE_SHADER_INPUT)
1796 sym->setOffset(info_out->in[idx].slot[c] * 4);
1797 else
1798 if (sym->reg.file == FILE_SHADER_OUTPUT)
1799 sym->setOffset(info_out->out[idx].slot[c] * 4);
1800 else
1801 if (sym->reg.file == FILE_SYSTEM_VALUE)
1802 sym->setSV(tgsi::translateSysVal(info_out->sv[idx].sn), c);
1803 else
1804 sym->setOffset(address);
1805 } else {
1806 sym->setOffset(address);
1807 }
1808 return sym;
1809 }
1810
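// Emit the interpolation instruction for a fragment shader input; perspective
// interpolation (OP_PINTERP) additionally takes fragCoord.w as its second source.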
1811 Value *
1812 Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1813 {
1814 operation op;
1815
1816 // XXX: no way to know interpolation mode if we don't know what's accessed
1817 const uint8_t mode = translateInterpMode(&info_out->in[ptr ? 0 :
1818 src.getIndex(0)], op);
1819
1820 Instruction *insn = new_Instruction(func, op, TYPE_F32);
1821
1822 insn->setDef(0, getScratch());
1823 insn->setSrc(0, srcToSym(src, c));
1824 if (op == OP_PINTERP)
1825 insn->setSrc(1, fragCoord[3]);
1826 if (ptr)
1827 insn->setIndirect(0, 0, ptr);
1828
1829 insn->setInterpolate(mode);
1830
1831 bb->insertTail(insn);
1832 return insn->getDef(0);
1833 }
1834
1835 Value *
1836 Converter::applySrcMod(Value *val, int s, int c)
1837 {
1838 Modifier m = tgsi.getSrc(s).getMod(c);
1839 DataType ty = tgsi.inferSrcType();
1840
1841 if (m & Modifier(NV50_IR_MOD_ABS))
1842 val = mkOp1v(OP_ABS, ty, getScratch(), val);
1843
1844 if (m & Modifier(NV50_IR_MOD_NEG))
1845 val = mkOp1v(OP_NEG, ty, getScratch(), val);
1846
1847 return val;
1848 }
1849
1850 Value *
1851 Converter::getVertexBase(int s)
1852 {
1853 assert(s < 5);
1854 if (!(vtxBaseValid & (1 << s))) {
1855 const int index = tgsi.getSrc(s).getIndex(1);
1856 Value *rel = NULL;
1857 if (tgsi.getSrc(s).isIndirect(1))
1858 rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1859 vtxBaseValid |= 1 << s;
1860 vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1861 mkImm(index), rel);
1862 }
1863 return vtxBase[s];
1864 }
1865
1866 Value *
1867 Converter::getOutputBase(int s)
1868 {
1869 assert(s < 5);
1870 if (!(vtxBaseValid & (1 << s))) {
1871 Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1872 if (tgsi.getSrc(s).isIndirect(1))
1873 offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1874 fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1875 offset);
1876 vtxBaseValid |= 1 << s;
1877 vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1878 }
1879 return vtxBase[s];
1880 }
1881
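// Fetch source s, channel c: resolve indirect addressing, attach the
// second-dimension relocation for 2D registers (vertex base for inputs and
// outputs, indirect constant buffer index), then apply source modifiers.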
1882 Value *
1883 Converter::fetchSrc(int s, int c)
1884 {
1885 Value *res;
1886 Value *ptr = NULL, *dimRel = NULL;
1887
1888 tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1889
1890 if (src.isIndirect(0))
1891 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1892
1893 if (src.is2D()) {
1894 switch (src.getFile()) {
1895 case TGSI_FILE_OUTPUT:
1896 dimRel = getOutputBase(s);
1897 break;
1898 case TGSI_FILE_INPUT:
1899 dimRel = getVertexBase(s);
1900 break;
1901 case TGSI_FILE_CONSTANT:
1902 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1903 if (src.isIndirect(1))
1904 dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1905 break;
1906 default:
1907 break;
1908 }
1909 }
1910
1911 res = fetchSrc(src, c, ptr);
1912
1913 if (dimRel)
1914 res->getInsn()->setIndirect(0, 1, dimRel);
1915
1916 return applySrcMod(res, s, c);
1917 }
1918
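// Read the current value of destination d, channel c, by re-interpreting
// the destination register as a source (e.g. to fetch a bindless image
// handle from a store destination).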
1919 Value *
1920 Converter::fetchDst(int d, int c)
1921 {
1922 Value *res;
1923 Value *ptr = NULL, *dimRel = NULL;
1924
1925 tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
1926
1927 if (dst.isIndirect(0))
1928 ptr = fetchSrc(dst.getIndirect(0), 0, NULL);
1929
1930 if (dst.is2D()) {
1931 switch (dst.getFile()) {
1932 case TGSI_FILE_OUTPUT:
1933 assert(0); // TODO
1934 dimRel = NULL;
1935 break;
1936 case TGSI_FILE_INPUT:
1937 assert(0); // TODO
1938 dimRel = NULL;
1939 break;
1940 case TGSI_FILE_CONSTANT:
1941 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1942 if (dst.isIndirect(1))
1943 dimRel = fetchSrc(dst.getIndirect(1), 0, 0);
1944 break;
1945 default:
1946 break;
1947 }
1948 }
1949
1950 struct tgsi_full_src_register fsr = dst.asSrc();
1951 tgsi::Instruction::SrcRegister src(&fsr);
1952 res = fetchSrc(src, c, ptr);
1953
1954 if (dimRel)
1955 res->getInsn()->setIndirect(0, 1, dimRel);
1956
1957 return res;
1958 }
1959
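// Map a TGSI register file to the DataArray backing it: temporaries
// (tData, or lData for indirectly addressed arrays), address registers,
// and fragment shader outputs.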
1960 Converter::DataArray *
1961 Converter::getArrayForFile(unsigned file, int idx)
1962 {
1963 switch (file) {
1964 case TGSI_FILE_TEMPORARY:
1965 return idx == 0 ? &tData : &lData;
1966 case TGSI_FILE_ADDRESS:
1967 return &aData;
1968 case TGSI_FILE_OUTPUT:
1969 assert(prog->getType() == Program::TYPE_FRAGMENT);
1970 return &oData;
1971 default:
1972 assert(!"invalid/unhandled TGSI source file");
1973 return NULL;
1974 }
1975 }
1976
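// Scale a register index by 16 (the size of a vec4) to form a byte address
// for indirect accesses.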
1977 Value *
1978 Converter::shiftAddress(Value *index)
1979 {
1980 if (!index)
1981 return NULL;
1982 return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
1983 }
1984
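// Remap a TEMP belonging to an indirectly addressed array: such arrays live
// in the second temporary file (idx2d = 1) at the offset recorded for their
// ArrayID.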
1985 void
1986 Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
1987 {
1988 std::map<int, int>::const_iterator it =
1989 code->indirectTempOffsets.find(arrayId);
1990 if (it == code->indirectTempOffsets.end())
1991 return;
1992
1993 idx2d = 1;
1994 idx += it->second;
1995 }
1996
1997 bool
1998 Converter::isSubGroupMask(uint8_t semantic)
1999 {
2000 switch (semantic) {
2001 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
2002 case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
2003 case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
2004 case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
2005 case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
2006 return true;
2007 default:
2008 return false;
2009 }
2010 }
2011
2012 Value *
2013 Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
2014 {
2015 int idx2d = src.is2D() ? src.getIndex(1) : 0;
2016 int idx = src.getIndex(0);
2017 const int swz = src.getSwizzle(c);
2018 Instruction *ld;
2019
2020 switch (src.getFile()) {
2021 case TGSI_FILE_IMMEDIATE:
2022 assert(!ptr);
2023 return loadImm(NULL, code->immd.data[idx * 4 + swz]);
2024 case TGSI_FILE_CONSTANT:
2025 return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
2026 case TGSI_FILE_INPUT:
2027 if (prog->getType() == Program::TYPE_FRAGMENT) {
2028 // don't load masked inputs, won't be assigned a slot
2029 if (!ptr && !(info_out->in[idx].mask & (1 << swz)))
2030 return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
2031 return interpolate(src, c, shiftAddress(ptr));
2032 } else
2033 if (prog->getType() == Program::TYPE_GEOMETRY) {
2034 if (!ptr && info_out->in[idx].sn == TGSI_SEMANTIC_PRIMID)
2035 return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
2036 // XXX: This is going to be a problem with scalar arrays, i.e. when
2037 // we cannot assume that the address is given in units of vec4.
2038 //
2039 // nv50 and nvc0 need different things here, so let the lowering
2040 // passes decide what to do with the address
2041 if (ptr)
2042 return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
2043 }
2044 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2045 ld->perPatch = info_out->in[idx].patch;
2046 return ld->getDef(0);
2047 case TGSI_FILE_OUTPUT:
2048 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2049 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2050 ld->perPatch = info_out->out[idx].patch;
2051 return ld->getDef(0);
2052 case TGSI_FILE_SYSTEM_VALUE:
2053 assert(!ptr);
2054 if (info_out->sv[idx].sn == TGSI_SEMANTIC_THREAD_ID &&
2055 info->prop.cp.numThreads[swz] == 1)
2056 return loadImm(NULL, 0u);
2057 if (isSubGroupMask(info_out->sv[idx].sn) && swz > 0)
2058 return loadImm(NULL, 0u);
2059 if (info_out->sv[idx].sn == TGSI_SEMANTIC_SUBGROUP_SIZE)
2060 return loadImm(NULL, 32u);
2061 ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
2062 ld->perPatch = info_out->sv[idx].patch;
2063 return ld->getDef(0);
2064 case TGSI_FILE_TEMPORARY: {
2065 int arrayid = src.getArrayId();
2066 if (!arrayid)
2067 arrayid = code->tempArrayId[idx];
2068 adjustTempIndex(arrayid, idx, idx2d);
2069 }
2070 /* fallthrough */
2071 default:
2072 return getArrayForFile(src.getFile(), idx2d)->load(
2073 sub.cur->values, idx, swz, shiftAddress(ptr));
2074 }
2075 }
2076
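// Pick the value the instruction will write for destination d, channel c:
// NULL if the channel is masked or the destination is a buffer/memory/image,
// a scratch value if the result must be stored explicitly afterwards
// (indirect, system value, non-fragment output), otherwise the element of
// the backing DataArray.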
2077 Value *
2078 Converter::acquireDst(int d, int c)
2079 {
2080 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2081 const unsigned f = dst.getFile();
2082 int idx = dst.getIndex(0);
2083 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2084
2085 if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
2086 f == TGSI_FILE_IMAGE)
2087 return NULL;
2088
2089 if (dst.isIndirect(0) ||
2090 f == TGSI_FILE_SYSTEM_VALUE ||
2091 (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
2092 return getScratch();
2093
2094 if (f == TGSI_FILE_TEMPORARY) {
2095 int arrayid = dst.getArrayId();
2096 if (!arrayid)
2097 arrayid = code->tempArrayId[idx];
2098 adjustTempIndex(arrayid, idx, idx2d);
2099 }
2100
2101    return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
2102 }
2103
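// Store val to destination d, channel c: apply saturation, resolve indirect
// addressing, and route writes to the clip-vertex output through clipVtx[]
// when user clip plane lowering is enabled (genUserClip > 0).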
2104 void
2105 Converter::storeDst(int d, int c, Value *val)
2106 {
2107 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2108
2109 if (tgsi.getSaturate()) {
2110 mkOp1(OP_SAT, dstTy, val, val);
2111 }
2112
2113 Value *ptr = NULL;
2114 if (dst.isIndirect(0))
2115 ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
2116
2117 if (info_out->io.genUserClip > 0 &&
2118 dst.getFile() == TGSI_FILE_OUTPUT &&
2119 !dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
2120 mkMov(clipVtx[c], val);
2121 val = clipVtx[c];
2122 }
2123
2124 storeDst(dst, c, val, ptr);
2125 }
2126
2127 void
2128 Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2129 Value *val, Value *ptr)
2130 {
2131 const unsigned f = dst.getFile();
2132 int idx = dst.getIndex(0);
2133 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2134
2135 if (f == TGSI_FILE_SYSTEM_VALUE) {
2136 assert(!ptr);
2137 mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2138 } else
2139 if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2140
2141 if (ptr || (info_out->out[idx].mask & (1 << c))) {
2142 /* Save the viewport index into a scratch register so that it can be
2143 exported at EMIT time */
2144 if (info_out->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2145 prog->getType() == Program::TYPE_GEOMETRY &&
2146 viewport != NULL)
2147 mkOp1(OP_MOV, TYPE_U32, viewport, val);
2148 else
2149 mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2150 info_out->out[idx].patch;
2151 }
2152 } else
2153 if (f == TGSI_FILE_TEMPORARY ||
2154 f == TGSI_FILE_ADDRESS ||
2155 f == TGSI_FILE_OUTPUT) {
2156 if (f == TGSI_FILE_TEMPORARY) {
2157 int arrayid = dst.getArrayId();
2158 if (!arrayid)
2159 arrayid = code->tempArrayId[idx];
2160 adjustTempIndex(arrayid, idx, idx2d);
2161 }
2162
2163 getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2164 } else {
2165 assert(!"invalid dst file");
2166 }
2167 }
2168
2169 #define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2170 for (chan = 0; chan < 4; ++chan) \
2171 if (!inst.getDst(d).isMasked(chan))
2172
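// Build a dot product of the given dimension using one MUL followed by
// MADs, honoring the mul_zero_wins (dnz) flag.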
2173 Value *
2174 Converter::buildDot(int dim)
2175 {
2176 assert(dim > 0);
2177
2178 Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2179 Value *dotp = getScratch();
2180
2181 mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)
2182 ->dnz = info->io.mul_zero_wins;
2183
2184 for (int c = 1; c < dim; ++c) {
2185 src0 = fetchSrc(0, c);
2186 src1 = fetchSrc(1, c);
2187 mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)
2188 ->dnz = info->io.mul_zero_wins;
2189 }
2190 return dotp;
2191 }
2192
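// Insert an OP_JOINAT at the fork block and an OP_JOIN at the convergence
// block so that divergent control flow re-converges there.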
2193 void
2194 Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2195 {
2196 FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2197 join->fixed = 1;
2198 conv->insertHead(join);
2199
2200 assert(!fork->joinAt);
2201 fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2202 fork->insertBefore(fork->getExit(), fork->joinAt);
2203 }
2204
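// Attach the texture (R) and sampler (S) references to a texture
// instruction, adding indirect sources where needed; bindless handles are
// passed in as an explicit source instead of an index.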
2205 void
2206 Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2207 {
2208 unsigned rIdx = 0, sIdx = 0;
2209
2210 if (R >= 0 && tgsi.getSrc(R).getFile() != TGSI_FILE_SAMPLER) {
2211 // This is the bindless case. We have to get the actual value and pass
2212 // it in. This will be the complete handle.
2213 tex->tex.rIndirectSrc = s;
2214 tex->setSrc(s++, fetchSrc(R, 0));
2215 tex->setTexture(tgsi.getTexture(code, R), 0xff, 0x1f);
2216 tex->tex.bindless = true;
2217 return;
2218 }
2219
2220 if (R >= 0)
2221 rIdx = tgsi.getSrc(R).getIndex(0);
2222 if (S >= 0)
2223 sIdx = tgsi.getSrc(S).getIndex(0);
2224
2225 tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2226
2227 if (tgsi.getSrc(R).isIndirect(0)) {
2228 tex->tex.rIndirectSrc = s;
2229 tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2230 }
2231 if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2232 tex->tex.sIndirectSrc = s;
2233 tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2234 }
2235 }
2236
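// Emit an OP_TXQ for the requested query; TXQ_DIMS takes the mip level from
// the first source, all other queries use zero.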
2237 void
2238 Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2239 {
2240 TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2241 tex->tex.query = query;
2242 unsigned int c, d;
2243
2244 for (d = 0, c = 0; c < 4; ++c) {
2245 if (!dst0[c])
2246 continue;
2247 tex->tex.mask |= 1 << c;
2248 tex->setDef(d++, dst0[c]);
2249 }
2250 if (query == TXQ_DIMS)
2251 tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2252 else
2253 tex->setSrc((c = 0), zero);
2254
2255 setTexRS(tex, ++c, R, -1);
2256
2257 bb->insertTail(tex);
2258 }
2259
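// Divide the texture coordinates by the projection value (src.w) for TXP.
// For coordinates produced by OP_PINTERP the division is folded into the
// interpolation by substituting 1/q for the perspective factor; remaining
// channels are multiplied by RCP(q) explicitly.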
2260 void
2261 Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2262 {
2263 Value *proj = fetchSrc(0, 3);
2264 Instruction *insn = proj->getUniqueInsn();
2265 int c;
2266
2267 if (insn->op == OP_PINTERP) {
2268 bb->insertTail(insn = cloneForward(func, insn));
2269 insn->op = OP_LINTERP;
2270 insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2271 insn->setSrc(1, NULL);
2272 proj = insn->getDef(0);
2273 }
2274 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2275
2276 for (c = 0; c < 4; ++c) {
2277 if (!(mask & (1 << c)))
2278 continue;
2279 if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2280 continue;
2281 mask &= ~(1 << c);
2282
2283 bb->insertTail(insn = cloneForward(func, insn));
2284 insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2285 insn->setSrc(1, proj);
2286 dst[c] = insn->getDef(0);
2287 }
2288 if (!mask)
2289 return;
2290
2291 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2292
2293 for (c = 0; c < 4; ++c)
2294 if (mask & (1 << c))
2295 dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2296 }
2297
2298 // order of nv50 ir sources: x y z layer lod/bias shadow
2299 // order of TGSI TEX sources: x y z layer shadow lod/bias
2300 // lowering will eventually set the hw-specific order (e.g. array first on nvc0)
2301 void
2302 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2303 {
2304 Value *arg[4], *src[8];
2305 Value *lod = NULL, *shd = NULL;
2306 unsigned int s, c, d;
2307 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2308
2309 TexInstruction::Target tgt = tgsi.getTexture(code, R);
2310
2311 for (s = 0; s < tgt.getArgCount(); ++s)
2312 arg[s] = src[s] = fetchSrc(0, s);
2313
2314 if (tgsi.getOpcode() == TGSI_OPCODE_TEX_LZ)
2315 lod = loadImm(NULL, 0);
2316 else if (texi->op == OP_TXL || texi->op == OP_TXB)
2317 lod = fetchSrc(L >> 4, L & 3);
2318
2319 if (C == 0x0f)
2320 C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2321
2322 if (tgt == TEX_TARGET_CUBE_ARRAY_SHADOW) {
2323 switch (tgsi.getOpcode()) {
2324 case TGSI_OPCODE_TG4: shd = fetchSrc(1, 0); break;
2325 case TGSI_OPCODE_TEX2: shd = fetchSrc(1, 0); break;
2326 case TGSI_OPCODE_TXB2: shd = fetchSrc(1, 1); break;
2327 case TGSI_OPCODE_TXL2: shd = fetchSrc(1, 1); break;
2328 default: assert(!"unexpected opcode with cube array shadow"); break;
2329 }
2330 }
2331 else if (tgt.isShadow())
2332 shd = fetchSrc(C >> 4, C & 3);
2333
2334 if (texi->op == OP_TXD) {
2335 for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2336 texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2337 texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2338 }
2339 }
2340
2341 // cube textures don't care about projection value, it's divided out
2342 if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2343 unsigned int n = tgt.getDim();
2344 if (shd) {
2345 arg[n] = shd;
2346 ++n;
2347 assert(tgt.getDim() == tgt.getArgCount());
2348 }
2349 loadProjTexCoords(src, arg, (1 << n) - 1);
2350 if (shd)
2351 shd = src[n - 1];
2352 }
2353
2354 for (c = 0, d = 0; c < 4; ++c) {
2355 if (dst[c]) {
2356 texi->setDef(d++, dst[c]);
2357 texi->tex.mask |= 1 << c;
2358 } else {
2359 // NOTE: maybe hook up def too, for CSE
2360 }
2361 }
2362 for (s = 0; s < tgt.getArgCount(); ++s)
2363 texi->setSrc(s, src[s]);
2364 if (lod)
2365 texi->setSrc(s++, lod);
2366 if (shd)
2367 texi->setSrc(s++, shd);
2368
2369 setTexRS(texi, s, R, S);
2370
2371 if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2372 texi->tex.levelZero = true;
2373 if (prog->getType() != Program::TYPE_FRAGMENT &&
2374 (tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2375 tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2376 tgsi.getOpcode() == TGSI_OPCODE_TXP))
2377 texi->tex.levelZero = true;
2378 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2379 texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, code->immd.data);
2380
2381 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2382 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2383 for (c = 0; c < 3; ++c) {
2384 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2385 texi->offset[s][c].setInsn(texi);
2386 }
2387 }
2388
2389 bb->insertTail(texi);
2390 }
2391
2392 // 1st source: xyz = coordinates, w = lod/sample
2393 // 2nd source: offset
2394 void
2395 Converter::handleTXF(Value *dst[4], int R, int L_M)
2396 {
2397 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2398 int ms;
2399 unsigned int c, d, s;
2400
2401 texi->tex.target = tgsi.getTexture(code, R);
2402
2403 ms = texi->tex.target.isMS() ? 1 : 0;
2404 texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2405
2406 for (c = 0, d = 0; c < 4; ++c) {
2407 if (dst[c]) {
2408 texi->setDef(d++, dst[c]);
2409 texi->tex.mask |= 1 << c;
2410 }
2411 }
2412 for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2413 texi->setSrc(c, fetchSrc(0, c));
2414 if (!ms && tgsi.getOpcode() == TGSI_OPCODE_TXF_LZ)
2415 texi->setSrc(c++, loadImm(NULL, 0));
2416 else
2417 texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2418
2419 setTexRS(texi, c, R, -1);
2420
2421 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2422 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2423 for (c = 0; c < 3; ++c) {
2424 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2425 texi->offset[s][c].setInsn(texi);
2426 }
2427 }
2428
2429 bb->insertTail(texi);
2430 }
2431
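// Implement FBFETCH as a level-zero texel fetch from a 2D MS array target
// at the current fragment position (x, y, layer, sample index).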
2432 void
2433 Converter::handleFBFETCH(Value *dst[4])
2434 {
2435 TexInstruction *texi = new_TexInstruction(func, OP_TXF);
2436 unsigned int c, d;
2437
2438 texi->tex.target = TEX_TARGET_2D_MS_ARRAY;
2439 texi->tex.levelZero = 1;
2440 texi->tex.useOffsets = 0;
2441
2442 for (c = 0, d = 0; c < 4; ++c) {
2443 if (dst[c]) {
2444 texi->setDef(d++, dst[c]);
2445 texi->tex.mask |= 1 << c;
2446 }
2447 }
2448
2449 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));
2450 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));
2451 Value *z = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_LAYER, 0));
2452 Value *ms = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_SAMPLE_INDEX, 0));
2453
2454 mkCvt(OP_CVT, TYPE_U32, x, TYPE_F32, x)->rnd = ROUND_Z;
2455 mkCvt(OP_CVT, TYPE_U32, y, TYPE_F32, y)->rnd = ROUND_Z;
2456 texi->setSrc(0, x);
2457 texi->setSrc(1, y);
2458 texi->setSrc(2, z);
2459 texi->setSrc(3, ms);
2460
2461 texi->tex.r = texi->tex.s = -1;
2462
2463 bb->insertTail(texi);
2464 }
2465
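// Expand LIT: dst.x = dst.w = 1, dst.y = max(src.x, 0), and
// dst.z = (src.x > 0) ? pow(max(src.y, 0), clamped src.w) : 0.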
2466 void
2467 Converter::handleLIT(Value *dst0[4])
2468 {
2469 Value *val0 = NULL;
2470 unsigned int mask = tgsi.getDst(0).getMask();
2471
2472 if (mask & (1 << 0))
2473 loadImm(dst0[0], 1.0f);
2474
2475 if (mask & (1 << 3))
2476 loadImm(dst0[3], 1.0f);
2477
2478 if (mask & (3 << 1)) {
2479 val0 = getScratch();
2480 mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2481 if (mask & (1 << 1))
2482 mkMov(dst0[1], val0);
2483 }
2484
2485 if (mask & (1 << 2)) {
2486 Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2487 Value *val1 = getScratch(), *val3 = getScratch();
2488
2489 Value *pos128 = loadImm(NULL, +127.999999f);
2490 Value *neg128 = loadImm(NULL, -127.999999f);
2491
2492 mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2493 mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2494 mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2495 mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2496
2497 mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2498 }
2499 }
2500
2501 /* Keep this around for now as reference when adding img support
2502 static inline bool
2503 isResourceSpecial(const int r)
2504 {
2505 return (r == TGSI_RESOURCE_GLOBAL ||
2506 r == TGSI_RESOURCE_LOCAL ||
2507 r == TGSI_RESOURCE_PRIVATE ||
2508 r == TGSI_RESOURCE_INPUT);
2509 }
2510
2511 static inline bool
2512 isResourceRaw(const tgsi::Source *code, const int r)
2513 {
2514 return isResourceSpecial(r) || code->resources[r].raw;
2515 }
2516
2517 static inline nv50_ir::TexTarget
2518 getResourceTarget(const tgsi::Source *code, int r)
2519 {
2520 if (isResourceSpecial(r))
2521 return nv50_ir::TEX_TARGET_BUFFER;
2522 return tgsi::translateTexture(code->resources.at(r).target);
2523 }
2524
2525 Symbol *
2526 Converter::getResourceBase(const int r)
2527 {
2528 Symbol *sym = NULL;
2529
2530 switch (r) {
2531 case TGSI_RESOURCE_GLOBAL:
2532 sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2533 info->io.auxCBSlot);
2534 break;
2535 case TGSI_RESOURCE_LOCAL:
2536 assert(prog->getType() == Program::TYPE_COMPUTE);
2537 sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2538 info->prop.cp.sharedOffset);
2539 break;
2540 case TGSI_RESOURCE_PRIVATE:
2541 sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2542 info->bin.tlsSpace);
2543 break;
2544 case TGSI_RESOURCE_INPUT:
2545 assert(prog->getType() == Program::TYPE_COMPUTE);
2546 sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2547 info->prop.cp.inputOffset);
2548 break;
2549 default:
2550 sym = new_Symbol(prog,
2551 nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2552 break;
2553 }
2554 return sym;
2555 }
2556
2557 void
2558 Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2559 {
2560 const int arg =
2561 TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2562
2563 for (int c = 0; c < arg; ++c)
2564 coords.push_back(fetchSrc(s, c));
2565
2566 // NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2567 if (r == TGSI_RESOURCE_LOCAL ||
2568 r == TGSI_RESOURCE_PRIVATE ||
2569 r == TGSI_RESOURCE_INPUT)
2570 coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2571 coords[0]);
2572 }
2573
2574 static inline int
2575 partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2576 {
2577 int n = 0;
2578
2579 while (mask) {
2580 if (mask & 1) {
2581 size[n]++;
2582 } else {
2583 if (size[n])
2584 comp[n = 1] = size[0] + 1;
2585 else
2586 comp[n]++;
2587 }
2588 mask >>= 1;
2589 }
2590 if (size[0] == 3) {
2591 n = 1;
2592 size[0] = (comp[0] == 1) ? 1 : 2;
2593 size[1] = 3 - size[0];
2594 comp[1] = comp[0] + size[0];
2595 }
2596 return n + 1;
2597 }
2598 */
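// Gather the coordinate sources for an image access: dim plus the
// array/cube layer, and the sample index (src.w) for MS targets.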
2599 void
2600 Converter::getImageCoords(std::vector<Value *> &coords, int s)
2601 {
2602 TexInstruction::Target t =
2603 TexInstruction::Target(tgsi.getImageTarget());
2604 const int arg = t.getDim() + (t.isArray() || t.isCube());
2605
2606 for (int c = 0; c < arg; ++c)
2607 coords.push_back(fetchSrc(s, c));
2608
2609 if (t.isMS())
2610 coords.push_back(fetchSrc(s, 3));
2611 }
2612
2613 // For raw loads, the granularity is 4 bytes.
2614 // Using the texture read mask on OP_SULDP is not allowed.
2615 void
2616 Converter::handleLOAD(Value *dst0[4])
2617 {
2618 const int r = tgsi.getSrc(0).getIndex(0);
2619 int c;
2620 std::vector<Value *> off, src, ldv, def;
2621 Value *ind = NULL;
2622
2623 if (tgsi.getSrc(0).isIndirect(0))
2624 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2625
2626 switch (tgsi.getSrc(0).getFile()) {
2627 case TGSI_FILE_BUFFER:
2628 case TGSI_FILE_MEMORY:
2629 for (c = 0; c < 4; ++c) {
2630 if (!dst0[c])
2631 continue;
2632
2633 Value *off;
2634 Symbol *sym;
2635 uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2636
2637 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2638 off = NULL;
2639 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2640 tgsi.getSrc(1).getValueU32(0, code->immd.data) +
2641 src0_component_offset);
2642 } else {
2643 // yzw are ignored for buffers
2644 off = fetchSrc(1, 0);
2645 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2646 src0_component_offset);
2647 }
2648
2649 Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2650 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER &&
2651 code->bufferAtomics[r])
2652 ld->cache = nv50_ir::CACHE_CG;
2653 else
2654 ld->cache = tgsi.getCacheMode();
2655 if (ind)
2656 ld->setIndirect(0, 1, ind);
2657 }
2658 break;
2659 default: {
2660 getImageCoords(off, 1);
2661 def.resize(4);
2662
2663 for (c = 0; c < 4; ++c) {
2664 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2665 def[c] = getScratch();
2666 else
2667 def[c] = dst0[c];
2668 }
2669
2670 bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2671 if (bindless)
2672 ind = fetchSrc(0, 0);
2673
2674 TexInstruction *ld =
2675 mkTex(OP_SULDP, tgsi.getImageTarget(), 0, 0, def, off);
2676 ld->tex.mask = tgsi.getDst(0).getMask();
2677 ld->tex.format = tgsi.getImageFormat();
2678 ld->cache = tgsi.getCacheMode();
2679 ld->tex.bindless = bindless;
2680 if (!bindless)
2681 ld->tex.r = r;
2682 if (ind)
2683 ld->setIndirectR(ind);
2684
2685 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2686 if (dst0[c] != def[c])
2687 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2688 break;
2689 }
2690 }
2691
2692
2693 /* Keep this around for now as reference when adding img support
2694 getResourceCoords(off, r, 1);
2695
2696 if (isResourceRaw(code, r)) {
2697 uint8_t mask = 0;
2698 uint8_t comp[2] = { 0, 0 };
2699 uint8_t size[2] = { 0, 0 };
2700
2701 Symbol *base = getResourceBase(r);
2702
2703 // determine the base and size of the at most 2 load ops
2704 for (c = 0; c < 4; ++c)
2705 if (!tgsi.getDst(0).isMasked(c))
2706 mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2707
2708 int n = partitionLoadStore(comp, size, mask);
2709
2710 src = off;
2711
2712 def.resize(4); // index by component, the ones we need will be non-NULL
2713 for (c = 0; c < 4; ++c) {
2714 if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2715 def[c] = dst0[c];
2716 else
2717 if (mask & (1 << c))
2718 def[c] = getScratch();
2719 }
2720
2721 const bool useLd = isResourceSpecial(r) ||
2722 (info->io.nv50styleSurfaces &&
2723 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2724
2725 for (int i = 0; i < n; ++i) {
2726 ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2727
2728 if (comp[i]) // adjust x component of source address if necessary
2729 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2730 off[0], mkImm(comp[i] * 4));
2731 else
2732 src[0] = off[0];
2733
2734 if (useLd) {
2735 Instruction *ld =
2736 mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2737 for (size_t c = 1; c < ldv.size(); ++c)
2738 ld->setDef(c, ldv[c]);
2739 } else {
2740 mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2741 0, ldv, src)->dType = typeOfSize(size[i] * 4);
2742 }
2743 }
2744 } else {
2745 def.resize(4);
2746 for (c = 0; c < 4; ++c) {
2747 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2748 def[c] = getScratch();
2749 else
2750 def[c] = dst0[c];
2751 }
2752
2753 mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2754 def, off);
2755 }
2756 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2757 if (dst0[c] != def[c])
2758 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2759 */
2760 }
2761
2762 // For formatted stores, the write mask on OP_SUSTP can be used.
2763 // Raw stores have to be split.
2764 void
2765 Converter::handleSTORE()
2766 {
2767 const int r = tgsi.getDst(0).getIndex(0);
2768 int c;
2769 std::vector<Value *> off, src, dummy;
2770 Value *ind = NULL;
2771
2772 if (tgsi.getDst(0).isIndirect(0))
2773 ind = fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0);
2774
2775 switch (tgsi.getDst(0).getFile()) {
2776 case TGSI_FILE_BUFFER:
2777 case TGSI_FILE_MEMORY:
2778 for (c = 0; c < 4; ++c) {
2779 if (!(tgsi.getDst(0).getMask() & (1 << c)))
2780 continue;
2781
2782 Symbol *sym;
2783 Value *off;
2784 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2785 off = NULL;
2786 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2787 tgsi.getSrc(0).getValueU32(0, code->immd.data) + 4 * c);
2788 } else {
2789 // yzw are ignored for buffers
2790 off = fetchSrc(0, 0);
2791 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2792 }
2793
2794 Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2795 st->cache = tgsi.getCacheMode();
2796 if (ind)
2797 st->setIndirect(0, 1, ind);
2798 }
2799 break;
2800 default: {
2801 getImageCoords(off, 0);
2802 src = off;
2803
2804 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2805 src.push_back(fetchSrc(1, c));
2806
2807 bool bindless = tgsi.getDst(0).getFile() != TGSI_FILE_IMAGE;
2808 if (bindless)
2809 ind = fetchDst(0, 0);
2810
2811 TexInstruction *st =
2812 mkTex(OP_SUSTP, tgsi.getImageTarget(), 0, 0, dummy, src);
2813 st->tex.mask = tgsi.getDst(0).getMask();
2814 st->tex.format = tgsi.getImageFormat();
2815 st->cache = tgsi.getCacheMode();
2816 st->tex.bindless = bindless;
2817 if (!bindless)
2818 st->tex.r = r;
2819 if (ind)
2820 st->setIndirectR(ind);
2821
2822 break;
2823 }
2824 }
2825
2826 /* Keep this around for now as reference when adding img support
2827 getResourceCoords(off, r, 0);
2828 src = off;
2829 const int s = src.size();
2830
2831 if (isResourceRaw(code, r)) {
2832 uint8_t comp[2] = { 0, 0 };
2833 uint8_t size[2] = { 0, 0 };
2834
2835 int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2836
2837 Symbol *base = getResourceBase(r);
2838
2839 const bool useSt = isResourceSpecial(r) ||
2840 (info->io.nv50styleSurfaces &&
2841 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2842
2843 for (int i = 0; i < n; ++i) {
2844 if (comp[i]) // adjust x component of source address if necessary
2845 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2846 off[0], mkImm(comp[i] * 4));
2847 else
2848 src[0] = off[0];
2849
2850 const DataType stTy = typeOfSize(size[i] * 4);
2851
2852 if (useSt) {
2853 Instruction *st =
2854 mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2855 for (c = 1; c < size[i]; ++c)
2856 st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2857 st->setIndirect(0, 0, src[0]);
2858 } else {
2859 // attach values to be stored
2860 src.resize(s + size[i]);
2861 for (c = 0; c < size[i]; ++c)
2862 src[s + c] = fetchSrc(1, comp[i] + c);
2863 mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2864 0, dummy, src)->setType(stTy);
2865 }
2866 }
2867 } else {
2868 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2869 src.push_back(fetchSrc(1, c));
2870
2871 mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2872 dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2873 }
2874 */
2875 }
2876
2877 // XXX: These only work on resources with the single-component u32/s32 formats,
2878 // so the result is replicated. This might not be what TGSI intends, but
2879 // operating on more than one component would produce undefined results because
2880 // those components do not exist.
2881 void
2882 Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2883 {
2884 const int r = tgsi.getSrc(0).getIndex(0);
2885 std::vector<Value *> srcv;
2886 std::vector<Value *> defv;
2887 LValue *dst = getScratch();
2888 Value *ind = NULL;
2889
2890 if (tgsi.getSrc(0).isIndirect(0))
2891 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2892
2893 switch (tgsi.getSrc(0).getFile()) {
2894 case TGSI_FILE_BUFFER:
2895 case TGSI_FILE_MEMORY:
2896 for (int c = 0; c < 4; ++c) {
2897 if (!dst0[c])
2898 continue;
2899
2900 Instruction *insn;
2901 Value *off = fetchSrc(1, c);
2902 Value *sym;
2903 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2904 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2905 tgsi.getSrc(1).getValueU32(c, code->immd.data));
2906 else
2907 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2908 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2909 insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2910 else
2911 insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2912 if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2913 insn->setIndirect(0, 0, off);
2914 if (ind)
2915 insn->setIndirect(0, 1, ind);
2916 insn->subOp = subOp;
2917 }
2918 for (int c = 0; c < 4; ++c)
2919 if (dst0[c])
2920 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2921 break;
2922 default: {
2923 getImageCoords(srcv, 1);
2924 defv.push_back(dst);
2925 srcv.push_back(fetchSrc(2, 0));
2926
2927 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2928 srcv.push_back(fetchSrc(3, 0));
2929
2930 bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2931 if (bindless)
2932 ind = fetchSrc(0, 0);
2933
2934 TexInstruction *tex = mkTex(OP_SUREDP, tgsi.getImageTarget(),
2935 0, 0, defv, srcv);
2936 tex->subOp = subOp;
2937 tex->tex.mask = 1;
2938 tex->tex.format = tgsi.getImageFormat();
2939 tex->setType(ty);
2940 tex->tex.bindless = bindless;
2941 if (!bindless)
2942 tex->tex.r = r;
2943 if (ind)
2944 tex->setIndirectR(ind);
2945
2946 for (int c = 0; c < 4; ++c)
2947 if (dst0[c])
2948 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2949 break;
2950 }
2951 }
2952
2953 /* Keep this around for now as reference when adding img support
2954 getResourceCoords(srcv, r, 1);
2955
2956 if (isResourceSpecial(r)) {
2957 assert(r != TGSI_RESOURCE_INPUT);
2958 Instruction *insn;
2959 insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
2960 insn->subOp = subOp;
2961 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2962 insn->setSrc(2, fetchSrc(3, 0));
2963 insn->setIndirect(0, 0, srcv.at(0));
2964 } else {
2965 operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
2966 TexTarget targ = getResourceTarget(code, r);
2967 int idx = code->resources[r].slot;
2968 defv.push_back(dst);
2969 srcv.push_back(fetchSrc(2, 0));
2970 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2971 srcv.push_back(fetchSrc(3, 0));
2972 TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
2973 tex->subOp = subOp;
2974 tex->tex.mask = 1;
2975 tex->setType(ty);
2976 }
2977
2978 for (int c = 0; c < 4; ++c)
2979 if (dst0[c])
2980 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2981 */
2982 }
2983
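// Handle INTERP_CENTROID/SAMPLE/OFFSET: determine the interpolation op and
// mode of the source input (following MOV chains when the source is not a
// plain input), build the sample/offset operand if needed, and
// re-interpolate each enabled channel with it.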
2984 void
2985 Converter::handleINTERP(Value *dst[4])
2986 {
2987 // Check whether the input is linear. All other attributes ignored.
2988    // Check whether the input is linear; all other attributes are ignored.
2989 Value *offset = NULL, *ptr = NULL, *w = NULL;
2990 Symbol *sym[4] = { NULL };
2991 bool linear;
2992 operation op = OP_NOP;
2993 int c, mode = 0;
2994
2995 tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
2996
2997 // In some odd cases, in large part due to varying packing, the source
2998 // might not actually be an input. This is illegal TGSI, but it's easier to
2999 // account for it here than it is to fix it where the TGSI is being
3000 // generated. In that case, it's going to be a straight up mov (or sequence
3001 // of mov's) from the input in question. We follow the mov chain to see
3002 // which input we need to use.
3003 if (src.getFile() != TGSI_FILE_INPUT) {
3004 if (src.isIndirect(0)) {
3005 ERROR("Ignoring indirect input interpolation\n");
3006 return;
3007 }
3008 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3009 Value *val = fetchSrc(0, c);
3010 assert(val->defs.size() == 1);
3011 insn = val->getInsn();
3012 while (insn->op == OP_MOV) {
3013 assert(insn->getSrc(0)->defs.size() == 1);
3014 insn = insn->getSrc(0)->getInsn();
3015 if (!insn) {
3016 ERROR("Miscompiling shader due to unhandled INTERP\n");
3017 return;
3018 }
3019 }
3020 if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
3021 ERROR("Trying to interpolate non-input, this is not allowed.\n");
3022 return;
3023 }
3024 sym[c] = insn->getSrc(0)->asSym();
3025 assert(sym[c]);
3026 op = insn->op;
3027 mode = insn->ipa;
3028 ptr = insn->getIndirect(0, 0);
3029 }
3030 } else {
3031 if (src.isIndirect(0))
3032 ptr = shiftAddress(fetchSrc(src.getIndirect(0), 0, NULL));
3033
3034    // In the indirect case, we can assume that the fixed index points to an
3035    // input of the same interpolation type.
3036 // TODO: Make use of ArrayID.
3037 linear = info_out->in[src.getIndex(0)].linear;
3038 if (linear) {
3039 op = OP_LINTERP;
3040 mode = NV50_IR_INTERP_LINEAR;
3041 } else {
3042 op = OP_PINTERP;
3043 mode = NV50_IR_INTERP_PERSPECTIVE;
3044 }
3045 }
3046
3047 switch (tgsi.getOpcode()) {
3048 case TGSI_OPCODE_INTERP_CENTROID:
3049 mode |= NV50_IR_INTERP_CENTROID;
3050 break;
3051 case TGSI_OPCODE_INTERP_SAMPLE:
3052 insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), fetchSrc(1, 0));
3053 insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
3054 mode |= NV50_IR_INTERP_OFFSET;
3055 break;
3056 case TGSI_OPCODE_INTERP_OFFSET: {
3057 // The input in src1.xy is float, but we need a single 32-bit value
3058 // where the upper and lower 16 bits are encoded in S0.12 format. We need
3059 // to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
3060 // and then convert to s32.
3061 Value *offs[2];
3062 for (c = 0; c < 2; c++) {
3063 offs[c] = getScratch();
3064 mkOp2(OP_MIN, TYPE_F32, offs[c], fetchSrc(1, c), loadImm(NULL, 0.4375f));
3065 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
3066 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
3067 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
3068 }
3069 offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
3070 offs[1], mkImm(0x1010), offs[0]);
3071 mode |= NV50_IR_INTERP_OFFSET;
3072 break;
3073 }
3074 }
3075
3076 if (op == OP_PINTERP) {
3077 if (offset) {
3078 w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
3079 mkOp1(OP_RCP, TYPE_F32, w, w);
3080 } else {
3081 w = fragCoord[3];
3082 }
3083 }
3084
3085
3086 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3087 insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
3088 if (op == OP_PINTERP)
3089 insn->setSrc(1, w);
3090 if (offset)
3091 insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
3092 if (ptr)
3093 insn->setIndirect(0, 0, ptr);
3094
3095 insn->setInterpolate(mode);
3096 }
3097 }
3098
3099 bool
3100 Converter::isEndOfSubroutine(uint ip)
3101 {
3102 assert(ip < code->scan.num_instructions);
3103 tgsi::Instruction insn(&code->insns[ip]);
3104 return (insn.getOpcode() == TGSI_OPCODE_END ||
3105 insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
3106           // does END occur at the end of main or at the very end?
3107 insn.getOpcode() == TGSI_OPCODE_BGNSUB);
3108 }
3109
3110 bool
3111 Converter::handleInstruction(const struct tgsi_full_instruction *insn)
3112 {
3113 Instruction *geni;
3114
3115 Value *dst0[4], *rDst0[4];
3116 Value *src0, *src1, *src2, *src3;
3117 Value *val0, *val1;
3118 int c;
3119
3120 tgsi = tgsi::Instruction(insn);
3121
3122 bool useScratchDst = tgsi.checkDstSrcAliasing();
3123
3124 operation op = tgsi.getOP();
3125 dstTy = tgsi.inferDstType();
3126 srcTy = tgsi.inferSrcType();
3127
3128 unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
3129
3130 if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
3131 for (c = 0; c < 4; ++c) {
3132 rDst0[c] = acquireDst(0, c);
3133 dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
3134 }
3135 }
3136
3137 switch (tgsi.getOpcode()) {
3138 case TGSI_OPCODE_ADD:
3139 case TGSI_OPCODE_UADD:
3140 case TGSI_OPCODE_AND:
3141 case TGSI_OPCODE_DIV:
3142 case TGSI_OPCODE_IDIV:
3143 case TGSI_OPCODE_UDIV:
3144 case TGSI_OPCODE_MAX:
3145 case TGSI_OPCODE_MIN:
3146 case TGSI_OPCODE_IMAX:
3147 case TGSI_OPCODE_IMIN:
3148 case TGSI_OPCODE_UMAX:
3149 case TGSI_OPCODE_UMIN:
3150 case TGSI_OPCODE_MOD:
3151 case TGSI_OPCODE_UMOD:
3152 case TGSI_OPCODE_MUL:
3153 case TGSI_OPCODE_UMUL:
3154 case TGSI_OPCODE_IMUL_HI:
3155 case TGSI_OPCODE_UMUL_HI:
3156 case TGSI_OPCODE_OR:
3157 case TGSI_OPCODE_SHL:
3158 case TGSI_OPCODE_ISHR:
3159 case TGSI_OPCODE_USHR:
3160 case TGSI_OPCODE_XOR:
3161 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3162 src0 = fetchSrc(0, c);
3163 src1 = fetchSrc(1, c);
3164 geni = mkOp2(op, dstTy, dst0[c], src0, src1);
3165 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3166 if (op == OP_MUL && dstTy == TYPE_F32)
3167 geni->dnz = info->io.mul_zero_wins;
3168 geni->precise = insn->Instruction.Precise;
3169 }
3170 break;
3171 case TGSI_OPCODE_MAD:
3172 case TGSI_OPCODE_UMAD:
3173 case TGSI_OPCODE_FMA:
3174 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3175 src0 = fetchSrc(0, c);
3176 src1 = fetchSrc(1, c);
3177 src2 = fetchSrc(2, c);
3178 geni = mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3179 if (dstTy == TYPE_F32)
3180 geni->dnz = info->io.mul_zero_wins;
3181 geni->precise = insn->Instruction.Precise;
3182 }
3183 break;
3184 case TGSI_OPCODE_MOV:
3185 case TGSI_OPCODE_CEIL:
3186 case TGSI_OPCODE_FLR:
3187 case TGSI_OPCODE_TRUNC:
3188 case TGSI_OPCODE_RCP:
3189 case TGSI_OPCODE_SQRT:
3190 case TGSI_OPCODE_IABS:
3191 case TGSI_OPCODE_INEG:
3192 case TGSI_OPCODE_NOT:
3193 case TGSI_OPCODE_DDX:
3194 case TGSI_OPCODE_DDY:
3195 case TGSI_OPCODE_DDX_FINE:
3196 case TGSI_OPCODE_DDY_FINE:
3197 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3198 mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3199 break;
3200 case TGSI_OPCODE_RSQ:
3201 src0 = fetchSrc(0, 0);
3202 val0 = getScratch();
3203 mkOp1(OP_ABS, TYPE_F32, val0, src0);
3204 mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3205 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3206 mkMov(dst0[c], val0);
3207 break;
3208 case TGSI_OPCODE_ARL:
3209 case TGSI_OPCODE_ARR:
3210 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3211 const RoundMode rnd =
3212 tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3213 src0 = fetchSrc(0, c);
3214 mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3215 }
3216 break;
3217 case TGSI_OPCODE_UARL:
3218 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3219 mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3220 break;
3221 case TGSI_OPCODE_POW:
3222 val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3223 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3224 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3225 break;
3226 case TGSI_OPCODE_EX2:
3227 case TGSI_OPCODE_LG2:
3228 val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3229 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3230 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3231 break;
3232 case TGSI_OPCODE_COS:
3233 case TGSI_OPCODE_SIN:
3234 val0 = getScratch();
3235 if (mask & 7) {
3236 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3237 mkOp1(op, TYPE_F32, val0, val0);
3238 for (c = 0; c < 3; ++c)
3239 if (dst0[c])
3240 mkMov(dst0[c], val0);
3241 }
3242 if (dst0[3]) {
3243 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3244 mkOp1(op, TYPE_F32, dst0[3], val0);
3245 }
3246 break;
3247 case TGSI_OPCODE_EXP:
3248 src0 = fetchSrc(0, 0);
3249 val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3250 if (dst0[1])
3251 mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3252 if (dst0[0])
3253 mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3254 if (dst0[2])
3255 mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3256 if (dst0[3])
3257 loadImm(dst0[3], 1.0f);
3258 break;
3259 case TGSI_OPCODE_LOG:
3260 src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3261 val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3262 if (dst0[0] || dst0[1])
3263 val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3264 if (dst0[1]) {
3265 mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3266 mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3267 mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0)
3268 ->dnz = info->io.mul_zero_wins;
3269 }
3270 if (dst0[3])
3271 loadImm(dst0[3], 1.0f);
3272 break;
3273 case TGSI_OPCODE_DP2:
3274 val0 = buildDot(2);
3275 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3276 mkMov(dst0[c], val0);
3277 break;
3278 case TGSI_OPCODE_DP3:
3279 val0 = buildDot(3);
3280 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3281 mkMov(dst0[c], val0);
3282 break;
3283 case TGSI_OPCODE_DP4:
3284 val0 = buildDot(4);
3285 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3286 mkMov(dst0[c], val0);
3287 break;
3288 case TGSI_OPCODE_DST:
3289 if (dst0[0])
3290 loadImm(dst0[0], 1.0f);
3291 if (dst0[1]) {
3292 src0 = fetchSrc(0, 1);
3293 src1 = fetchSrc(1, 1);
3294 mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1)
3295 ->dnz = info->io.mul_zero_wins;
3296 }
3297 if (dst0[2])
3298 mkMov(dst0[2], fetchSrc(0, 2));
3299 if (dst0[3])
3300 mkMov(dst0[3], fetchSrc(1, 3));
3301 break;
3302 case TGSI_OPCODE_LRP:
3303 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3304 src0 = fetchSrc(0, c);
3305 src1 = fetchSrc(1, c);
3306 src2 = fetchSrc(2, c);
3307 mkOp3(OP_MAD, TYPE_F32, dst0[c],
3308 mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2)
3309 ->dnz = info->io.mul_zero_wins;
3310 }
3311 break;
3312 case TGSI_OPCODE_LIT:
3313 handleLIT(dst0);
3314 break;
3315 case TGSI_OPCODE_ISSG:
3316 case TGSI_OPCODE_SSG:
3317 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3318 src0 = fetchSrc(0, c);
3319 val0 = getScratch();
3320 val1 = getScratch();
3321 mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3322 mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3323 if (srcTy == TYPE_F32)
3324 mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3325 else
3326 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3327 }
3328 break;
3329 case TGSI_OPCODE_UCMP:
3330 srcTy = TYPE_U32;
3331 /* fallthrough */
3332 case TGSI_OPCODE_CMP:
3333 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3334 src0 = fetchSrc(0, c);
3335 src1 = fetchSrc(1, c);
3336 src2 = fetchSrc(2, c);
3337 if (src1 == src2)
3338 mkMov(dst0[c], src1);
3339 else
3340 mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3341 srcTy, dst0[c], srcTy, src1, src2, src0);
3342 }
3343 break;
3344 case TGSI_OPCODE_FRC:
3345 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3346 src0 = fetchSrc(0, c);
3347 val0 = getScratch();
3348 mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3349 mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3350 }
3351 break;
3352 case TGSI_OPCODE_ROUND:
3353 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3354 mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3355 ->rnd = ROUND_NI;
3356 break;
3357 case TGSI_OPCODE_SLT:
3358 case TGSI_OPCODE_SGE:
3359 case TGSI_OPCODE_SEQ:
3360 case TGSI_OPCODE_SGT:
3361 case TGSI_OPCODE_SLE:
3362 case TGSI_OPCODE_SNE:
3363 case TGSI_OPCODE_FSEQ:
3364 case TGSI_OPCODE_FSGE:
3365 case TGSI_OPCODE_FSLT:
3366 case TGSI_OPCODE_FSNE:
3367 case TGSI_OPCODE_ISGE:
3368 case TGSI_OPCODE_ISLT:
3369 case TGSI_OPCODE_USEQ:
3370 case TGSI_OPCODE_USGE:
3371 case TGSI_OPCODE_USLT:
3372 case TGSI_OPCODE_USNE:
3373 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3374 src0 = fetchSrc(0, c);
3375 src1 = fetchSrc(1, c);
3376 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3377 }
3378 break;
3379 case TGSI_OPCODE_VOTE_ALL:
3380 case TGSI_OPCODE_VOTE_ANY:
3381 case TGSI_OPCODE_VOTE_EQ:
3382 val0 = new_LValue(func, FILE_PREDICATE);
3383 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3384 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3385 mkOp1(op, dstTy, val0, val0)
3386 ->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3387 mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3388 }
3389 break;
3390 case TGSI_OPCODE_BALLOT:
3391 if (!tgsi.getDst(0).isMasked(0)) {
3392 val0 = new_LValue(func, FILE_PREDICATE);
3393 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, 0), zero);
3394 mkOp1(op, TYPE_U32, dst0[0], val0)->subOp = NV50_IR_SUBOP_VOTE_ANY;
3395 }
3396 if (!tgsi.getDst(0).isMasked(1))
3397 mkMov(dst0[1], zero, TYPE_U32);
3398 break;
3399 case TGSI_OPCODE_READ_FIRST:
3400 // ReadFirstInvocationARB(src) is implemented as
3401 // ReadInvocationARB(src, findLSB(ballot(true)))
3402 val0 = getScratch();
3403 mkOp1(OP_VOTE, TYPE_U32, val0, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
3404 mkOp1(OP_BREV, TYPE_U32, val0, val0);
3405 mkOp1(OP_BFIND, TYPE_U32, val0, val0)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3406 src1 = val0;
3407 /* fallthrough */
3408 case TGSI_OPCODE_READ_INVOC:
3409 if (tgsi.getOpcode() == TGSI_OPCODE_READ_INVOC)
3410 src1 = fetchSrc(1, 0);
3411 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3412 geni = mkOp3(op, dstTy, dst0[c], fetchSrc(0, c), src1, mkImm(0x1f));
3413 geni->subOp = NV50_IR_SUBOP_SHFL_IDX;
3414 }
3415 break;
3416 case TGSI_OPCODE_CLOCK:
3417 // Stick the 32-bit clock into the high dword of the logical result.
3418 if (!tgsi.getDst(0).isMasked(0))
3419 mkOp1(OP_MOV, TYPE_U32, dst0[0], zero);
3420 if (!tgsi.getDst(0).isMasked(1))
3421 mkOp1(OP_RDSV, TYPE_U32, dst0[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
3422 break;
3423 case TGSI_OPCODE_READ_HELPER:
3424 if (!tgsi.getDst(0).isMasked(0))
3425 mkOp1(OP_RDSV, TYPE_U32, dst0[0], mkSysVal(SV_THREAD_KILL, 0))
3426 ->fixed = 1;
3427 break;
3428 case TGSI_OPCODE_KILL_IF:
3429 val0 = new_LValue(func, FILE_PREDICATE);
3430 mask = 0;
3431 for (c = 0; c < 4; ++c) {
3432 const int s = tgsi.getSrc(0).getSwizzle(c);
3433 if (mask & (1 << s))
3434 continue;
3435 mask |= 1 << s;
3436 mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3437 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3438 }
3439 break;
3440 case TGSI_OPCODE_KILL:
3441 case TGSI_OPCODE_DEMOTE:
3442 // TODO: Should we make KILL exit that invocation? Some old shaders
3443 // don't like that.
3444 mkOp(OP_DISCARD, TYPE_NONE, NULL);
3445 break;
3446 case TGSI_OPCODE_TEX:
3447 case TGSI_OPCODE_TEX_LZ:
3448 case TGSI_OPCODE_TXB:
3449 case TGSI_OPCODE_TXL:
3450 case TGSI_OPCODE_TXP:
3451 case TGSI_OPCODE_LODQ:
3452 // R S L C Dx Dy
3453 handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3454 break;
3455 case TGSI_OPCODE_TXD:
3456 handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3457 break;
3458 case TGSI_OPCODE_TG4:
3459 handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3460 break;
3461 case TGSI_OPCODE_TEX2:
3462 handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3463 break;
3464 case TGSI_OPCODE_TXB2:
3465 case TGSI_OPCODE_TXL2:
3466 handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3467 break;
3468 case TGSI_OPCODE_SAMPLE:
3469 case TGSI_OPCODE_SAMPLE_B:
3470 case TGSI_OPCODE_SAMPLE_D:
3471 case TGSI_OPCODE_SAMPLE_L:
3472 case TGSI_OPCODE_SAMPLE_C:
3473 case TGSI_OPCODE_SAMPLE_C_LZ:
3474 handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3475 break;
3476 case TGSI_OPCODE_TXF_LZ:
3477 case TGSI_OPCODE_TXF:
3478 handleTXF(dst0, 1, 0x03);
3479 break;
3480 case TGSI_OPCODE_SAMPLE_I:
3481 handleTXF(dst0, 1, 0x03);
3482 break;
3483 case TGSI_OPCODE_SAMPLE_I_MS:
3484 handleTXF(dst0, 1, 0x20);
3485 break;
3486 case TGSI_OPCODE_TXQ:
3487 case TGSI_OPCODE_SVIEWINFO:
3488 handleTXQ(dst0, TXQ_DIMS, 1);
3489 break;
3490 case TGSI_OPCODE_TXQS:
3491 // The TXQ_TYPE query returns samples in its 3rd arg, but we need it to
3492 // be in .x
3493 dst0[1] = dst0[2] = dst0[3] = NULL;
3494 std::swap(dst0[0], dst0[2]);
3495 handleTXQ(dst0, TXQ_TYPE, 0);
3496 std::swap(dst0[0], dst0[2]);
3497 break;
3498 case TGSI_OPCODE_FBFETCH:
3499 handleFBFETCH(dst0);
3500 break;
3501 case TGSI_OPCODE_F2I:
3502 case TGSI_OPCODE_F2U:
3503 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3504 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3505 break;
3506 case TGSI_OPCODE_I2F:
3507 case TGSI_OPCODE_U2F:
3508 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3509 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3510 break;
3511 case TGSI_OPCODE_PK2H:
3512 val0 = getScratch();
3513 val1 = getScratch();
3514 mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3515 mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3516 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3517 mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3518 break;
3519 case TGSI_OPCODE_UP2H:
3520 src0 = fetchSrc(0, 0);
3521 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3522 geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3523 geni->subOp = c & 1;
3524 }
3525 break;
3526 case TGSI_OPCODE_EMIT:
3527 /* export the saved viewport index */
3528 if (viewport != NULL) {
3529 Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3530 info_out->out[info->io.viewportId].slot[0] * 4);
3531 mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3532 }
3533 /* handle user clip planes for each emitted vertex */
3534 if (info_out->io.genUserClip > 0)
3535 handleUserClipPlanes();
3536 /* fallthrough */
3537 case TGSI_OPCODE_ENDPRIM:
3538 {
3539 // get vertex stream (must be immediate)
3540 unsigned int stream = tgsi.getSrc(0).getValueU32(0, code->immd.data);
3541 if (stream && op == OP_RESTART)
3542 break;
3543 if (info_out->prop.gp.maxVertices == 0)
3544 break;
3545 src0 = mkImm(stream);
3546 mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3547 break;
3548 }
3549 case TGSI_OPCODE_IF:
3550 case TGSI_OPCODE_UIF:
3551 {
3552 BasicBlock *ifBB = new BasicBlock(func);
3553
3554 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3555 condBBs.push(bb);
3556 joinBBs.push(bb);
3557
3558 mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3559
3560 setPosition(ifBB, true);
3561 }
3562 break;
3563 case TGSI_OPCODE_ELSE:
3564 {
3565 BasicBlock *elseBB = new BasicBlock(func);
3566 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3567
3568 forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3569 condBBs.push(bb);
3570
3571 forkBB->getExit()->asFlow()->target.bb = elseBB;
3572 if (!bb->isTerminated())
3573 mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3574
3575 setPosition(elseBB, true);
3576 }
3577 break;
3578 case TGSI_OPCODE_ENDIF:
3579 {
3580 BasicBlock *convBB = new BasicBlock(func);
3581 BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3582 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3583
3584 if (!bb->isTerminated()) {
3585 // we only want join if none of the clauses ended with CONT/BREAK/RET
3586 if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3587 insertConvergenceOps(convBB, forkBB);
3588 mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3589 bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3590 }
3591
3592 if (prevBB->getExit()->op == OP_BRA) {
3593 prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3594 prevBB->getExit()->asFlow()->target.bb = convBB;
3595 }
3596 setPosition(convBB, true);
3597 }
3598 break;
3599 case TGSI_OPCODE_BGNLOOP:
3600 {
3601 BasicBlock *lbgnBB = new BasicBlock(func);
3602 BasicBlock *lbrkBB = new BasicBlock(func);
3603
3604 loopBBs.push(lbgnBB);
3605 breakBBs.push(lbrkBB);
3606 if (loopBBs.getSize() > func->loopNestingBound)
3607 func->loopNestingBound++;
3608
3609 mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3610
3611 bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3612 setPosition(lbgnBB, true);
3613 mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3614 }
3615 break;
3616 case TGSI_OPCODE_ENDLOOP:
3617 {
3618 BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3619
3620 if (!bb->isTerminated()) {
3621 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3622 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3623 }
3624 setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3625
3626 // If the loop never breaks (e.g. only has RET's inside), then there
3627 // will be no way to get to the break bb. However BGNLOOP will have
3628 // already made a PREBREAK to it, so it must be in the CFG.
3629 if (getBB()->cfg.incidentCount() == 0)
3630 loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3631 }
3632 break;
3633 case TGSI_OPCODE_BRK:
3634 {
3635 if (bb->isTerminated())
3636 break;
3637 BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3638 mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3639 bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3640 }
3641 break;
3642 case TGSI_OPCODE_CONT:
3643 {
3644 if (bb->isTerminated())
3645 break;
3646 BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3647 mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3648 contBB->explicitCont = true;
3649 bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3650 }
3651 break;
3652 case TGSI_OPCODE_BGNSUB:
3653 {
3654 Subroutine *s = getSubroutine(ip);
3655 BasicBlock *entry = new BasicBlock(s->f);
3656 BasicBlock *leave = new BasicBlock(s->f);
3657
3658 // multiple entrypoints possible, keep the graph connected
3659 if (prog->getType() == Program::TYPE_COMPUTE)
3660 prog->main->call.attach(&s->f->call, Graph::Edge::TREE);
3661
3662 sub.cur = s;
3663 s->f->setEntry(entry);
3664 s->f->setExit(leave);
3665 setPosition(entry, true);
3666 return true;
3667 }
3668 case TGSI_OPCODE_ENDSUB:
3669 {
3670 sub.cur = getSubroutine(prog->main);
3671 setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
3672 return true;
3673 }
3674 case TGSI_OPCODE_CAL:
3675 {
3676 Subroutine *s = getSubroutine(tgsi.getLabel());
3677 mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
3678 func->call.attach(&s->f->call, Graph::Edge::TREE);
3679 return true;
3680 }
3681 case TGSI_OPCODE_RET:
3682 {
3683 if (bb->isTerminated())
3684 return true;
3685 BasicBlock *leave = BasicBlock::get(func->cfgExit);
3686
3687 if (!isEndOfSubroutine(ip + 1)) {
3688 // insert a PRERET at the entry if this is an early return
3689 // (only needed for sharing code in the epilogue)
3690 BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
3691 if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
3692 BasicBlock *pos = getBB();
3693 setPosition(root, false);
3694 mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
3695 setPosition(pos, true);
3696 }
3697 }
3698 mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
3699 bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
3700 }
3701 break;
3702 case TGSI_OPCODE_END:
3703 {
3704 // attach and generate epilogue code
3705 BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
3706 bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
3707 setPosition(epilogue, true);
3708 if (prog->getType() == Program::TYPE_FRAGMENT)
3709 exportOutputs();
3710 if ((prog->getType() == Program::TYPE_VERTEX ||
3711 prog->getType() == Program::TYPE_TESSELLATION_EVAL
3712 ) && info_out->io.genUserClip > 0)
3713 handleUserClipPlanes();
3714 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
3715 }
3716 break;
3717 case TGSI_OPCODE_SWITCH:
3718 case TGSI_OPCODE_CASE:
3719 ERROR("switch/case opcode encountered, should have been lowered\n");
3720 abort();
3721 break;
3722 case TGSI_OPCODE_LOAD:
3723 handleLOAD(dst0);
3724 break;
3725 case TGSI_OPCODE_STORE:
3726 handleSTORE();
3727 break;
3728 case TGSI_OPCODE_BARRIER:
3729 geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
3730 geni->fixed = 1;
3731 geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
3732 break;
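     // MEMBAR: barriers restricted to thread-group/shared scope become a
     // CTA-level membar; anything wider is conservatively mapped to a
     // global-scope membar.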
3733 case TGSI_OPCODE_MEMBAR:
3734 {
3735 uint32_t level = tgsi.getSrc(0).getValueU32(0, code->immd.data);
3736 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3737 geni->fixed = 1;
3738 if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
3739 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
3740 else
3741 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
3742 }
3743 break;
3744 case TGSI_OPCODE_ATOMUADD:
3745 case TGSI_OPCODE_ATOMXCHG:
3746 case TGSI_OPCODE_ATOMCAS:
3747 case TGSI_OPCODE_ATOMAND:
3748 case TGSI_OPCODE_ATOMOR:
3749 case TGSI_OPCODE_ATOMXOR:
3750 case TGSI_OPCODE_ATOMUMIN:
3751 case TGSI_OPCODE_ATOMIMIN:
3752 case TGSI_OPCODE_ATOMUMAX:
3753 case TGSI_OPCODE_ATOMIMAX:
3754 case TGSI_OPCODE_ATOMFADD:
3755 case TGSI_OPCODE_ATOMDEC_WRAP:
3756 case TGSI_OPCODE_ATOMINC_WRAP:
3757 handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
3758 break;
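     // RESQ: for buffer resources, OP_BUFQ queries the buffer's size; for
     // images (including bindless handles passed indirectly) a texture-style
     // OP_SUQ query is emitted with a mask built from the enabled channels.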
3759 case TGSI_OPCODE_RESQ:
3760 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
3761 Value *ind = NULL;
3762 if (tgsi.getSrc(0).isIndirect(0))
3763 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
3764 geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
3765 makeSym(tgsi.getSrc(0).getFile(),
3766 tgsi.getSrc(0).getIndex(0), -1, 0, 0));
3767 if (ind)
3768 geni->setIndirect(0, 1, ind);
3769 } else {
3770 TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
3771 for (int c = 0, d = 0; c < 4; ++c) {
3772 if (dst0[c]) {
3773 texi->setDef(d++, dst0[c]);
3774 texi->tex.mask |= 1 << c;
3775 }
3776 }
3777 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE) {
3778 texi->tex.r = tgsi.getSrc(0).getIndex(0);
3779 if (tgsi.getSrc(0).isIndirect(0))
3780 texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
3781 } else {
3782 texi->tex.bindless = true;
3783 texi->setIndirectR(fetchSrc(0, 0));
3784 }
3785 texi->tex.target = tgsi.getImageTarget();
3786
3787 bb->insertTail(texi);
3788 }
3789 break;
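     // IBFE/UBFE: OP_EXTBF expects the bit offset in bits 0-7 and the field
     // width in bits 8-15 of its second operand, i.e. roughly
     // dst = (src0 >> offset) & ~(~0 << width), sign-extended for IBFE.
     // With immediate offset/width that word is built directly; otherwise
     // INSBF with 0x808 inserts the 8-bit width at position 8 on top of the
     // offset.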
3790 case TGSI_OPCODE_IBFE:
3791 case TGSI_OPCODE_UBFE:
3792 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3793 src0 = fetchSrc(0, c);
3794 val0 = getScratch();
3795 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
3796 tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
3797 loadImm(val0, (tgsi.getSrc(2).getValueU32(c, code->immd.data) << 8) |
3798 tgsi.getSrc(1).getValueU32(c, code->immd.data));
3799 } else {
3800 src1 = fetchSrc(1, c);
3801 src2 = fetchSrc(2, c);
3802 mkOp3(OP_INSBF, TYPE_U32, val0, src2, mkImm(0x808), src1);
3803 }
3804 mkOp2(OP_EXTBF, dstTy, dst0[c], src0, val0);
3805 }
3806 break;
3807 case TGSI_OPCODE_BFI:
3808 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3809 src0 = fetchSrc(0, c);
3810 src1 = fetchSrc(1, c);
3811 src2 = fetchSrc(2, c);
3812 src3 = fetchSrc(3, c);
3813 val0 = getScratch();
3814 mkOp3(OP_INSBF, TYPE_U32, val0, src3, mkImm(0x808), src2);
3815 mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, val0, src0);
3816 }
3817 break;
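     // LSB: reverse the bits and find the highest set bit of the result;
     // the BFIND "shift amount" subop then yields the index of the original
     // least significant set bit.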
3818 case TGSI_OPCODE_LSB:
3819 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3820 src0 = fetchSrc(0, c);
3821 val0 = getScratch();
3822 mkOp1(OP_BREV, TYPE_U32, val0, src0);
3823 geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], val0);
3824 geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3825 }
3826 break;
3827 case TGSI_OPCODE_IMSB:
3828 case TGSI_OPCODE_UMSB:
3829 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3830 src0 = fetchSrc(0, c);
3831 mkOp1(OP_BFIND, srcTy, dst0[c], src0);
3832 }
3833 break;
3834 case TGSI_OPCODE_BREV:
3835 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3836 src0 = fetchSrc(0, c);
3837 mkOp1(OP_BREV, TYPE_U32, dst0[c], src0);
3838 }
3839 break;
3840 case TGSI_OPCODE_POPC:
3841 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3842 src0 = fetchSrc(0, c);
3843 mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
3844 }
3845 break;
3846 case TGSI_OPCODE_INTERP_CENTROID:
3847 case TGSI_OPCODE_INTERP_SAMPLE:
3848 case TGSI_OPCODE_INTERP_OFFSET:
3849 handleINTERP(dst0);
3850 break;
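     // The 64-bit cases below share one pattern: TGSI stores 64-bit operands
     // as pairs of 32-bit channels, so two channels are MERGEd into a single
     // 64-bit value, the operation runs at full width, and the result is
     // SPLIT back into two 32-bit destination channels.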
3851 case TGSI_OPCODE_I642F:
3852 case TGSI_OPCODE_U642F:
3853 case TGSI_OPCODE_D2I:
3854 case TGSI_OPCODE_D2U:
3855 case TGSI_OPCODE_D2F: {
3856 int pos = 0;
3857 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3858 Value *dreg = getSSA(8);
3859 src0 = fetchSrc(0, pos);
3860 src1 = fetchSrc(0, pos + 1);
3861 mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
3862 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
3863 if (!isFloatType(dstTy))
3864 cvt->rnd = ROUND_Z;
3865 pos += 2;
3866 }
3867 break;
3868 }
3869 case TGSI_OPCODE_I2I64:
3870 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3871 dst0[c] = fetchSrc(0, c / 2);
3872 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
3873 c++;
3874 }
3875 break;
3876 case TGSI_OPCODE_U2I64:
3877 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3878 dst0[c] = fetchSrc(0, c / 2);
3879 dst0[c + 1] = zero;
3880 c++;
3881 }
3882 break;
3883 case TGSI_OPCODE_F2I64:
3884 case TGSI_OPCODE_F2U64:
3885 case TGSI_OPCODE_I2D:
3886 case TGSI_OPCODE_U2D:
3887 case TGSI_OPCODE_F2D:
3888 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3889 Value *dreg = getSSA(8);
3890 Instruction *cvt = mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
3891 if (!isFloatType(dstTy))
3892 cvt->rnd = ROUND_Z;
3893 mkSplit(&dst0[c], 4, dreg);
3894 c++;
3895 }
3896 break;
3897 case TGSI_OPCODE_D2I64:
3898 case TGSI_OPCODE_D2U64:
3899 case TGSI_OPCODE_I642D:
3900 case TGSI_OPCODE_U642D:
3901 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3902 src0 = getSSA(8);
3903 Value *dst = getSSA(8), *tmp[2];
3904 tmp[0] = fetchSrc(0, c);
3905 tmp[1] = fetchSrc(0, c + 1);
3906 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3907 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst, srcTy, src0);
3908 if (!isFloatType(dstTy))
3909 cvt->rnd = ROUND_Z;
3910 mkSplit(&dst0[c], 4, dst);
3911 c++;
3912 }
3913 break;
3914 case TGSI_OPCODE_I64NEG:
3915 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3916 src0 = getSSA(8);
3917 Value *dst = getSSA(8), *tmp[2];
3918 tmp[0] = fetchSrc(0, c);
3919 tmp[1] = fetchSrc(0, c + 1);
3920 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3921 mkOp2(OP_SUB, dstTy, dst, zero, src0);
3922 mkSplit(&dst0[c], 4, dst);
3923 c++;
3924 }
3925 break;
3926 case TGSI_OPCODE_I64ABS:
3927 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3928 src0 = getSSA(8);
3929 Value *neg = getSSA(8), *srcComp[2], *negComp[2];
3930 srcComp[0] = fetchSrc(0, c);
3931 srcComp[1] = fetchSrc(0, c + 1);
3932 mkOp2(OP_MERGE, TYPE_U64, src0, srcComp[0], srcComp[1]);
3933 mkOp2(OP_SUB, dstTy, neg, zero, src0);
3934 mkSplit(negComp, 4, neg);
3935 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c], TYPE_S32,
3936 negComp[0], srcComp[0], srcComp[1]);
3937 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c + 1], TYPE_S32,
3938 negComp[1], srcComp[1], srcComp[1]);
3939 c++;
3940 }
3941 break;
3942 case TGSI_OPCODE_DABS:
3943 case TGSI_OPCODE_DNEG:
3944 case TGSI_OPCODE_DRCP:
3945 case TGSI_OPCODE_DSQRT:
3946 case TGSI_OPCODE_DRSQ:
3947 case TGSI_OPCODE_DTRUNC:
3948 case TGSI_OPCODE_DCEIL:
3949 case TGSI_OPCODE_DFLR:
3950 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3951 src0 = getSSA(8);
3952 Value *dst = getSSA(8), *tmp[2];
3953 tmp[0] = fetchSrc(0, c);
3954 tmp[1] = fetchSrc(0, c + 1);
3955 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3956 mkOp1(op, dstTy, dst, src0);
3957 mkSplit(&dst0[c], 4, dst);
3958 c++;
3959 }
3960 break;
3961 case TGSI_OPCODE_DFRAC:
3962 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3963 src0 = getSSA(8);
3964 Value *dst = getSSA(8), *tmp[2];
3965 tmp[0] = fetchSrc(0, c);
3966 tmp[1] = fetchSrc(0, c + 1);
3967 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3968 mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
3969 mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
3970 mkSplit(&dst0[c], 4, dst);
3971 c++;
3972 }
3973 break;
3974 case TGSI_OPCODE_U64SEQ:
3975 case TGSI_OPCODE_U64SNE:
3976 case TGSI_OPCODE_U64SLT:
3977 case TGSI_OPCODE_U64SGE:
3978 case TGSI_OPCODE_I64SLT:
3979 case TGSI_OPCODE_I64SGE:
3980 case TGSI_OPCODE_DSLT:
3981 case TGSI_OPCODE_DSGE:
3982 case TGSI_OPCODE_DSEQ:
3983 case TGSI_OPCODE_DSNE: {
3984 int pos = 0;
3985 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3986 Value *tmp[2];
3987
3988 src0 = getSSA(8);
3989 src1 = getSSA(8);
3990 tmp[0] = fetchSrc(0, pos);
3991 tmp[1] = fetchSrc(0, pos + 1);
3992 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3993 tmp[0] = fetchSrc(1, pos);
3994 tmp[1] = fetchSrc(1, pos + 1);
3995 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
3996 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3997 pos += 2;
3998 }
3999 break;
4000 }
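     // 64-bit min/max is split into two 32-bit operations linked through a
     // flags register: the high words go first (MINMAX_HIGH defines the
     // flag), then the low words are resolved by MINMAX_LOW consuming it.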
4001 case TGSI_OPCODE_U64MIN:
4002 case TGSI_OPCODE_U64MAX:
4003 case TGSI_OPCODE_I64MIN:
4004 case TGSI_OPCODE_I64MAX: {
4005 dstTy = isSignedIntType(dstTy) ? TYPE_S32 : TYPE_U32;
4006 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4007 Value *flag = getSSA(1, FILE_FLAGS);
4008 src0 = fetchSrc(0, c + 1);
4009 src1 = fetchSrc(1, c + 1);
4010 geni = mkOp2(op, dstTy, dst0[c + 1], src0, src1);
4011 geni->subOp = NV50_IR_SUBOP_MINMAX_HIGH;
4012 geni->setFlagsDef(1, flag);
4013
4014 src0 = fetchSrc(0, c);
4015 src1 = fetchSrc(1, c);
4016 geni = mkOp2(op, TYPE_U32, dst0[c], src0, src1);
4017 geni->subOp = NV50_IR_SUBOP_MINMAX_LOW;
4018 geni->setFlagsSrc(2, flag);
4019
4020 c++;
4021 }
4022 break;
4023 }
4024 case TGSI_OPCODE_U64SHL:
4025 case TGSI_OPCODE_I64SHR:
4026 case TGSI_OPCODE_U64SHR:
4027 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4028 src0 = getSSA(8);
4029 Value *dst = getSSA(8), *tmp[2];
4030 tmp[0] = fetchSrc(0, c);
4031 tmp[1] = fetchSrc(0, c + 1);
4032 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4033 // Theoretically src1 is a 64-bit value but in practice only the low
4034 // bits matter. The IR expects this to be a 32-bit value.
4035 src1 = fetchSrc(1, c);
4036 mkOp2(op, dstTy, dst, src0, src1);
4037 mkSplit(&dst0[c], 4, dst);
4038 c++;
4039 }
4040 break;
4041 case TGSI_OPCODE_U64ADD:
4042 case TGSI_OPCODE_U64MUL:
4043 case TGSI_OPCODE_DADD:
4044 case TGSI_OPCODE_DMUL:
4045 case TGSI_OPCODE_DDIV:
4046 case TGSI_OPCODE_DMAX:
4047 case TGSI_OPCODE_DMIN:
4048 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4049 src0 = getSSA(8);
4050 src1 = getSSA(8);
4051 Value *dst = getSSA(8), *tmp[2];
4052 tmp[0] = fetchSrc(0, c);
4053 tmp[1] = fetchSrc(0, c + 1);
4054 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4055 tmp[0] = fetchSrc(1, c);
4056 tmp[1] = fetchSrc(1, c + 1);
4057 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4058 mkOp2(op, dstTy, dst, src0, src1);
4059 mkSplit(&dst0[c], 4, dst);
4060 c++;
4061 }
4062 break;
4063 case TGSI_OPCODE_DMAD:
4064 case TGSI_OPCODE_DFMA:
4065 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4066 src0 = getSSA(8);
4067 src1 = getSSA(8);
4068 src2 = getSSA(8);
4069 Value *dst = getSSA(8), *tmp[2];
4070 tmp[0] = fetchSrc(0, c);
4071 tmp[1] = fetchSrc(0, c + 1);
4072 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4073 tmp[0] = fetchSrc(1, c);
4074 tmp[1] = fetchSrc(1, c + 1);
4075 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4076 tmp[0] = fetchSrc(2, c);
4077 tmp[1] = fetchSrc(2, c + 1);
4078 mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
4079 mkOp3(op, dstTy, dst, src0, src1, src2);
4080 mkSplit(&dst0[c], 4, dst);
4081 c++;
4082 }
4083 break;
4084 case TGSI_OPCODE_DROUND:
4085 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4086 src0 = getSSA(8);
4087 Value *dst = getSSA(8), *tmp[2];
4088 tmp[0] = fetchSrc(0, c);
4089 tmp[1] = fetchSrc(0, c + 1);
4090 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4091 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
4092 ->rnd = ROUND_NI;
4093 mkSplit(&dst0[c], 4, dst);
4094 c++;
4095 }
4096 break;
4097 case TGSI_OPCODE_DSSG:
4098 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4099 src0 = getSSA(8);
4100 Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
4101 tmp[0] = fetchSrc(0, c);
4102 tmp[1] = fetchSrc(0, c + 1);
4103 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4104
4105 val0 = getScratch();
4106 val1 = getScratch();
4107 // The zero has the wrong width here (it is only 32-bit), but that is
4108 // fine in the end because it gets replaced with $r63.
4109 mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
4110 mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
4111 mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
4112 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
4113 mkSplit(&dst0[c], 4, dst);
4114 c++;
4115 }
4116 break;
4117 case TGSI_OPCODE_I64SSG:
4118 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4119 src0 = getSSA(8);
4120 Value *tmp[2];
4121 tmp[0] = fetchSrc(0, c);
4122 tmp[1] = fetchSrc(0, c + 1);
4123 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4124
4125 val0 = getScratch();
4126 val1 = getScratch();
4127 mkCmp(OP_SET, CC_GT, TYPE_U32, val0, TYPE_S64, src0, zero);
4128 mkCmp(OP_SET, CC_LT, TYPE_U32, val1, TYPE_S64, src0, zero);
4129 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
4130 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
4131 c++;
4132 }
4133 break;
4134 default:
4135 ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
4136 assert(0);
4137 break;
4138 }
4139
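   // Common destination writeback: results left in temporaries are moved
   // into the values backing the destination registers and stored via
   // storeDst. STORE performs its own memory writes and is skipped here.
   // The cached vertex base (vtxBaseValid) is only trusted within a single
   // instruction.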
4140 if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
4141 for (c = 0; c < 4; ++c) {
4142 if (!dst0[c])
4143 continue;
4144 if (dst0[c] != rDst0[c])
4145 mkMov(rDst0[c], dst0[c]);
4146 storeDst(0, c, rDst0[c]);
4147 }
4148 }
4149 vtxBaseValid = 0;
4150
4151 return true;
4152 }
4153
4154 void
4155 Converter::exportOutputs()
4156 {
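   // Lowered alpha test: when the driver sets alphaRefBase, the alpha
   // channel of COLOR[0] is compared against a reference value loaded from
   // the aux constant buffer, and failing fragments are DISCARDed. CC_TR
   // with subOp 1 is presumably a placeholder that the backend/driver
   // replaces with the actual compare function.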
4157 if (info->io.alphaRefBase) {
4158 for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
4159 if (info_out->out[i].sn != TGSI_SEMANTIC_COLOR ||
4160 info_out->out[i].si != 0)
4161 continue;
4162 const unsigned int c = 3;
4163 if (!oData.exists(sub.cur->values, i, c))
4164 continue;
4165 Value *val = oData.load(sub.cur->values, i, c, NULL);
4166 if (!val)
4167 continue;
4168
4169 Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4170 TYPE_U32, info->io.alphaRefBase);
4171 Value *pred = new_LValue(func, FILE_PREDICATE);
4172 mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
4173 mkLoadv(TYPE_U32, ref, NULL))
4174 ->subOp = 1;
4175 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
4176 }
4177 }
4178
4179 for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
4180 for (unsigned int c = 0; c < 4; ++c) {
4181 if (!oData.exists(sub.cur->values, i, c))
4182 continue;
4183 Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
4184 info_out->out[i].slot[c] * 4);
4185 Value *val = oData.load(sub.cur->values, i, c, NULL);
4186 if (val) {
4187 if (info_out->out[i].sn == TGSI_SEMANTIC_POSITION)
4188 mkOp1(OP_SAT, TYPE_F32, val, val);
4189 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
4190 }
4191 }
4192 }
4193 }
4194
4195 Converter::Converter(Program *ir, const tgsi::Source *code, nv50_ir_prog_info_out *info_out)
4196 : ConverterCommon(ir, code->info, info_out),
4197 code(code),
4198 tgsi(NULL),
4199 tData(this), lData(this), aData(this), oData(this)
4200 {
4201 const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
4202 const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
4203 const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);
4204
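   // Map TGSI register files onto IR data arrays: temporaries and address
   // registers are kept in GPRs, oData stages shader outputs (fragment
   // outputs are exported from it in the epilogue), and lData mirrors the
   // temporary file in local memory, presumably for indirectly addressed
   // arrays that cannot stay in registers.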
4205 tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
4206 lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
4207 aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
4208 oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);
4209
4210 zero = mkImm((uint32_t)0);
4211
4212 vtxBaseValid = 0;
4213 }
4214
4215 Converter::~Converter()
4216 {
4217 }
4218
4219 inline const Converter::Location *
4220 Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
4221 {
4222 ValueMap::l_iterator it = s->values.l.find(v);
4223 return it == s->values.l.end() ? NULL : &it->second;
4224 }
4225
4226 template<typename T> inline void
4227 Converter::BindArgumentsPass::updateCallArgs(
4228 Instruction *i, void (Instruction::*setArg)(int, Value *),
4229 T (Function::*proto))
4230 {
4231 Function *g = i->asFlow()->target.fn;
4232 Subroutine *subg = conv.getSubroutine(g);
4233
4234 for (unsigned a = 0; a < (g->*proto).size(); ++a) {
4235 Value *v = (g->*proto)[a].get();
4236 const Converter::Location &l = *getValueLocation(subg, v);
4237 Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);
4238
4239 (i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
4240 }
4241 }
4242
4243 template<typename T> inline void
4244 Converter::BindArgumentsPass::updatePrototype(
4245 BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
4246 {
4247 (func->*updateSet)();
4248
4249 for (unsigned i = 0; i < set->getSize(); ++i) {
4250 Value *v = func->getLValue(i);
4251 const Converter::Location *l = getValueLocation(sub, v);
4252
4253 // only include values with a matching TGSI register
4254 if (set->test(i) && l && !conv.code->locals.count(*l))
4255 (func->*proto).push_back(v);
4256 }
4257 }
4258
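// Bind subroutine arguments: for every non-builtin CALL, point its sources
// and defs at the values backing the callee's ins/outs; for functions other
// than main, build those ins/outs from the live-in set at the entry and the
// def set at the exit, skipping values whose location is recorded in
// code->locals.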
4259 bool
4260 Converter::BindArgumentsPass::visit(Function *f)
4261 {
4262 sub = conv.getSubroutine(f);
4263
4264 for (ArrayList::Iterator bi = f->allBBlocks.iterator();
4265 !bi.end(); bi.next()) {
4266 for (Instruction *i = BasicBlock::get(bi)->getFirst();
4267 i; i = i->next) {
4268 if (i->op == OP_CALL && !i->asFlow()->builtin) {
4269 updateCallArgs(i, &Instruction::setSrc, &Function::ins);
4270 updateCallArgs(i, &Instruction::setDef, &Function::outs);
4271 }
4272 }
4273 }
4274
4275 if (func == prog->main /* && prog->getType() != Program::TYPE_COMPUTE */)
4276 return true;
4277 updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
4278 &Function::buildLiveSets, &Function::ins);
4279 updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
4280 &Function::buildDefSets, &Function::outs);
4281
4282 return true;
4283 }
4284
4285 bool
4286 Converter::run()
4287 {
4288 BasicBlock *entry = new BasicBlock(prog->main);
4289 BasicBlock *leave = new BasicBlock(prog->main);
4290
4291 prog->main->setEntry(entry);
4292 prog->main->setExit(leave);
4293
4294 setPosition(entry, true);
4295 sub.cur = getSubroutine(prog->main);
4296
4297 if (info_out->io.genUserClip > 0) {
4298 for (int c = 0; c < 4; ++c)
4299 clipVtx[c] = getScratch();
4300 }
4301
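   // Per-stage prologue: TCS computes outBase (lane id minus invocation id,
   // presumably the lane of the patch's first invocation) for addressing its
   // outputs, and fragment shaders preload 1 / position.w for perspective
   // interpolation.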
4302 switch (prog->getType()) {
4303 case Program::TYPE_TESSELLATION_CONTROL:
4304 outBase = mkOp2v(
4305 OP_SUB, TYPE_U32, getSSA(),
4306 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
4307 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
4308 break;
4309 case Program::TYPE_FRAGMENT: {
4310 Symbol *sv = mkSysVal(SV_POSITION, 3);
4311 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
4312 mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
4313 break;
4314 }
4315 default:
4316 break;
4317 }
4318
4319 if (info->io.viewportId >= 0)
4320 viewport = getScratch();
4321 else
4322 viewport = NULL;
4323
4324 for (ip = 0; ip < code->scan.num_instructions; ++ip) {
4325 if (!handleInstruction(&code->insns[ip]))
4326 return false;
4327 }
4328
4329 if (!BindArgumentsPass(*this).run(prog))
4330 return false;
4331
4332 return true;
4333 }
4334
4335 } // unnamed namespace
4336
4337 namespace nv50_ir {
4338
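// Translate a TGSI shader: scan the source first (filling the static parts
// of info_out, including the TLS size), then run the Converter to build the
// nv50 IR for this Program.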
4339 bool
4340 Program::makeFromTGSI(struct nv50_ir_prog_info *info,
4341 struct nv50_ir_prog_info_out *info_out)
4342 {
4343 tgsi::Source src(info, info_out, this);
4344 if (!src.scanSource())
4345 return false;
4346 tlsSize = info_out->bin.tlsSpace;
4347
4348 Converter builder(this, &src, info_out);
4349 return builder.run();
4350 }
4351
4352 } // namespace nv50_ir