gm107/ir: use lane 0 for manual textureGrad handling
src/gallium/drivers/nouveau/codegen/nv50_ir_from_tgsi.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "tgsi/tgsi_dump.h"
24 #include "tgsi/tgsi_scan.h"
25 #include "tgsi/tgsi_util.h"
26
27 #include <set>
28
29 #include "codegen/nv50_ir.h"
30 #include "codegen/nv50_ir_util.h"
31 #include "codegen/nv50_ir_build_util.h"
32
33 namespace tgsi {
34
35 class Source;
36
37 static nv50_ir::operation translateOpcode(uint opcode);
38 static nv50_ir::DataFile translateFile(uint file);
39 static nv50_ir::TexTarget translateTexture(uint texTarg);
40 static nv50_ir::SVSemantic translateSysVal(uint sysval);
41 static nv50_ir::CacheMode translateCacheMode(uint qualifier);
42 static nv50_ir::ImgFormat translateImgFormat(uint format);
43
44 class Instruction
45 {
46 public:
47 Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49 class SrcRegister
50 {
51 public:
52 SrcRegister(const struct tgsi_full_src_register *src)
53 : reg(src->Register),
54 fsr(src)
55 { }
56
57 SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59 SrcRegister(const struct tgsi_ind_register& ind)
60 : reg(tgsi_util_get_src_from_ind(&ind)),
61 fsr(NULL)
62 { }
63
64 struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65 {
66 struct tgsi_src_register reg;
67 memset(&reg, 0, sizeof(reg));
68 reg.Index = off.Index;
69 reg.File = off.File;
70 reg.SwizzleX = off.SwizzleX;
71 reg.SwizzleY = off.SwizzleY;
72 reg.SwizzleZ = off.SwizzleZ;
73 return reg;
74 }
75
76 SrcRegister(const struct tgsi_texture_offset& off) :
77 reg(offsetToSrc(off)),
78 fsr(NULL)
79 { }
80
81 uint getFile() const { return reg.File; }
82
83 bool is2D() const { return reg.Dimension; }
84
85 bool isIndirect(int dim) const
86 {
87 return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88 }
89
90 int getIndex(int dim) const
91 {
92 return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93 }
94
95 int getSwizzle(int chan) const
96 {
97 return tgsi_util_get_src_register_swizzle(&reg, chan);
98 }
99
100 int getArrayId() const
101 {
102 if (isIndirect(0))
103 return fsr->Indirect.ArrayID;
104 return 0;
105 }
106
107 nv50_ir::Modifier getMod(int chan) const;
108
109 SrcRegister getIndirect(int dim) const
110 {
111 assert(fsr && isIndirect(dim));
112 if (dim)
113 return SrcRegister(fsr->DimIndirect);
114 return SrcRegister(fsr->Indirect);
115 }
116
117 uint32_t getValueU32(int c, const struct nv50_ir_prog_info *info) const
118 {
119 assert(reg.File == TGSI_FILE_IMMEDIATE);
120 assert(!reg.Absolute);
121 assert(!reg.Negate);
122 return info->immd.data[reg.Index * 4 + getSwizzle(c)];
123 }
124
125 private:
126 const struct tgsi_src_register reg;
127 const struct tgsi_full_src_register *fsr;
128 };
129
130 class DstRegister
131 {
132 public:
133 DstRegister(const struct tgsi_full_dst_register *dst)
134 : reg(dst->Register),
135 fdr(dst)
136 { }
137
138 DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140 uint getFile() const { return reg.File; }
141
142 bool is2D() const { return reg.Dimension; }
143
144 bool isIndirect(int dim) const
145 {
146 return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147 }
148
149 int getIndex(int dim) const
150 {
151 return (dim && fdr) ? fdr->Dimension.Index : reg.Index;
152 }
153
154 unsigned int getMask() const { return reg.WriteMask; }
155
156 bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158 SrcRegister getIndirect(int dim) const
159 {
160 assert(fdr && isIndirect(dim));
161 if (dim)
162 return SrcRegister(fdr->DimIndirect);
163 return SrcRegister(fdr->Indirect);
164 }
165
166 int getArrayId() const
167 {
168 if (isIndirect(0))
169 return fdr->Indirect.ArrayID;
170 return 0;
171 }
172
173 private:
174 const struct tgsi_dst_register reg;
175 const struct tgsi_full_dst_register *fdr;
176 };
177
178 inline uint getOpcode() const { return insn->Instruction.Opcode; }
179
180 unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
181 unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
182
183 // mask of used components of source s
184 unsigned int srcMask(unsigned int s) const;
185 unsigned int texOffsetMask() const;
186
187 SrcRegister getSrc(unsigned int s) const
188 {
189 assert(s < srcCount());
190 return SrcRegister(&insn->Src[s]);
191 }
192
193 DstRegister getDst(unsigned int d) const
194 {
195 assert(d < dstCount());
196 return DstRegister(&insn->Dst[d]);
197 }
198
199 SrcRegister getTexOffset(unsigned int i) const
200 {
201 assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
202 return SrcRegister(insn->TexOffsets[i]);
203 }
204
205 unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
206
207 bool checkDstSrcAliasing() const;
208
209 inline nv50_ir::operation getOP() const {
210 return translateOpcode(getOpcode()); }
211
212 nv50_ir::DataType inferSrcType() const;
213 nv50_ir::DataType inferDstType() const;
214
215 nv50_ir::CondCode getSetCond() const;
216
217 nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
218
219 nv50_ir::CacheMode getCacheMode() const {
220 if (!insn->Instruction.Memory)
221 return nv50_ir::CACHE_CA;
222 return translateCacheMode(insn->Memory.Qualifier);
223 }
224
225 inline uint getLabel() { return insn->Label.Label; }
226
227 unsigned getSaturate() const { return insn->Instruction.Saturate; }
228
229 void print() const
230 {
231 tgsi_dump_instruction(insn, 1);
232 }
233
234 private:
235 const struct tgsi_full_instruction *insn;
236 };
237
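// mask of texel offset components that are meaningful for this instruction's texture target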
238 unsigned int Instruction::texOffsetMask() const
239 {
240 const struct tgsi_instruction_texture *tex = &insn->Texture;
241 assert(insn->Instruction.Texture);
242
243 switch (tex->Texture) {
244 case TGSI_TEXTURE_BUFFER:
245 case TGSI_TEXTURE_1D:
246 case TGSI_TEXTURE_SHADOW1D:
247 case TGSI_TEXTURE_1D_ARRAY:
248 case TGSI_TEXTURE_SHADOW1D_ARRAY:
249 return 0x1;
250 case TGSI_TEXTURE_2D:
251 case TGSI_TEXTURE_SHADOW2D:
252 case TGSI_TEXTURE_2D_ARRAY:
253 case TGSI_TEXTURE_SHADOW2D_ARRAY:
254 case TGSI_TEXTURE_RECT:
255 case TGSI_TEXTURE_SHADOWRECT:
256 case TGSI_TEXTURE_2D_MSAA:
257 case TGSI_TEXTURE_2D_ARRAY_MSAA:
258 return 0x3;
259 case TGSI_TEXTURE_3D:
260 return 0x7;
261 default:
262 assert(!"Unexpected texture target");
263 return 0xf;
264 }
265 }
266
267 unsigned int Instruction::srcMask(unsigned int s) const
268 {
269 unsigned int mask = insn->Dst[0].Register.WriteMask;
270
271 switch (insn->Instruction.Opcode) {
272 case TGSI_OPCODE_COS:
273 case TGSI_OPCODE_SIN:
274 return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
275 case TGSI_OPCODE_DP2:
276 return 0x3;
277 case TGSI_OPCODE_DP3:
278 return 0x7;
279 case TGSI_OPCODE_DP4:
280 case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
281 return 0xf;
282 case TGSI_OPCODE_DST:
283 return mask & (s ? 0xa : 0x6);
284 case TGSI_OPCODE_EX2:
285 case TGSI_OPCODE_EXP:
286 case TGSI_OPCODE_LG2:
287 case TGSI_OPCODE_LOG:
288 case TGSI_OPCODE_POW:
289 case TGSI_OPCODE_RCP:
290 case TGSI_OPCODE_RSQ:
291 return 0x1;
292 case TGSI_OPCODE_IF:
293 case TGSI_OPCODE_UIF:
294 return 0x1;
295 case TGSI_OPCODE_LIT:
296 return 0xb;
297 case TGSI_OPCODE_TEX2:
298 case TGSI_OPCODE_TXB2:
299 case TGSI_OPCODE_TXL2:
300 return (s == 0) ? 0xf : 0x3;
301 case TGSI_OPCODE_TEX:
302 case TGSI_OPCODE_TXB:
303 case TGSI_OPCODE_TXD:
304 case TGSI_OPCODE_TXL:
305 case TGSI_OPCODE_TXP:
306 case TGSI_OPCODE_TXF:
307 case TGSI_OPCODE_TG4:
308 case TGSI_OPCODE_TEX_LZ:
309 case TGSI_OPCODE_TXF_LZ:
310 case TGSI_OPCODE_LODQ:
311 {
312 const struct tgsi_instruction_texture *tex = &insn->Texture;
313
314 assert(insn->Instruction.Texture);
315
316 mask = 0x7;
317 if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
318 insn->Instruction.Opcode != TGSI_OPCODE_TEX_LZ &&
319 insn->Instruction.Opcode != TGSI_OPCODE_TXF_LZ &&
320 insn->Instruction.Opcode != TGSI_OPCODE_TXD)
321 mask |= 0x8; /* bias, lod or proj */
322
323 switch (tex->Texture) {
324 case TGSI_TEXTURE_1D:
325 mask &= 0x9;
326 break;
327 case TGSI_TEXTURE_SHADOW1D:
328 mask &= 0xd;
329 break;
330 case TGSI_TEXTURE_1D_ARRAY:
331 case TGSI_TEXTURE_2D:
332 case TGSI_TEXTURE_RECT:
333 mask &= 0xb;
334 break;
335 case TGSI_TEXTURE_CUBE_ARRAY:
336 case TGSI_TEXTURE_SHADOW2D_ARRAY:
337 case TGSI_TEXTURE_SHADOWCUBE:
338 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
339 mask |= 0x8;
340 break;
341 default:
342 break;
343 }
344 }
345 return mask;
346 case TGSI_OPCODE_TXQ:
347 return 1;
348 case TGSI_OPCODE_D2I:
349 case TGSI_OPCODE_D2U:
350 case TGSI_OPCODE_D2F:
351 case TGSI_OPCODE_DSLT:
352 case TGSI_OPCODE_DSGE:
353 case TGSI_OPCODE_DSEQ:
354 case TGSI_OPCODE_DSNE:
355 case TGSI_OPCODE_U64SEQ:
356 case TGSI_OPCODE_U64SNE:
357 case TGSI_OPCODE_I64SLT:
358 case TGSI_OPCODE_U64SLT:
359 case TGSI_OPCODE_I64SGE:
360 case TGSI_OPCODE_U64SGE:
361 case TGSI_OPCODE_I642F:
362 case TGSI_OPCODE_U642F:
363 switch (util_bitcount(mask)) {
364 case 1: return 0x3;
365 case 2: return 0xf;
366 default:
367 assert(!"unexpected mask");
368 return 0xf;
369 }
370 case TGSI_OPCODE_I2D:
371 case TGSI_OPCODE_U2D:
372 case TGSI_OPCODE_F2D: {
373 unsigned int x = 0;
374 if ((mask & 0x3) == 0x3)
375 x |= 1;
376 if ((mask & 0xc) == 0xc)
377 x |= 2;
378 return x;
379 }
380 case TGSI_OPCODE_PK2H:
381 return 0x3;
382 case TGSI_OPCODE_UP2H:
383 return 0x1;
384 default:
385 break;
386 }
387
388 return mask;
389 }
390
391 nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
392 {
393 nv50_ir::Modifier m(0);
394
395 if (reg.Absolute)
396 m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
397 if (reg.Negate)
398 m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
399 return m;
400 }
401
402 static nv50_ir::DataFile translateFile(uint file)
403 {
404 switch (file) {
405 case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
406 case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
407 case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
408 case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
409 case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
410 case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
411 case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
412 case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
413 case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
414 case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
415 case TGSI_FILE_SAMPLER:
416 case TGSI_FILE_NULL:
417 default:
418 return nv50_ir::FILE_NULL;
419 }
420 }
421
422 static nv50_ir::SVSemantic translateSysVal(uint sysval)
423 {
424 switch (sysval) {
425 case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
426 case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
427 case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
428 case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
429 case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
430 case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
431 case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
432 case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
433 case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
434 case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
435 case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
436 case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
437 case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
438 case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
439 case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
440 case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
441 case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
442 case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
443 case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
444 case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
445 case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
446 case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
447 case TGSI_SEMANTIC_SUBGROUP_INVOCATION: return nv50_ir::SV_LANEID;
448 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK: return nv50_ir::SV_LANEMASK_EQ;
449 case TGSI_SEMANTIC_SUBGROUP_LT_MASK: return nv50_ir::SV_LANEMASK_LT;
450 case TGSI_SEMANTIC_SUBGROUP_LE_MASK: return nv50_ir::SV_LANEMASK_LE;
451 case TGSI_SEMANTIC_SUBGROUP_GT_MASK: return nv50_ir::SV_LANEMASK_GT;
452 case TGSI_SEMANTIC_SUBGROUP_GE_MASK: return nv50_ir::SV_LANEMASK_GE;
453 default:
454 assert(0);
455 return nv50_ir::SV_CLOCK;
456 }
457 }
458
459 #define NV50_IR_TEX_TARG_CASE(a, b) \
460 case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
461
462 static nv50_ir::TexTarget translateTexture(uint tex)
463 {
464 switch (tex) {
465 NV50_IR_TEX_TARG_CASE(1D, 1D);
466 NV50_IR_TEX_TARG_CASE(2D, 2D);
467 NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
468 NV50_IR_TEX_TARG_CASE(3D, 3D);
469 NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
470 NV50_IR_TEX_TARG_CASE(RECT, RECT);
471 NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
472 NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
473 NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
474 NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
475 NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
476 NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
477 NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
478 NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
479 NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
480 NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
481 NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
482 NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
483
484 case TGSI_TEXTURE_UNKNOWN:
485 default:
486 assert(!"invalid texture target");
487 return nv50_ir::TEX_TARGET_2D;
488 }
489 }
490
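// volatile takes precedence over coherent; anything else keeps the default cache-all mode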
491 static nv50_ir::CacheMode translateCacheMode(uint qualifier)
492 {
493 if (qualifier & TGSI_MEMORY_VOLATILE)
494 return nv50_ir::CACHE_CV;
495 if (qualifier & TGSI_MEMORY_COHERENT)
496 return nv50_ir::CACHE_CG;
497 return nv50_ir::CACHE_CA;
498 }
499
500 static nv50_ir::ImgFormat translateImgFormat(uint format)
501 {
502
503 #define FMT_CASE(a, b) \
504 case PIPE_FORMAT_ ## a: return nv50_ir::FMT_ ## b
505
506 switch (format) {
507 FMT_CASE(NONE, NONE);
508
509 FMT_CASE(R32G32B32A32_FLOAT, RGBA32F);
510 FMT_CASE(R16G16B16A16_FLOAT, RGBA16F);
511 FMT_CASE(R32G32_FLOAT, RG32F);
512 FMT_CASE(R16G16_FLOAT, RG16F);
513 FMT_CASE(R11G11B10_FLOAT, R11G11B10F);
514 FMT_CASE(R32_FLOAT, R32F);
515 FMT_CASE(R16_FLOAT, R16F);
516
517 FMT_CASE(R32G32B32A32_UINT, RGBA32UI);
518 FMT_CASE(R16G16B16A16_UINT, RGBA16UI);
519 FMT_CASE(R10G10B10A2_UINT, RGB10A2UI);
520 FMT_CASE(R8G8B8A8_UINT, RGBA8UI);
521 FMT_CASE(R32G32_UINT, RG32UI);
522 FMT_CASE(R16G16_UINT, RG16UI);
523 FMT_CASE(R8G8_UINT, RG8UI);
524 FMT_CASE(R32_UINT, R32UI);
525 FMT_CASE(R16_UINT, R16UI);
526 FMT_CASE(R8_UINT, R8UI);
527
528 FMT_CASE(R32G32B32A32_SINT, RGBA32I);
529 FMT_CASE(R16G16B16A16_SINT, RGBA16I);
530 FMT_CASE(R8G8B8A8_SINT, RGBA8I);
531 FMT_CASE(R32G32_SINT, RG32I);
532 FMT_CASE(R16G16_SINT, RG16I);
533 FMT_CASE(R8G8_SINT, RG8I);
534 FMT_CASE(R32_SINT, R32I);
535 FMT_CASE(R16_SINT, R16I);
536 FMT_CASE(R8_SINT, R8I);
537
538 FMT_CASE(R16G16B16A16_UNORM, RGBA16);
539 FMT_CASE(R10G10B10A2_UNORM, RGB10A2);
540 FMT_CASE(R8G8B8A8_UNORM, RGBA8);
541 FMT_CASE(R16G16_UNORM, RG16);
542 FMT_CASE(R8G8_UNORM, RG8);
543 FMT_CASE(R16_UNORM, R16);
544 FMT_CASE(R8_UNORM, R8);
545
546 FMT_CASE(R16G16B16A16_SNORM, RGBA16_SNORM);
547 FMT_CASE(R8G8B8A8_SNORM, RGBA8_SNORM);
548 FMT_CASE(R16G16_SNORM, RG16_SNORM);
549 FMT_CASE(R8G8_SNORM, RG8_SNORM);
550 FMT_CASE(R16_SNORM, R16_SNORM);
551 FMT_CASE(R8_SNORM, R8_SNORM);
552
553 FMT_CASE(B8G8R8A8_UNORM, BGRA8);
554 }
555
556 assert(!"Unexpected format");
557 return nv50_ir::FMT_NONE;
558 }
559
560 nv50_ir::DataType Instruction::inferSrcType() const
561 {
562 switch (getOpcode()) {
563 case TGSI_OPCODE_UIF:
564 case TGSI_OPCODE_AND:
565 case TGSI_OPCODE_OR:
566 case TGSI_OPCODE_XOR:
567 case TGSI_OPCODE_NOT:
568 case TGSI_OPCODE_SHL:
569 case TGSI_OPCODE_U2F:
570 case TGSI_OPCODE_U2D:
571 case TGSI_OPCODE_U2I64:
572 case TGSI_OPCODE_UADD:
573 case TGSI_OPCODE_UDIV:
574 case TGSI_OPCODE_UMOD:
575 case TGSI_OPCODE_UMAD:
576 case TGSI_OPCODE_UMUL:
577 case TGSI_OPCODE_UMUL_HI:
578 case TGSI_OPCODE_UMAX:
579 case TGSI_OPCODE_UMIN:
580 case TGSI_OPCODE_USEQ:
581 case TGSI_OPCODE_USGE:
582 case TGSI_OPCODE_USLT:
583 case TGSI_OPCODE_USNE:
584 case TGSI_OPCODE_USHR:
585 case TGSI_OPCODE_ATOMUADD:
586 case TGSI_OPCODE_ATOMXCHG:
587 case TGSI_OPCODE_ATOMCAS:
588 case TGSI_OPCODE_ATOMAND:
589 case TGSI_OPCODE_ATOMOR:
590 case TGSI_OPCODE_ATOMXOR:
591 case TGSI_OPCODE_ATOMUMIN:
592 case TGSI_OPCODE_ATOMUMAX:
593 case TGSI_OPCODE_UBFE:
594 case TGSI_OPCODE_UMSB:
595 case TGSI_OPCODE_UP2H:
596 case TGSI_OPCODE_VOTE_ALL:
597 case TGSI_OPCODE_VOTE_ANY:
598 case TGSI_OPCODE_VOTE_EQ:
599 return nv50_ir::TYPE_U32;
600 case TGSI_OPCODE_I2F:
601 case TGSI_OPCODE_I2D:
602 case TGSI_OPCODE_I2I64:
603 case TGSI_OPCODE_IDIV:
604 case TGSI_OPCODE_IMUL_HI:
605 case TGSI_OPCODE_IMAX:
606 case TGSI_OPCODE_IMIN:
607 case TGSI_OPCODE_IABS:
608 case TGSI_OPCODE_INEG:
609 case TGSI_OPCODE_ISGE:
610 case TGSI_OPCODE_ISHR:
611 case TGSI_OPCODE_ISLT:
612 case TGSI_OPCODE_ISSG:
613 case TGSI_OPCODE_MOD:
614 case TGSI_OPCODE_UARL:
615 case TGSI_OPCODE_ATOMIMIN:
616 case TGSI_OPCODE_ATOMIMAX:
617 case TGSI_OPCODE_IBFE:
618 case TGSI_OPCODE_IMSB:
619 return nv50_ir::TYPE_S32;
620 case TGSI_OPCODE_D2F:
621 case TGSI_OPCODE_D2I:
622 case TGSI_OPCODE_D2U:
623 case TGSI_OPCODE_D2I64:
624 case TGSI_OPCODE_D2U64:
625 case TGSI_OPCODE_DABS:
626 case TGSI_OPCODE_DNEG:
627 case TGSI_OPCODE_DADD:
628 case TGSI_OPCODE_DMUL:
629 case TGSI_OPCODE_DDIV:
630 case TGSI_OPCODE_DMAX:
631 case TGSI_OPCODE_DMIN:
632 case TGSI_OPCODE_DSLT:
633 case TGSI_OPCODE_DSGE:
634 case TGSI_OPCODE_DSEQ:
635 case TGSI_OPCODE_DSNE:
636 case TGSI_OPCODE_DRCP:
637 case TGSI_OPCODE_DSQRT:
638 case TGSI_OPCODE_DMAD:
639 case TGSI_OPCODE_DFMA:
640 case TGSI_OPCODE_DFRAC:
641 case TGSI_OPCODE_DRSQ:
642 case TGSI_OPCODE_DTRUNC:
643 case TGSI_OPCODE_DCEIL:
644 case TGSI_OPCODE_DFLR:
645 case TGSI_OPCODE_DROUND:
646 return nv50_ir::TYPE_F64;
647 case TGSI_OPCODE_U64SEQ:
648 case TGSI_OPCODE_U64SNE:
649 case TGSI_OPCODE_U64SLT:
650 case TGSI_OPCODE_U64SGE:
651 case TGSI_OPCODE_U64MIN:
652 case TGSI_OPCODE_U64MAX:
653 case TGSI_OPCODE_U64ADD:
654 case TGSI_OPCODE_U64MUL:
655 case TGSI_OPCODE_U64SHL:
656 case TGSI_OPCODE_U64SHR:
657 case TGSI_OPCODE_U64DIV:
658 case TGSI_OPCODE_U64MOD:
659 case TGSI_OPCODE_U642F:
660 case TGSI_OPCODE_U642D:
661 return nv50_ir::TYPE_U64;
662 case TGSI_OPCODE_I64ABS:
663 case TGSI_OPCODE_I64SSG:
664 case TGSI_OPCODE_I64NEG:
665 case TGSI_OPCODE_I64SLT:
666 case TGSI_OPCODE_I64SGE:
667 case TGSI_OPCODE_I64MIN:
668 case TGSI_OPCODE_I64MAX:
669 case TGSI_OPCODE_I64SHR:
670 case TGSI_OPCODE_I64DIV:
671 case TGSI_OPCODE_I64MOD:
672 case TGSI_OPCODE_I642F:
673 case TGSI_OPCODE_I642D:
674 return nv50_ir::TYPE_S64;
675 default:
676 return nv50_ir::TYPE_F32;
677 }
678 }
679
680 nv50_ir::DataType Instruction::inferDstType() const
681 {
682 switch (getOpcode()) {
683 case TGSI_OPCODE_D2U:
684 case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
685 case TGSI_OPCODE_D2I:
686 case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
687 case TGSI_OPCODE_FSEQ:
688 case TGSI_OPCODE_FSGE:
689 case TGSI_OPCODE_FSLT:
690 case TGSI_OPCODE_FSNE:
691 case TGSI_OPCODE_DSEQ:
692 case TGSI_OPCODE_DSGE:
693 case TGSI_OPCODE_DSLT:
694 case TGSI_OPCODE_DSNE:
695 case TGSI_OPCODE_I64SLT:
696 case TGSI_OPCODE_I64SGE:
697 case TGSI_OPCODE_U64SEQ:
698 case TGSI_OPCODE_U64SNE:
699 case TGSI_OPCODE_U64SLT:
700 case TGSI_OPCODE_U64SGE:
701 case TGSI_OPCODE_PK2H:
702 return nv50_ir::TYPE_U32;
703 case TGSI_OPCODE_I2F:
704 case TGSI_OPCODE_U2F:
705 case TGSI_OPCODE_D2F:
706 case TGSI_OPCODE_I642F:
707 case TGSI_OPCODE_U642F:
708 case TGSI_OPCODE_UP2H:
709 return nv50_ir::TYPE_F32;
710 case TGSI_OPCODE_I2D:
711 case TGSI_OPCODE_U2D:
712 case TGSI_OPCODE_F2D:
713 case TGSI_OPCODE_I642D:
714 case TGSI_OPCODE_U642D:
715 return nv50_ir::TYPE_F64;
716 case TGSI_OPCODE_I2I64:
717 case TGSI_OPCODE_U2I64:
718 case TGSI_OPCODE_F2I64:
719 case TGSI_OPCODE_D2I64:
720 return nv50_ir::TYPE_S64;
721 case TGSI_OPCODE_F2U64:
722 case TGSI_OPCODE_D2U64:
723 return nv50_ir::TYPE_U64;
724 default:
725 return inferSrcType();
726 }
727 }
728
729 nv50_ir::CondCode Instruction::getSetCond() const
730 {
731 using namespace nv50_ir;
732
733 switch (getOpcode()) {
734 case TGSI_OPCODE_SLT:
735 case TGSI_OPCODE_ISLT:
736 case TGSI_OPCODE_USLT:
737 case TGSI_OPCODE_FSLT:
738 case TGSI_OPCODE_DSLT:
739 case TGSI_OPCODE_I64SLT:
740 case TGSI_OPCODE_U64SLT:
741 return CC_LT;
742 case TGSI_OPCODE_SLE:
743 return CC_LE;
744 case TGSI_OPCODE_SGE:
745 case TGSI_OPCODE_ISGE:
746 case TGSI_OPCODE_USGE:
747 case TGSI_OPCODE_FSGE:
748 case TGSI_OPCODE_DSGE:
749 case TGSI_OPCODE_I64SGE:
750 case TGSI_OPCODE_U64SGE:
751 return CC_GE;
752 case TGSI_OPCODE_SGT:
753 return CC_GT;
754 case TGSI_OPCODE_SEQ:
755 case TGSI_OPCODE_USEQ:
756 case TGSI_OPCODE_FSEQ:
757 case TGSI_OPCODE_DSEQ:
758 case TGSI_OPCODE_U64SEQ:
759 return CC_EQ;
760 case TGSI_OPCODE_SNE:
761 case TGSI_OPCODE_FSNE:
762 case TGSI_OPCODE_DSNE:
763 case TGSI_OPCODE_U64SNE:
764 return CC_NEU;
765 case TGSI_OPCODE_USNE:
766 return CC_NE;
767 default:
768 return CC_ALWAYS;
769 }
770 }
771
772 #define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
773
774 static nv50_ir::operation translateOpcode(uint opcode)
775 {
776 switch (opcode) {
777 NV50_IR_OPCODE_CASE(ARL, SHL);
778 NV50_IR_OPCODE_CASE(MOV, MOV);
779
780 NV50_IR_OPCODE_CASE(RCP, RCP);
781 NV50_IR_OPCODE_CASE(RSQ, RSQ);
782 NV50_IR_OPCODE_CASE(SQRT, SQRT);
783
784 NV50_IR_OPCODE_CASE(MUL, MUL);
785 NV50_IR_OPCODE_CASE(ADD, ADD);
786
787 NV50_IR_OPCODE_CASE(MIN, MIN);
788 NV50_IR_OPCODE_CASE(MAX, MAX);
789 NV50_IR_OPCODE_CASE(SLT, SET);
790 NV50_IR_OPCODE_CASE(SGE, SET);
791 NV50_IR_OPCODE_CASE(MAD, MAD);
792 NV50_IR_OPCODE_CASE(FMA, FMA);
793
794 NV50_IR_OPCODE_CASE(FLR, FLOOR);
795 NV50_IR_OPCODE_CASE(ROUND, CVT);
796 NV50_IR_OPCODE_CASE(EX2, EX2);
797 NV50_IR_OPCODE_CASE(LG2, LG2);
798 NV50_IR_OPCODE_CASE(POW, POW);
799
800 NV50_IR_OPCODE_CASE(COS, COS);
801 NV50_IR_OPCODE_CASE(DDX, DFDX);
802 NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
803 NV50_IR_OPCODE_CASE(DDY, DFDY);
804 NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
805 NV50_IR_OPCODE_CASE(KILL, DISCARD);
806
807 NV50_IR_OPCODE_CASE(SEQ, SET);
808 NV50_IR_OPCODE_CASE(SGT, SET);
809 NV50_IR_OPCODE_CASE(SIN, SIN);
810 NV50_IR_OPCODE_CASE(SLE, SET);
811 NV50_IR_OPCODE_CASE(SNE, SET);
812 NV50_IR_OPCODE_CASE(TEX, TEX);
813 NV50_IR_OPCODE_CASE(TXD, TXD);
814 NV50_IR_OPCODE_CASE(TXP, TEX);
815
816 NV50_IR_OPCODE_CASE(CAL, CALL);
817 NV50_IR_OPCODE_CASE(RET, RET);
818 NV50_IR_OPCODE_CASE(CMP, SLCT);
819
820 NV50_IR_OPCODE_CASE(TXB, TXB);
821
822 NV50_IR_OPCODE_CASE(DIV, DIV);
823
824 NV50_IR_OPCODE_CASE(TXL, TXL);
825 NV50_IR_OPCODE_CASE(TEX_LZ, TXL);
826
827 NV50_IR_OPCODE_CASE(CEIL, CEIL);
828 NV50_IR_OPCODE_CASE(I2F, CVT);
829 NV50_IR_OPCODE_CASE(NOT, NOT);
830 NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
831 NV50_IR_OPCODE_CASE(SHL, SHL);
832
833 NV50_IR_OPCODE_CASE(AND, AND);
834 NV50_IR_OPCODE_CASE(OR, OR);
835 NV50_IR_OPCODE_CASE(MOD, MOD);
836 NV50_IR_OPCODE_CASE(XOR, XOR);
837 NV50_IR_OPCODE_CASE(TXF, TXF);
838 NV50_IR_OPCODE_CASE(TXF_LZ, TXF);
839 NV50_IR_OPCODE_CASE(TXQ, TXQ);
840 NV50_IR_OPCODE_CASE(TXQS, TXQ);
841 NV50_IR_OPCODE_CASE(TG4, TXG);
842 NV50_IR_OPCODE_CASE(LODQ, TXLQ);
843
844 NV50_IR_OPCODE_CASE(EMIT, EMIT);
845 NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
846
847 NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
848
849 NV50_IR_OPCODE_CASE(F2I, CVT);
850 NV50_IR_OPCODE_CASE(FSEQ, SET);
851 NV50_IR_OPCODE_CASE(FSGE, SET);
852 NV50_IR_OPCODE_CASE(FSLT, SET);
853 NV50_IR_OPCODE_CASE(FSNE, SET);
854 NV50_IR_OPCODE_CASE(IDIV, DIV);
855 NV50_IR_OPCODE_CASE(IMAX, MAX);
856 NV50_IR_OPCODE_CASE(IMIN, MIN);
857 NV50_IR_OPCODE_CASE(IABS, ABS);
858 NV50_IR_OPCODE_CASE(INEG, NEG);
859 NV50_IR_OPCODE_CASE(ISGE, SET);
860 NV50_IR_OPCODE_CASE(ISHR, SHR);
861 NV50_IR_OPCODE_CASE(ISLT, SET);
862 NV50_IR_OPCODE_CASE(F2U, CVT);
863 NV50_IR_OPCODE_CASE(U2F, CVT);
864 NV50_IR_OPCODE_CASE(UADD, ADD);
865 NV50_IR_OPCODE_CASE(UDIV, DIV);
866 NV50_IR_OPCODE_CASE(UMAD, MAD);
867 NV50_IR_OPCODE_CASE(UMAX, MAX);
868 NV50_IR_OPCODE_CASE(UMIN, MIN);
869 NV50_IR_OPCODE_CASE(UMOD, MOD);
870 NV50_IR_OPCODE_CASE(UMUL, MUL);
871 NV50_IR_OPCODE_CASE(USEQ, SET);
872 NV50_IR_OPCODE_CASE(USGE, SET);
873 NV50_IR_OPCODE_CASE(USHR, SHR);
874 NV50_IR_OPCODE_CASE(USLT, SET);
875 NV50_IR_OPCODE_CASE(USNE, SET);
876
877 NV50_IR_OPCODE_CASE(DABS, ABS);
878 NV50_IR_OPCODE_CASE(DNEG, NEG);
879 NV50_IR_OPCODE_CASE(DADD, ADD);
880 NV50_IR_OPCODE_CASE(DMUL, MUL);
881 NV50_IR_OPCODE_CASE(DDIV, DIV);
882 NV50_IR_OPCODE_CASE(DMAX, MAX);
883 NV50_IR_OPCODE_CASE(DMIN, MIN);
884 NV50_IR_OPCODE_CASE(DSLT, SET);
885 NV50_IR_OPCODE_CASE(DSGE, SET);
886 NV50_IR_OPCODE_CASE(DSEQ, SET);
887 NV50_IR_OPCODE_CASE(DSNE, SET);
888 NV50_IR_OPCODE_CASE(DRCP, RCP);
889 NV50_IR_OPCODE_CASE(DSQRT, SQRT);
890 NV50_IR_OPCODE_CASE(DMAD, MAD);
891 NV50_IR_OPCODE_CASE(DFMA, FMA);
892 NV50_IR_OPCODE_CASE(D2I, CVT);
893 NV50_IR_OPCODE_CASE(D2U, CVT);
894 NV50_IR_OPCODE_CASE(I2D, CVT);
895 NV50_IR_OPCODE_CASE(U2D, CVT);
896 NV50_IR_OPCODE_CASE(DRSQ, RSQ);
897 NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
898 NV50_IR_OPCODE_CASE(DCEIL, CEIL);
899 NV50_IR_OPCODE_CASE(DFLR, FLOOR);
900 NV50_IR_OPCODE_CASE(DROUND, CVT);
901
902 NV50_IR_OPCODE_CASE(U64SEQ, SET);
903 NV50_IR_OPCODE_CASE(U64SNE, SET);
904 NV50_IR_OPCODE_CASE(U64SLT, SET);
905 NV50_IR_OPCODE_CASE(U64SGE, SET);
906 NV50_IR_OPCODE_CASE(I64SLT, SET);
907 NV50_IR_OPCODE_CASE(I64SGE, SET);
908 NV50_IR_OPCODE_CASE(I2I64, CVT);
909 NV50_IR_OPCODE_CASE(U2I64, CVT);
910 NV50_IR_OPCODE_CASE(F2I64, CVT);
911 NV50_IR_OPCODE_CASE(F2U64, CVT);
912 NV50_IR_OPCODE_CASE(D2I64, CVT);
913 NV50_IR_OPCODE_CASE(D2U64, CVT);
914 NV50_IR_OPCODE_CASE(I642F, CVT);
915 NV50_IR_OPCODE_CASE(U642F, CVT);
916 NV50_IR_OPCODE_CASE(I642D, CVT);
917 NV50_IR_OPCODE_CASE(U642D, CVT);
918
919 NV50_IR_OPCODE_CASE(I64MIN, MIN);
920 NV50_IR_OPCODE_CASE(U64MIN, MIN);
921 NV50_IR_OPCODE_CASE(I64MAX, MAX);
922 NV50_IR_OPCODE_CASE(U64MAX, MAX);
923 NV50_IR_OPCODE_CASE(I64ABS, ABS);
924 NV50_IR_OPCODE_CASE(I64NEG, NEG);
925 NV50_IR_OPCODE_CASE(U64ADD, ADD);
926 NV50_IR_OPCODE_CASE(U64MUL, MUL);
927 NV50_IR_OPCODE_CASE(U64SHL, SHL);
928 NV50_IR_OPCODE_CASE(I64SHR, SHR);
929 NV50_IR_OPCODE_CASE(U64SHR, SHR);
930
931 NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
932 NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
933
934 NV50_IR_OPCODE_CASE(SAMPLE, TEX);
935 NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
936 NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
937 NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
938 NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
939 NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
940 NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
941 NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
942 NV50_IR_OPCODE_CASE(GATHER4, TXG);
943 NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
944
945 NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
946 NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
947 NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
948 NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
949 NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
950 NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
951 NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
952 NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
953 NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
954 NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
955
956 NV50_IR_OPCODE_CASE(TEX2, TEX);
957 NV50_IR_OPCODE_CASE(TXB2, TXB);
958 NV50_IR_OPCODE_CASE(TXL2, TXL);
959
960 NV50_IR_OPCODE_CASE(IBFE, EXTBF);
961 NV50_IR_OPCODE_CASE(UBFE, EXTBF);
962 NV50_IR_OPCODE_CASE(BFI, INSBF);
963 NV50_IR_OPCODE_CASE(BREV, EXTBF);
964 NV50_IR_OPCODE_CASE(POPC, POPCNT);
965 NV50_IR_OPCODE_CASE(LSB, BFIND);
966 NV50_IR_OPCODE_CASE(IMSB, BFIND);
967 NV50_IR_OPCODE_CASE(UMSB, BFIND);
968
969 NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
970 NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
971 NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
972
973 NV50_IR_OPCODE_CASE(BALLOT, VOTE);
974 NV50_IR_OPCODE_CASE(READ_INVOC, SHFL);
975 NV50_IR_OPCODE_CASE(READ_FIRST, SHFL);
976
977 NV50_IR_OPCODE_CASE(END, EXIT);
978
979 default:
980 return nv50_ir::OP_NOP;
981 }
982 }
983
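// sub-operation (atomic kind, mul-high, vote mode) emitted alongside the translated opcode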
984 static uint16_t opcodeToSubOp(uint opcode)
985 {
986 switch (opcode) {
987 case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
988 case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
989 case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
990 case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
991 case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
992 case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
993 case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
994 case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
995 case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
996 case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
997 case TGSI_OPCODE_IMUL_HI:
998 case TGSI_OPCODE_UMUL_HI:
999 return NV50_IR_SUBOP_MUL_HIGH;
1000 case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
1001 case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
1002 case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
1003 default:
1004 return 0;
1005 }
1006 }
1007
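// true if any source register aliases the (directly addressed) first destination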
1008 bool Instruction::checkDstSrcAliasing() const
1009 {
1010 if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
1011 return false;
1012
1013 for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
1014 if (insn->Src[s].Register.File == TGSI_FILE_NULL)
1015 break;
1016 if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
1017 insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
1018 return true;
1019 }
1020 return false;
1021 }
1022
1023 class Source
1024 {
1025 public:
1026 Source(struct nv50_ir_prog_info *);
1027 ~Source();
1028
1029 public:
1030 bool scanSource();
1031 unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
1032
1033 public:
1034 struct tgsi_shader_info scan;
1035 struct tgsi_full_instruction *insns;
1036 const struct tgsi_token *tokens;
1037 struct nv50_ir_prog_info *info;
1038
1039 nv50_ir::DynArray tempArrays;
1040 nv50_ir::DynArray immdArrays;
1041
1042 typedef nv50_ir::BuildUtil::Location Location;
1043 // these registers are per-subroutine, cannot be used for parameter passing
1044 std::set<Location> locals;
1045
1046 std::set<int> indirectTempArrays;
1047 std::map<int, int> indirectTempOffsets;
1048 std::map<int, std::pair<int, int> > tempArrayInfo;
1049 std::vector<int> tempArrayId;
1050
1051 int clipVertexOutput;
1052
1053 struct TextureView {
1054 uint8_t target; // TGSI_TEXTURE_*
1055 };
1056 std::vector<TextureView> textureViews;
1057
1058 /*
1059 struct Resource {
1060 uint8_t target; // TGSI_TEXTURE_*
1061 bool raw;
1062 uint8_t slot; // $surface index
1063 };
1064 std::vector<Resource> resources;
1065 */
1066
1067 struct Image {
1068 uint8_t target; // TGSI_TEXTURE_*
1069 bool raw;
1070 uint8_t slot;
1071 uint16_t format; // PIPE_FORMAT_*
1072 };
1073 std::vector<Image> images;
1074
1075 struct MemoryFile {
1076 uint8_t mem_type; // TGSI_MEMORY_TYPE_*
1077 };
1078 std::vector<MemoryFile> memoryFiles;
1079
1080 private:
1081 int inferSysValDirection(unsigned sn) const;
1082 bool scanDeclaration(const struct tgsi_full_declaration *);
1083 bool scanInstruction(const struct tgsi_full_instruction *);
1084 void scanInstructionSrc(const Instruction& insn,
1085 const Instruction::SrcRegister& src,
1086 unsigned mask);
1087 void scanProperty(const struct tgsi_full_property *);
1088 void scanImmediate(const struct tgsi_full_immediate *);
1089
1090 inline bool isEdgeFlagPassthrough(const Instruction&) const;
1091 };
1092
1093 Source::Source(struct nv50_ir_prog_info *prog) : info(prog)
1094 {
1095 tokens = (const struct tgsi_token *)info->bin.source;
1096
1097 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
1098 tgsi_dump(tokens, 0);
1099 }
1100
1101 Source::~Source()
1102 {
1103 if (insns)
1104 FREE(insns);
1105
1106 if (info->immd.data)
1107 FREE(info->immd.data);
1108 if (info->immd.type)
1109 FREE(info->immd.type);
1110 }
1111
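// single pass over the token stream: immediates, declarations, instructions and properties, then slot assignment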
1112 bool Source::scanSource()
1113 {
1114 unsigned insnCount = 0;
1115 struct tgsi_parse_context parse;
1116
1117 tgsi_scan_shader(tokens, &scan);
1118
1119 insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1120 sizeof(insns[0]));
1121 if (!insns)
1122 return false;
1123
1124 clipVertexOutput = -1;
1125
1126 textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1127 //resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1128 images.resize(scan.file_max[TGSI_FILE_IMAGE] + 1);
1129 tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1130 memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1131
1132 info->immd.bufSize = 0;
1133
1134 info->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1135 info->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1136 info->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1137
1138 if (info->type == PIPE_SHADER_FRAGMENT) {
1139 info->prop.fp.writesDepth = scan.writes_z;
1140 info->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1141 } else
1142 if (info->type == PIPE_SHADER_GEOMETRY) {
1143 info->prop.gp.instanceCount = 1; // default value
1144 }
1145
1146 info->io.viewportId = -1;
1147
1148 info->immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1149 info->immd.type = (ubyte *)MALLOC(scan.immediate_count * sizeof(ubyte));
1150
1151 tgsi_parse_init(&parse, tokens);
1152 while (!tgsi_parse_end_of_tokens(&parse)) {
1153 tgsi_parse_token(&parse);
1154
1155 switch (parse.FullToken.Token.Type) {
1156 case TGSI_TOKEN_TYPE_IMMEDIATE:
1157 scanImmediate(&parse.FullToken.FullImmediate);
1158 break;
1159 case TGSI_TOKEN_TYPE_DECLARATION:
1160 scanDeclaration(&parse.FullToken.FullDeclaration);
1161 break;
1162 case TGSI_TOKEN_TYPE_INSTRUCTION:
1163 insns[insnCount++] = parse.FullToken.FullInstruction;
1164 scanInstruction(&parse.FullToken.FullInstruction);
1165 break;
1166 case TGSI_TOKEN_TYPE_PROPERTY:
1167 scanProperty(&parse.FullToken.FullProperty);
1168 break;
1169 default:
1170 INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1171 break;
1172 }
1173 }
1174 tgsi_parse_free(&parse);
1175
1176 if (indirectTempArrays.size()) {
1177 int tempBase = 0;
1178 for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1179 it != indirectTempArrays.end(); ++it) {
1180 std::pair<int, int>& info = tempArrayInfo[*it];
1181 indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1182 tempBase += info.second;
1183 }
1184 info->bin.tlsSpace += tempBase * 16;
1185 }
1186
1187 if (info->io.genUserClip > 0) {
1188 info->io.clipDistances = info->io.genUserClip;
1189
1190 const unsigned int nOut = (info->io.genUserClip + 3) / 4;
1191
1192 for (unsigned int n = 0; n < nOut; ++n) {
1193 unsigned int i = info->numOutputs++;
1194 info->out[i].id = i;
1195 info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1196 info->out[i].si = n;
1197 info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
1198 }
1199 }
1200
1201 return info->assignSlots(info) == 0;
1202 }
1203
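// record shader properties (GS/TCS/TES layout, CS block size, clip/cull counts, ...) in the program info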
1204 void Source::scanProperty(const struct tgsi_full_property *prop)
1205 {
1206 switch (prop->Property.PropertyName) {
1207 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1208 info->prop.gp.outputPrim = prop->u[0].Data;
1209 break;
1210 case TGSI_PROPERTY_GS_INPUT_PRIM:
1211 info->prop.gp.inputPrim = prop->u[0].Data;
1212 break;
1213 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1214 info->prop.gp.maxVertices = prop->u[0].Data;
1215 break;
1216 case TGSI_PROPERTY_GS_INVOCATIONS:
1217 info->prop.gp.instanceCount = prop->u[0].Data;
1218 break;
1219 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1220 info->prop.fp.separateFragData = true;
1221 break;
1222 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1223 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1224 case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1225 // we don't care
1226 break;
1227 case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1228 info->io.genUserClip = -1;
1229 break;
1230 case TGSI_PROPERTY_TCS_VERTICES_OUT:
1231 info->prop.tp.outputPatchSize = prop->u[0].Data;
1232 break;
1233 case TGSI_PROPERTY_TES_PRIM_MODE:
1234 info->prop.tp.domain = prop->u[0].Data;
1235 break;
1236 case TGSI_PROPERTY_TES_SPACING:
1237 info->prop.tp.partitioning = prop->u[0].Data;
1238 break;
1239 case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1240 info->prop.tp.winding = prop->u[0].Data;
1241 break;
1242 case TGSI_PROPERTY_TES_POINT_MODE:
1243 if (prop->u[0].Data)
1244 info->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1245 else
1246 info->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1247 break;
1248 case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1249 info->prop.cp.numThreads[0] = prop->u[0].Data;
1250 break;
1251 case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1252 info->prop.cp.numThreads[1] = prop->u[0].Data;
1253 break;
1254 case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1255 info->prop.cp.numThreads[2] = prop->u[0].Data;
1256 break;
1257 case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1258 info->io.clipDistances = prop->u[0].Data;
1259 break;
1260 case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1261 info->io.cullDistances = prop->u[0].Data;
1262 break;
1263 case TGSI_PROPERTY_NEXT_SHADER:
1264 /* Do not need to know the next shader stage. */
1265 break;
1266 case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1267 info->prop.fp.earlyFragTests = prop->u[0].Data;
1268 break;
1269 case TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE:
1270 info->prop.fp.postDepthCoverage = prop->u[0].Data;
1271 break;
1272 case TGSI_PROPERTY_MUL_ZERO_WINS:
1273 info->io.mul_zero_wins = prop->u[0].Data;
1274 break;
1275 default:
1276 INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1277 break;
1278 }
1279 }
1280
1281 void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1282 {
1283 const unsigned n = info->immd.count++;
1284
1285 assert(n < scan.immediate_count);
1286
1287 for (int c = 0; c < 4; ++c)
1288 info->immd.data[n * 4 + c] = imm->u[c].Uint;
1289
1290 info->immd.type[n] = imm->Immediate.DataType;
1291 }
1292
1293 int Source::inferSysValDirection(unsigned sn) const
1294 {
1295 switch (sn) {
1296 case TGSI_SEMANTIC_INSTANCEID:
1297 case TGSI_SEMANTIC_VERTEXID:
1298 return 1;
1299 case TGSI_SEMANTIC_LAYER:
1300 #if 0
1301 case TGSI_SEMANTIC_VIEWPORTINDEX:
1302 return 0;
1303 #endif
1304 case TGSI_SEMANTIC_PRIMID:
1305 return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1306 default:
1307 return 0;
1308 }
1309 }
1310
1311 bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1312 {
1313 unsigned i, c;
1314 unsigned sn = TGSI_SEMANTIC_GENERIC;
1315 unsigned si = 0;
1316 const unsigned first = decl->Range.First, last = decl->Range.Last;
1317 const int arrayId = decl->Array.ArrayID;
1318
1319 if (decl->Declaration.Semantic) {
1320 sn = decl->Semantic.Name;
1321 si = decl->Semantic.Index;
1322 }
1323
1324 if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1325 for (i = first; i <= last; ++i) {
1326 for (c = 0; c < 4; ++c) {
1327 locals.insert(
1328 Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1329 }
1330 }
1331 }
1332
1333 switch (decl->Declaration.File) {
1334 case TGSI_FILE_INPUT:
1335 if (info->type == PIPE_SHADER_VERTEX) {
1336 // all vertex attributes are equal
1337 for (i = first; i <= last; ++i) {
1338 info->in[i].sn = TGSI_SEMANTIC_GENERIC;
1339 info->in[i].si = i;
1340 }
1341 } else {
1342 for (i = first; i <= last; ++i, ++si) {
1343 info->in[i].id = i;
1344 info->in[i].sn = sn;
1345 info->in[i].si = si;
1346 if (info->type == PIPE_SHADER_FRAGMENT) {
1347 // translate interpolation mode
1348 switch (decl->Interp.Interpolate) {
1349 case TGSI_INTERPOLATE_CONSTANT:
1350 info->in[i].flat = 1;
1351 break;
1352 case TGSI_INTERPOLATE_COLOR:
1353 info->in[i].sc = 1;
1354 break;
1355 case TGSI_INTERPOLATE_LINEAR:
1356 info->in[i].linear = 1;
1357 break;
1358 default:
1359 break;
1360 }
1361 if (decl->Interp.Location)
1362 info->in[i].centroid = 1;
1363 }
1364
1365 if (sn == TGSI_SEMANTIC_PATCH)
1366 info->in[i].patch = 1;
1367 if (sn == TGSI_SEMANTIC_PATCH)
1368 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1369 }
1370 }
1371 break;
1372 case TGSI_FILE_OUTPUT:
1373 for (i = first; i <= last; ++i, ++si) {
1374 switch (sn) {
1375 case TGSI_SEMANTIC_POSITION:
1376 if (info->type == PIPE_SHADER_FRAGMENT)
1377 info->io.fragDepth = i;
1378 else
1379 if (clipVertexOutput < 0)
1380 clipVertexOutput = i;
1381 break;
1382 case TGSI_SEMANTIC_COLOR:
1383 if (info->type == PIPE_SHADER_FRAGMENT)
1384 info->prop.fp.numColourResults++;
1385 break;
1386 case TGSI_SEMANTIC_EDGEFLAG:
1387 info->io.edgeFlagOut = i;
1388 break;
1389 case TGSI_SEMANTIC_CLIPVERTEX:
1390 clipVertexOutput = i;
1391 break;
1392 case TGSI_SEMANTIC_CLIPDIST:
1393 info->io.genUserClip = -1;
1394 break;
1395 case TGSI_SEMANTIC_SAMPLEMASK:
1396 info->io.sampleMask = i;
1397 break;
1398 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1399 info->io.viewportId = i;
1400 break;
1401 case TGSI_SEMANTIC_PATCH:
1402 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1403 /* fallthrough */
1404 case TGSI_SEMANTIC_TESSOUTER:
1405 case TGSI_SEMANTIC_TESSINNER:
1406 info->out[i].patch = 1;
1407 break;
1408 default:
1409 break;
1410 }
1411 info->out[i].id = i;
1412 info->out[i].sn = sn;
1413 info->out[i].si = si;
1414 }
1415 break;
1416 case TGSI_FILE_SYSTEM_VALUE:
1417 switch (sn) {
1418 case TGSI_SEMANTIC_INSTANCEID:
1419 info->io.instanceId = first;
1420 break;
1421 case TGSI_SEMANTIC_VERTEXID:
1422 info->io.vertexId = first;
1423 break;
1424 case TGSI_SEMANTIC_BASEVERTEX:
1425 case TGSI_SEMANTIC_BASEINSTANCE:
1426 case TGSI_SEMANTIC_DRAWID:
1427 info->prop.vp.usesDrawParameters = true;
1428 break;
1429 case TGSI_SEMANTIC_SAMPLEID:
1430 case TGSI_SEMANTIC_SAMPLEPOS:
1431 info->prop.fp.persampleInvocation = true;
1432 break;
1433 case TGSI_SEMANTIC_SAMPLEMASK:
1434 info->prop.fp.usesSampleMaskIn = true;
1435 break;
1436 default:
1437 break;
1438 }
1439 for (i = first; i <= last; ++i, ++si) {
1440 info->sv[i].sn = sn;
1441 info->sv[i].si = si;
1442 info->sv[i].input = inferSysValDirection(sn);
1443
1444 switch (sn) {
1445 case TGSI_SEMANTIC_TESSOUTER:
1446 case TGSI_SEMANTIC_TESSINNER:
1447 info->sv[i].patch = 1;
1448 break;
1449 }
1450 }
1451 break;
1452 /*
1453 case TGSI_FILE_RESOURCE:
1454 for (i = first; i <= last; ++i) {
1455 resources[i].target = decl->Resource.Resource;
1456 resources[i].raw = decl->Resource.Raw;
1457 resources[i].slot = i;
1458 }
1459 break;
1460 */
1461 case TGSI_FILE_IMAGE:
1462 for (i = first; i <= last; ++i) {
1463 images[i].target = decl->Image.Resource;
1464 images[i].raw = decl->Image.Raw;
1465 images[i].format = decl->Image.Format;
1466 images[i].slot = i;
1467 }
1468 break;
1469 case TGSI_FILE_SAMPLER_VIEW:
1470 for (i = first; i <= last; ++i)
1471 textureViews[i].target = decl->SamplerView.Resource;
1472 break;
1473 case TGSI_FILE_MEMORY:
1474 for (i = first; i <= last; ++i)
1475 memoryFiles[i].mem_type = decl->Declaration.MemType;
1476 break;
1477 case TGSI_FILE_NULL:
1478 case TGSI_FILE_TEMPORARY:
1479 for (i = first; i <= last; ++i)
1480 tempArrayId[i] = arrayId;
1481 if (arrayId)
1482 tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1483 first, last - first + 1)));
1484 break;
1485 case TGSI_FILE_ADDRESS:
1486 case TGSI_FILE_CONSTANT:
1487 case TGSI_FILE_IMMEDIATE:
1488 case TGSI_FILE_SAMPLER:
1489 case TGSI_FILE_BUFFER:
1490 break;
1491 default:
1492 ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1493 return false;
1494 }
1495 return true;
1496 }
1497
1498 inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1499 {
1500 return insn.getOpcode() == TGSI_OPCODE_MOV &&
1501 insn.getDst(0).getIndex(0) == info->io.edgeFlagOut &&
1502 insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1503 }
1504
1505 void Source::scanInstructionSrc(const Instruction& insn,
1506 const Instruction::SrcRegister& src,
1507 unsigned mask)
1508 {
1509 if (src.getFile() == TGSI_FILE_TEMPORARY) {
1510 if (src.isIndirect(0))
1511 indirectTempArrays.insert(src.getArrayId());
1512 } else
1513 if (src.getFile() == TGSI_FILE_BUFFER ||
1514 src.getFile() == TGSI_FILE_IMAGE ||
1515 (src.getFile() == TGSI_FILE_MEMORY &&
1516 memoryFiles[src.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1517 info->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1518 0x1 : 0x2;
1519 } else
1520 if (src.getFile() == TGSI_FILE_OUTPUT) {
1521 if (src.isIndirect(0)) {
1522 // We don't know which one is accessed, just mark everything for
1523 // reading. This is an extremely unlikely occurrence.
1524 for (unsigned i = 0; i < info->numOutputs; ++i)
1525 info->out[i].oread = 1;
1526 } else {
1527 info->out[src.getIndex(0)].oread = 1;
1528 }
1529 }
1530 if (src.getFile() != TGSI_FILE_INPUT)
1531 return;
1532
1533 if (src.isIndirect(0)) {
1534 for (unsigned i = 0; i < info->numInputs; ++i)
1535 info->in[i].mask = 0xf;
1536 } else {
1537 const int i = src.getIndex(0);
1538 for (unsigned c = 0; c < 4; ++c) {
1539 if (!(mask & (1 << c)))
1540 continue;
1541 int k = src.getSwizzle(c);
1542 if (k <= TGSI_SWIZZLE_W)
1543 info->in[i].mask |= 1 << k;
1544 }
1545 switch (info->in[i].sn) {
1546 case TGSI_SEMANTIC_PSIZE:
1547 case TGSI_SEMANTIC_PRIMID:
1548 case TGSI_SEMANTIC_FOG:
1549 info->in[i].mask &= 0x1;
1550 break;
1551 case TGSI_SEMANTIC_PCOORD:
1552 info->in[i].mask &= 0x3;
1553 break;
1554 default:
1555 break;
1556 }
1557 }
1558 }
1559
1560 bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1561 {
1562 Instruction insn(inst);
1563
1564 if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1565 info->numBarriers = 1;
1566
1567 if (insn.getOpcode() == TGSI_OPCODE_FBFETCH)
1568 info->prop.fp.readsFramebuffer = true;
1569
1570 if (insn.dstCount()) {
1571 Instruction::DstRegister dst = insn.getDst(0);
1572
1573 if (dst.getFile() == TGSI_FILE_OUTPUT) {
1574 if (dst.isIndirect(0))
1575 for (unsigned i = 0; i < info->numOutputs; ++i)
1576 info->out[i].mask = 0xf;
1577 else
1578 info->out[dst.getIndex(0)].mask |= dst.getMask();
1579
1580 if (info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1581 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1582 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1583 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1584 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1585 info->out[dst.getIndex(0)].mask &= 1;
1586
1587 if (isEdgeFlagPassthrough(insn))
1588 info->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1589 } else
1590 if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1591 if (dst.isIndirect(0))
1592 indirectTempArrays.insert(dst.getArrayId());
1593 } else
1594 if (dst.getFile() == TGSI_FILE_BUFFER ||
1595 dst.getFile() == TGSI_FILE_IMAGE ||
1596 (dst.getFile() == TGSI_FILE_MEMORY &&
1597 memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1598 info->io.globalAccess |= 0x2;
1599 }
1600 }
1601
1602 for (unsigned s = 0; s < insn.srcCount(); ++s)
1603 scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1604
1605 for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1606 scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1607
1608 return true;
1609 }
1610
1611 nv50_ir::TexInstruction::Target
1612 Instruction::getTexture(const tgsi::Source *code, int s) const
1613 {
1614 // XXX: indirect access
1615 unsigned int r;
1616
1617 switch (getSrc(s).getFile()) {
1618 /*
1619 case TGSI_FILE_RESOURCE:
1620 r = getSrc(s).getIndex(0);
1621 return translateTexture(code->resources.at(r).target);
1622 */
1623 case TGSI_FILE_SAMPLER_VIEW:
1624 r = getSrc(s).getIndex(0);
1625 return translateTexture(code->textureViews.at(r).target);
1626 default:
1627 return translateTexture(insn->Texture.Texture);
1628 }
1629 }
1630
1631 } // namespace tgsi
1632
1633 namespace {
1634
1635 using namespace nv50_ir;
1636
1637 class Converter : public BuildUtil
1638 {
1639 public:
1640 Converter(Program *, const tgsi::Source *);
1641 ~Converter();
1642
1643 bool run();
1644
1645 private:
1646 struct Subroutine
1647 {
1648 Subroutine(Function *f) : f(f) { }
1649 Function *f;
1650 ValueMap values;
1651 };
1652
1653 Value *shiftAddress(Value *);
1654 Value *getVertexBase(int s);
1655 Value *getOutputBase(int s);
1656 DataArray *getArrayForFile(unsigned file, int idx);
1657 Value *fetchSrc(int s, int c);
1658 Value *acquireDst(int d, int c);
1659 void storeDst(int d, int c, Value *);
1660
1661 Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1662 void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1663 Value *val, Value *ptr);
1664
1665 void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1666 Value *applySrcMod(Value *, int s, int c);
1667
1668 Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1669 Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1670 Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1671
1672 bool isSubGroupMask(uint8_t semantic);
1673
1674 bool handleInstruction(const struct tgsi_full_instruction *);
1675 void exportOutputs();
1676 inline Subroutine *getSubroutine(unsigned ip);
1677 inline Subroutine *getSubroutine(Function *);
1678 inline bool isEndOfSubroutine(uint ip);
1679
1680 void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1681
1682 // R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1683 void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1684 void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1685 void handleTXF(Value *dst0[4], int R, int L_M);
1686 void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1687 void handleFBFETCH(Value *dst0[4]);
1688 void handleLIT(Value *dst0[4]);
1689 void handleUserClipPlanes();
1690
1691 // Symbol *getResourceBase(int r);
1692 void getImageCoords(std::vector<Value *>&, int r, int s);
1693
1694 void handleLOAD(Value *dst0[4]);
1695 void handleSTORE();
1696 void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1697
1698 void handleINTERP(Value *dst0[4]);
1699
1700 uint8_t translateInterpMode(const struct nv50_ir_varying *var,
1701 operation& op);
1702 Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1703
1704 void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1705
1706 Value *buildDot(int dim);
1707
1708 class BindArgumentsPass : public Pass {
1709 public:
1710 BindArgumentsPass(Converter &conv) : conv(conv) { }
1711
1712 private:
1713 Converter &conv;
1714 Subroutine *sub;
1715
1716 inline const Location *getValueLocation(Subroutine *, Value *);
1717
1718 template<typename T> inline void
1719 updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1720 T (Function::*proto));
1721
1722 template<typename T> inline void
1723 updatePrototype(BitSet *set, void (Function::*updateSet)(),
1724 T (Function::*proto));
1725
1726 protected:
1727 bool visit(Function *);
1728 bool visit(BasicBlock *bb) { return false; }
1729 };
1730
1731 private:
1732 const tgsi::Source *code;
1733 const struct nv50_ir_prog_info *info;
1734
1735 struct {
1736 std::map<unsigned, Subroutine> map;
1737 Subroutine *cur;
1738 } sub;
1739
1740 uint ip; // instruction pointer
1741
1742 tgsi::Instruction tgsi;
1743
1744 DataType dstTy;
1745 DataType srcTy;
1746
1747 DataArray tData; // TGSI_FILE_TEMPORARY
1748 DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1749 DataArray aData; // TGSI_FILE_ADDRESS
1750 DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1751
1752 Value *zero;
1753 Value *fragCoord[4];
1754 Value *clipVtx[4];
1755
1756 Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1757 uint8_t vtxBaseValid;
1758
1759 Value *outBase; // base address of vertex out patch (for TCP)
1760
1761 Stack condBBs; // fork BB, then else clause BB
1762 Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1763 Stack loopBBs; // loop headers
1764 Stack breakBBs; // end of / after loop
1765
1766 Value *viewport;
1767 };
1768
1769 Symbol *
1770 Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1771 {
1772 const int swz = src.getSwizzle(c);
1773
1774 /* TODO: Use Array ID when it's available for the index */
1775 return makeSym(src.getFile(),
1776 src.is2D() ? src.getIndex(1) : 0,
1777 src.getIndex(0), swz,
1778 src.getIndex(0) * 16 + swz * 4);
1779 }
1780
1781 Symbol *
1782 Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1783 {
1784 /* TODO: Use Array ID when it's available for the index */
1785 return makeSym(dst.getFile(),
1786 dst.is2D() ? dst.getIndex(1) : 0,
1787 dst.getIndex(0), c,
1788 dst.getIndex(0) * 16 + c * 4);
1789 }
1790
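// build a Symbol for a TGSI register reference, resolving the memory file and per-slot offset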
1791 Symbol *
1792 Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1793 {
1794 Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1795
1796 sym->reg.fileIndex = fileIdx;
1797
1798 if (tgsiFile == TGSI_FILE_MEMORY) {
1799 switch (code->memoryFiles[fileIdx].mem_type) {
1800 case TGSI_MEMORY_TYPE_GLOBAL:
1801 /* No-op this is the default for TGSI_FILE_MEMORY */
1802 sym->setFile(FILE_MEMORY_GLOBAL);
1803 break;
1804 case TGSI_MEMORY_TYPE_SHARED:
1805 sym->setFile(FILE_MEMORY_SHARED);
1806 break;
1807 case TGSI_MEMORY_TYPE_INPUT:
1808 assert(prog->getType() == Program::TYPE_COMPUTE);
1809 assert(idx == -1);
1810 sym->setFile(FILE_SHADER_INPUT);
1811 address += info->prop.cp.inputOffset;
1812 break;
1813 default:
1814 assert(0); /* TODO: Add support for global and private memory */
1815 }
1816 }
1817
1818 if (idx >= 0) {
1819 if (sym->reg.file == FILE_SHADER_INPUT)
1820 sym->setOffset(info->in[idx].slot[c] * 4);
1821 else
1822 if (sym->reg.file == FILE_SHADER_OUTPUT)
1823 sym->setOffset(info->out[idx].slot[c] * 4);
1824 else
1825 if (sym->reg.file == FILE_SYSTEM_VALUE)
1826 sym->setSV(tgsi::translateSysVal(info->sv[idx].sn), c);
1827 else
1828 sym->setOffset(address);
1829 } else {
1830 sym->setOffset(address);
1831 }
1832 return sym;
1833 }
1834
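// derive the interpolation mode (flat/linear/perspective, optionally centroid) and the matching LINTERP/PINTERP op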
1835 uint8_t
1836 Converter::translateInterpMode(const struct nv50_ir_varying *var, operation& op)
1837 {
1838 uint8_t mode = NV50_IR_INTERP_PERSPECTIVE;
1839
1840 if (var->flat)
1841 mode = NV50_IR_INTERP_FLAT;
1842 else
1843 if (var->linear)
1844 mode = NV50_IR_INTERP_LINEAR;
1845 else
1846 if (var->sc)
1847 mode = NV50_IR_INTERP_SC;
1848
1849 op = (mode == NV50_IR_INTERP_PERSPECTIVE || mode == NV50_IR_INTERP_SC)
1850 ? OP_PINTERP : OP_LINTERP;
1851
1852 if (var->centroid)
1853 mode |= NV50_IR_INTERP_CENTROID;
1854
1855 return mode;
1856 }
1857
1858 Value *
1859 Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1860 {
1861 operation op;
1862
1863 // XXX: no way to know interpolation mode if we don't know what's accessed
1864 const uint8_t mode = translateInterpMode(&info->in[ptr ? 0 :
1865 src.getIndex(0)], op);
1866
1867 Instruction *insn = new_Instruction(func, op, TYPE_F32);
1868
1869 insn->setDef(0, getScratch());
1870 insn->setSrc(0, srcToSym(src, c));
1871 if (op == OP_PINTERP)
1872 insn->setSrc(1, fragCoord[3]);
1873 if (ptr)
1874 insn->setIndirect(0, 0, ptr);
1875
1876 insn->setInterpolate(mode);
1877
1878 bb->insertTail(insn);
1879 return insn->getDef(0);
1880 }
1881
1882 Value *
1883 Converter::applySrcMod(Value *val, int s, int c)
1884 {
1885 Modifier m = tgsi.getSrc(s).getMod(c);
1886 DataType ty = tgsi.inferSrcType();
1887
1888 if (m & Modifier(NV50_IR_MOD_ABS))
1889 val = mkOp1v(OP_ABS, ty, getScratch(), val);
1890
1891 if (m & Modifier(NV50_IR_MOD_NEG))
1892 val = mkOp1v(OP_NEG, ty, getScratch(), val);
1893
1894 return val;
1895 }
1896
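// lazily compute and cache the per-vertex base address (via PFETCH) for 2D-indexed inputs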
1897 Value *
1898 Converter::getVertexBase(int s)
1899 {
1900 assert(s < 5);
1901 if (!(vtxBaseValid & (1 << s))) {
1902 const int index = tgsi.getSrc(s).getIndex(1);
1903 Value *rel = NULL;
1904 if (tgsi.getSrc(s).isIndirect(1))
1905 rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1906 vtxBaseValid |= 1 << s;
1907 vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1908 mkImm(index), rel);
1909 }
1910 return vtxBase[s];
1911 }
1912
1913 Value *
1914 Converter::getOutputBase(int s)
1915 {
1916 assert(s < 5);
1917 if (!(vtxBaseValid & (1 << s))) {
1918 Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1919 if (tgsi.getSrc(s).isIndirect(1))
1920 offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1921 fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1922 offset);
1923 vtxBaseValid |= 1 << s;
1924 vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1925 }
1926 return vtxBase[s];
1927 }
1928
1929 Value *
1930 Converter::fetchSrc(int s, int c)
1931 {
1932 Value *res;
1933 Value *ptr = NULL, *dimRel = NULL;
1934
1935 tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1936
1937 if (src.isIndirect(0))
1938 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1939
1940 if (src.is2D()) {
1941 switch (src.getFile()) {
1942 case TGSI_FILE_OUTPUT:
1943 dimRel = getOutputBase(s);
1944 break;
1945 case TGSI_FILE_INPUT:
1946 dimRel = getVertexBase(s);
1947 break;
1948 case TGSI_FILE_CONSTANT:
1949 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1950 if (src.isIndirect(1))
1951 dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1952 break;
1953 default:
1954 break;
1955 }
1956 }
1957
1958 res = fetchSrc(src, c, ptr);
1959
1960 if (dimRel)
1961 res->getInsn()->setIndirect(0, 1, dimRel);
1962
1963 return applySrcMod(res, s, c);
1964 }
1965
1966 Converter::DataArray *
1967 Converter::getArrayForFile(unsigned file, int idx)
1968 {
1969 switch (file) {
1970 case TGSI_FILE_TEMPORARY:
1971 return idx == 0 ? &tData : &lData;
1972 case TGSI_FILE_ADDRESS:
1973 return &aData;
1974 case TGSI_FILE_OUTPUT:
1975 assert(prog->getType() == Program::TYPE_FRAGMENT);
1976 return &oData;
1977 default:
1978 assert(!"invalid/unhandled TGSI source file");
1979 return NULL;
1980 }
1981 }
1982
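// shiftAddress converts a (vec4) register index into a byte offset
// (index << 4) for use as an indirect address.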
1983 Value *
1984 Converter::shiftAddress(Value *index)
1985 {
1986 if (!index)
1987 return NULL;
1988 return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
1989 }
1990
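// Temporaries belonging to an indirectly addressed array are relocated into
// the second register file (idx2d = 1), offset by the value recorded for
// their ArrayID in indirectTempOffsets.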
1991 void
1992 Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
1993 {
1994 std::map<int, int>::const_iterator it =
1995 code->indirectTempOffsets.find(arrayId);
1996 if (it == code->indirectTempOffsets.end())
1997 return;
1998
1999 idx2d = 1;
2000 idx += it->second;
2001 }
2002
2003 bool
2004 Converter::isSubGroupMask(uint8_t semantic)
2005 {
2006 switch (semantic) {
2007 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
2008 case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
2009 case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
2010 case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
2011 case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
2012 return true;
2013 default:
2014 return false;
2015 }
2016 }
2017
2018 Value *
2019 Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
2020 {
2021 int idx2d = src.is2D() ? src.getIndex(1) : 0;
2022 int idx = src.getIndex(0);
2023 const int swz = src.getSwizzle(c);
2024 Instruction *ld;
2025
2026 switch (src.getFile()) {
2027 case TGSI_FILE_IMMEDIATE:
2028 assert(!ptr);
2029 return loadImm(NULL, info->immd.data[idx * 4 + swz]);
2030 case TGSI_FILE_CONSTANT:
2031 return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
2032 case TGSI_FILE_INPUT:
2033 if (prog->getType() == Program::TYPE_FRAGMENT) {
2034 // don't load masked inputs, they won't be assigned a slot
2035 if (!ptr && !(info->in[idx].mask & (1 << swz)))
2036 return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
2037 return interpolate(src, c, shiftAddress(ptr));
2038 } else
2039 if (prog->getType() == Program::TYPE_GEOMETRY) {
2040 if (!ptr && info->in[idx].sn == TGSI_SEMANTIC_PRIMID)
2041 return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
2042 // XXX: This is going to be a problem with scalar arrays, i.e. when
2043 // we cannot assume that the address is given in units of vec4.
2044 //
2045 // nv50 and nvc0 need different things here, so let the lowering
2046 // passes decide what to do with the address
2047 if (ptr)
2048 return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
2049 }
2050 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2051 ld->perPatch = info->in[idx].patch;
2052 return ld->getDef(0);
2053 case TGSI_FILE_OUTPUT:
2054 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2055 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2056 ld->perPatch = info->out[idx].patch;
2057 return ld->getDef(0);
2058 case TGSI_FILE_SYSTEM_VALUE:
2059 assert(!ptr);
2060 if (info->sv[idx].sn == TGSI_SEMANTIC_THREAD_ID &&
2061 info->prop.cp.numThreads[swz] == 1)
2062 return loadImm(NULL, 0u);
2063 if (isSubGroupMask(info->sv[idx].sn) && swz > 0)
2064 return loadImm(NULL, 0u);
2065 if (info->sv[idx].sn == TGSI_SEMANTIC_SUBGROUP_SIZE)
2066 return loadImm(NULL, 32u);
2067 ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
2068 ld->perPatch = info->sv[idx].patch;
2069 return ld->getDef(0);
2070 case TGSI_FILE_TEMPORARY: {
2071 int arrayid = src.getArrayId();
2072 if (!arrayid)
2073 arrayid = code->tempArrayId[idx];
2074 adjustTempIndex(arrayid, idx, idx2d);
2075 }
2076 /* fallthrough */
2077 default:
2078 return getArrayForFile(src.getFile(), idx2d)->load(
2079 sub.cur->values, idx, swz, shiftAddress(ptr));
2080 }
2081 }
2082
2083 Value *
2084 Converter::acquireDst(int d, int c)
2085 {
2086 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2087 const unsigned f = dst.getFile();
2088 int idx = dst.getIndex(0);
2089 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2090
2091 if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
2092 f == TGSI_FILE_IMAGE)
2093 return NULL;
2094
2095 if (dst.isIndirect(0) ||
2096 f == TGSI_FILE_SYSTEM_VALUE ||
2097 (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
2098 return getScratch();
2099
2100 if (f == TGSI_FILE_TEMPORARY) {
2101 int arrayid = dst.getArrayId();
2102 if (!arrayid)
2103 arrayid = code->tempArrayId[idx];
2104 adjustTempIndex(arrayid, idx, idx2d);
2105 }
2106
2107 return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
2108 }
2109
2110 void
2111 Converter::storeDst(int d, int c, Value *val)
2112 {
2113 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2114
2115 if (tgsi.getSaturate()) {
2116 mkOp1(OP_SAT, dstTy, val, val);
2117 }
2118
2119 Value *ptr = NULL;
2120 if (dst.isIndirect(0))
2121 ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
2122
2123 if (info->io.genUserClip > 0 &&
2124 dst.getFile() == TGSI_FILE_OUTPUT &&
2125 !dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
2126 mkMov(clipVtx[c], val);
2127 val = clipVtx[c];
2128 }
2129
2130 storeDst(dst, c, val, ptr);
2131 }
2132
2133 void
2134 Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2135 Value *val, Value *ptr)
2136 {
2137 const unsigned f = dst.getFile();
2138 int idx = dst.getIndex(0);
2139 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2140
2141 if (f == TGSI_FILE_SYSTEM_VALUE) {
2142 assert(!ptr);
2143 mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2144 } else
2145 if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2146
2147 if (ptr || (info->out[idx].mask & (1 << c))) {
2148 /* Save the viewport index into a scratch register so that it can be
2149 exported at EMIT time */
2150 if (info->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2151 prog->getType() == Program::TYPE_GEOMETRY &&
2152 viewport != NULL)
2153 mkOp1(OP_MOV, TYPE_U32, viewport, val);
2154 else
2155 mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2156 info->out[idx].patch;
2157 }
2158 } else
2159 if (f == TGSI_FILE_TEMPORARY ||
2160 f == TGSI_FILE_ADDRESS ||
2161 f == TGSI_FILE_OUTPUT) {
2162 if (f == TGSI_FILE_TEMPORARY) {
2163 int arrayid = dst.getArrayId();
2164 if (!arrayid)
2165 arrayid = code->tempArrayId[idx];
2166 adjustTempIndex(arrayid, idx, idx2d);
2167 }
2168
2169 getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2170 } else {
2171 assert(!"invalid dst file");
2172 }
2173 }
2174
2175 #define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2176 for (chan = 0; chan < 4; ++chan) \
2177 if (!inst.getDst(d).isMasked(chan))
2178
2179 Value *
2180 Converter::buildDot(int dim)
2181 {
2182 assert(dim > 0);
2183
2184 Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2185 Value *dotp = getScratch();
2186
2187 mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)
2188 ->dnz = info->io.mul_zero_wins;
2189
2190 for (int c = 1; c < dim; ++c) {
2191 src0 = fetchSrc(0, c);
2192 src1 = fetchSrc(1, c);
2193 mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)
2194 ->dnz = info->io.mul_zero_wins;
2195 }
2196 return dotp;
2197 }
2198
2199 void
2200 Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2201 {
2202 FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2203 join->fixed = 1;
2204 conv->insertHead(join);
2205
2206 assert(!fork->joinAt);
2207 fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2208 fork->insertBefore(fork->getExit(), fork->joinAt);
2209 }
2210
2211 void
2212 Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2213 {
2214 unsigned rIdx = 0, sIdx = 0;
2215
2216 if (R >= 0)
2217 rIdx = tgsi.getSrc(R).getIndex(0);
2218 if (S >= 0)
2219 sIdx = tgsi.getSrc(S).getIndex(0);
2220
2221 tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2222
2223 if (tgsi.getSrc(R).isIndirect(0)) {
2224 tex->tex.rIndirectSrc = s;
2225 tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2226 }
2227 if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2228 tex->tex.sIndirectSrc = s;
2229 tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2230 }
2231 }
2232
2233 void
2234 Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2235 {
2236 TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2237 tex->tex.query = query;
2238 unsigned int c, d;
2239
2240 for (d = 0, c = 0; c < 4; ++c) {
2241 if (!dst0[c])
2242 continue;
2243 tex->tex.mask |= 1 << c;
2244 tex->setDef(d++, dst0[c]);
2245 }
2246 if (query == TXQ_DIMS)
2247 tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2248 else
2249 tex->setSrc((c = 0), zero);
2250
2251 setTexRS(tex, ++c, R, -1);
2252
2253 bb->insertTail(tex);
2254 }
2255
2256 void
2257 Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2258 {
2259 Value *proj = fetchSrc(0, 3);
2260 Instruction *insn = proj->getUniqueInsn();
2261 int c;
2262
2263 if (insn->op == OP_PINTERP) {
2264 bb->insertTail(insn = cloneForward(func, insn));
2265 insn->op = OP_LINTERP;
2266 insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2267 insn->setSrc(1, NULL);
2268 proj = insn->getDef(0);
2269 }
2270 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2271
2272 for (c = 0; c < 4; ++c) {
2273 if (!(mask & (1 << c)))
2274 continue;
2275 if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2276 continue;
2277 mask &= ~(1 << c);
2278
2279 bb->insertTail(insn = cloneForward(func, insn));
2280 insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2281 insn->setSrc(1, proj);
2282 dst[c] = insn->getDef(0);
2283 }
2284 if (!mask)
2285 return;
2286
2287 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2288
2289 for (c = 0; c < 4; ++c)
2290 if (mask & (1 << c))
2291 dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2292 }
2293
2294 // order of nv50 ir sources: x y z layer lod/bias shadow
2295 // order of TGSI TEX sources: x y z layer shadow lod/bias
2296 // lowering will finally set the hw specific order (like array first on nvc0)
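// The R/S/L/C/Dx/Dy arguments below pack a TGSI source reference as
// (source index << 4) | component, e.g. 0x30 is component x of source 3;
// see the fetchSrc(X >> 4, X & 3) uses in handleTEX.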
2297 void
2298 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2299 {
2300 Value *arg[4], *src[8];
2301 Value *lod = NULL, *shd = NULL;
2302 unsigned int s, c, d;
2303 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2304
2305 TexInstruction::Target tgt = tgsi.getTexture(code, R);
2306
2307 for (s = 0; s < tgt.getArgCount(); ++s)
2308 arg[s] = src[s] = fetchSrc(0, s);
2309
2310 if (tgsi.getOpcode() == TGSI_OPCODE_TEX_LZ)
2311 lod = loadImm(NULL, 0);
2312 else if (texi->op == OP_TXL || texi->op == OP_TXB)
2313 lod = fetchSrc(L >> 4, L & 3);
2314
2315 if (C == 0x0f)
2316 C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2317
2318 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 &&
2319 tgt == TEX_TARGET_CUBE_ARRAY_SHADOW)
2320 shd = fetchSrc(1, 0);
2321 else if (tgt.isShadow())
2322 shd = fetchSrc(C >> 4, C & 3);
2323
2324 if (texi->op == OP_TXD) {
2325 for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2326 texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2327 texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2328 }
2329 }
2330
2331 // cube textures don't care about the projection value, it's divided out
2332 if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2333 unsigned int n = tgt.getDim();
2334 if (shd) {
2335 arg[n] = shd;
2336 ++n;
2337 assert(tgt.getDim() == tgt.getArgCount());
2338 }
2339 loadProjTexCoords(src, arg, (1 << n) - 1);
2340 if (shd)
2341 shd = src[n - 1];
2342 }
2343
2344 for (c = 0, d = 0; c < 4; ++c) {
2345 if (dst[c]) {
2346 texi->setDef(d++, dst[c]);
2347 texi->tex.mask |= 1 << c;
2348 } else {
2349 // NOTE: maybe hook up def too, for CSE
2350 }
2351 }
2352 for (s = 0; s < tgt.getArgCount(); ++s)
2353 texi->setSrc(s, src[s]);
2354 if (lod)
2355 texi->setSrc(s++, lod);
2356 if (shd)
2357 texi->setSrc(s++, shd);
2358
2359 setTexRS(texi, s, R, S);
2360
2361 if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2362 texi->tex.levelZero = true;
2363 if (prog->getType() != Program::TYPE_FRAGMENT &&
2364 (tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2365 tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2366 tgsi.getOpcode() == TGSI_OPCODE_TXP))
2367 texi->tex.levelZero = true;
2368 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2369 texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, info);
2370
2371 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2372 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2373 for (c = 0; c < 3; ++c) {
2374 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2375 texi->offset[s][c].setInsn(texi);
2376 }
2377 }
2378
2379 bb->insertTail(texi);
2380 }
2381
2382 // 1st source: xyz = coordinates, w = lod/sample
2383 // 2nd source: offset
2384 void
2385 Converter::handleTXF(Value *dst[4], int R, int L_M)
2386 {
2387 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2388 int ms;
2389 unsigned int c, d, s;
2390
2391 texi->tex.target = tgsi.getTexture(code, R);
2392
2393 ms = texi->tex.target.isMS() ? 1 : 0;
2394 texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2395
2396 for (c = 0, d = 0; c < 4; ++c) {
2397 if (dst[c]) {
2398 texi->setDef(d++, dst[c]);
2399 texi->tex.mask |= 1 << c;
2400 }
2401 }
2402 for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2403 texi->setSrc(c, fetchSrc(0, c));
2404 if (!ms && tgsi.getOpcode() == TGSI_OPCODE_TXF_LZ)
2405 texi->setSrc(c++, loadImm(NULL, 0));
2406 else
2407 texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2408
2409 setTexRS(texi, c, R, -1);
2410
2411 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2412 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2413 for (c = 0; c < 3; ++c) {
2414 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2415 texi->offset[s][c].setInsn(texi);
2416 }
2417 }
2418
2419 bb->insertTail(texi);
2420 }
2421
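// FBFETCH is lowered to a level-zero texel fetch (OP_TXF) from a 2D MS array
// target at the fragment's own x/y position, layer and sample index.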
2422 void
2423 Converter::handleFBFETCH(Value *dst[4])
2424 {
2425 TexInstruction *texi = new_TexInstruction(func, OP_TXF);
2426 unsigned int c, d;
2427
2428 texi->tex.target = TEX_TARGET_2D_MS_ARRAY;
2429 texi->tex.levelZero = 1;
2430 texi->tex.useOffsets = 0;
2431
2432 for (c = 0, d = 0; c < 4; ++c) {
2433 if (dst[c]) {
2434 texi->setDef(d++, dst[c]);
2435 texi->tex.mask |= 1 << c;
2436 }
2437 }
2438
2439 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));
2440 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));
2441 Value *z = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_LAYER, 0));
2442 Value *ms = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_SAMPLE_INDEX, 0));
2443
2444 mkCvt(OP_CVT, TYPE_U32, x, TYPE_F32, x)->rnd = ROUND_Z;
2445 mkCvt(OP_CVT, TYPE_U32, y, TYPE_F32, y)->rnd = ROUND_Z;
2446 texi->setSrc(0, x);
2447 texi->setSrc(1, y);
2448 texi->setSrc(2, z);
2449 texi->setSrc(3, ms);
2450
2451 texi->tex.r = texi->tex.s = -1;
2452
2453 bb->insertTail(texi);
2454 }
2455
2456 void
2457 Converter::handleLIT(Value *dst0[4])
2458 {
2459 Value *val0 = NULL;
2460 unsigned int mask = tgsi.getDst(0).getMask();
2461
2462 if (mask & (1 << 0))
2463 loadImm(dst0[0], 1.0f);
2464
2465 if (mask & (1 << 3))
2466 loadImm(dst0[3], 1.0f);
2467
2468 if (mask & (3 << 1)) {
2469 val0 = getScratch();
2470 mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2471 if (mask & (1 << 1))
2472 mkMov(dst0[1], val0);
2473 }
2474
2475 if (mask & (1 << 2)) {
2476 Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2477 Value *val1 = getScratch(), *val3 = getScratch();
2478
2479 Value *pos128 = loadImm(NULL, +127.999999f);
2480 Value *neg128 = loadImm(NULL, -127.999999f);
2481
2482 mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2483 mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2484 mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2485 mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2486
2487 mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2488 }
2489 }
2490
2491 /* Keep this around for now as reference when adding img support
2492 static inline bool
2493 isResourceSpecial(const int r)
2494 {
2495 return (r == TGSI_RESOURCE_GLOBAL ||
2496 r == TGSI_RESOURCE_LOCAL ||
2497 r == TGSI_RESOURCE_PRIVATE ||
2498 r == TGSI_RESOURCE_INPUT);
2499 }
2500
2501 static inline bool
2502 isResourceRaw(const tgsi::Source *code, const int r)
2503 {
2504 return isResourceSpecial(r) || code->resources[r].raw;
2505 }
2506
2507 static inline nv50_ir::TexTarget
2508 getResourceTarget(const tgsi::Source *code, int r)
2509 {
2510 if (isResourceSpecial(r))
2511 return nv50_ir::TEX_TARGET_BUFFER;
2512 return tgsi::translateTexture(code->resources.at(r).target);
2513 }
2514
2515 Symbol *
2516 Converter::getResourceBase(const int r)
2517 {
2518 Symbol *sym = NULL;
2519
2520 switch (r) {
2521 case TGSI_RESOURCE_GLOBAL:
2522 sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2523 info->io.auxCBSlot);
2524 break;
2525 case TGSI_RESOURCE_LOCAL:
2526 assert(prog->getType() == Program::TYPE_COMPUTE);
2527 sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2528 info->prop.cp.sharedOffset);
2529 break;
2530 case TGSI_RESOURCE_PRIVATE:
2531 sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2532 info->bin.tlsSpace);
2533 break;
2534 case TGSI_RESOURCE_INPUT:
2535 assert(prog->getType() == Program::TYPE_COMPUTE);
2536 sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2537 info->prop.cp.inputOffset);
2538 break;
2539 default:
2540 sym = new_Symbol(prog,
2541 nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2542 break;
2543 }
2544 return sym;
2545 }
2546
2547 void
2548 Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2549 {
2550 const int arg =
2551 TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2552
2553 for (int c = 0; c < arg; ++c)
2554 coords.push_back(fetchSrc(s, c));
2555
2556 // NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2557 if (r == TGSI_RESOURCE_LOCAL ||
2558 r == TGSI_RESOURCE_PRIVATE ||
2559 r == TGSI_RESOURCE_INPUT)
2560 coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2561 coords[0]);
2562 }
2563 */
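// partitionLoadStore splits a component mask into at most two contiguous
// ranges, filling comp[i] (start component) and size[i] (component count) and
// returning the number of ranges; a run of exactly 3 components is split into
// two accesses (1+2 or 2+1, depending on the start component).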
2564 static inline int
2565 partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2566 {
2567 int n = 0;
2568
2569 while (mask) {
2570 if (mask & 1) {
2571 size[n]++;
2572 } else {
2573 if (size[n])
2574 comp[n = 1] = size[0] + 1;
2575 else
2576 comp[n]++;
2577 }
2578 mask >>= 1;
2579 }
2580 if (size[0] == 3) {
2581 n = 1;
2582 size[0] = (comp[0] == 1) ? 1 : 2;
2583 size[1] = 3 - size[0];
2584 comp[1] = comp[0] + size[0];
2585 }
2586 return n + 1;
2587 }
2588
2589 static inline nv50_ir::TexTarget
2590 getImageTarget(const tgsi::Source *code, int r)
2591 {
2592 return tgsi::translateTexture(code->images.at(r).target);
2593 }
2594
2595 static inline const nv50_ir::TexInstruction::ImgFormatDesc *
2596 getImageFormat(const tgsi::Source *code, int r)
2597 {
2598 return &nv50_ir::TexInstruction::formatTable[
2599 tgsi::translateImgFormat(code->images.at(r).format)];
2600 }
2601
2602 void
2603 Converter::getImageCoords(std::vector<Value *> &coords, int r, int s)
2604 {
2605 TexInstruction::Target t =
2606 TexInstruction::Target(getImageTarget(code, r));
2607 const int arg = t.getDim() + (t.isArray() || t.isCube());
2608
2609 for (int c = 0; c < arg; ++c)
2610 coords.push_back(fetchSrc(s, c));
2611
2612 if (t.isMS())
2613 coords.push_back(fetchSrc(s, 3));
2614 }
2615
2616 // For raw loads, granularity is 4 bytes.
2617 // Usage of the texture read mask on OP_SULDP is not allowed.
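// Buffer/memory loads below are emitted as one 32-bit load per enabled
// destination component; image loads are emitted as a single OP_SULDP.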
2618 void
2619 Converter::handleLOAD(Value *dst0[4])
2620 {
2621 const int r = tgsi.getSrc(0).getIndex(0);
2622 int c;
2623 std::vector<Value *> off, src, ldv, def;
2624 Value *ind = NULL;
2625
2626 if (tgsi.getSrc(0).isIndirect(0))
2627 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2628
2629 switch (tgsi.getSrc(0).getFile()) {
2630 case TGSI_FILE_BUFFER:
2631 case TGSI_FILE_MEMORY:
2632 for (c = 0; c < 4; ++c) {
2633 if (!dst0[c])
2634 continue;
2635
2636 Value *off;
2637 Symbol *sym;
2638 uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2639
2640 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2641 off = NULL;
2642 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2643 tgsi.getSrc(1).getValueU32(0, info) +
2644 src0_component_offset);
2645 } else {
2646 // yzw are ignored for buffers
2647 off = fetchSrc(1, 0);
2648 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2649 src0_component_offset);
2650 }
2651
2652 Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2653 ld->cache = tgsi.getCacheMode();
2654 if (ind)
2655 ld->setIndirect(0, 1, ind);
2656 }
2657 break;
2658 case TGSI_FILE_IMAGE: {
2659 assert(!code->images[r].raw);
2660
2661 getImageCoords(off, r, 1);
2662 def.resize(4);
2663
2664 for (c = 0; c < 4; ++c) {
2665 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2666 def[c] = getScratch();
2667 else
2668 def[c] = dst0[c];
2669 }
2670
2671 TexInstruction *ld =
2672 mkTex(OP_SULDP, getImageTarget(code, r), code->images[r].slot, 0,
2673 def, off);
2674 ld->tex.mask = tgsi.getDst(0).getMask();
2675 ld->tex.format = getImageFormat(code, r);
2676 ld->cache = tgsi.getCacheMode();
2677 if (ind)
2678 ld->setIndirectR(ind);
2679
2680 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2681 if (dst0[c] != def[c])
2682 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2683 }
2684 break;
2685 default:
2686 assert(!"Unsupported srcFile for LOAD");
2687 }
2688
2689 /* Keep this around for now as reference when adding img support
2690 getResourceCoords(off, r, 1);
2691
2692 if (isResourceRaw(code, r)) {
2693 uint8_t mask = 0;
2694 uint8_t comp[2] = { 0, 0 };
2695 uint8_t size[2] = { 0, 0 };
2696
2697 Symbol *base = getResourceBase(r);
2698
2699 // determine the base and size of the at most 2 load ops
2700 for (c = 0; c < 4; ++c)
2701 if (!tgsi.getDst(0).isMasked(c))
2702 mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2703
2704 int n = partitionLoadStore(comp, size, mask);
2705
2706 src = off;
2707
2708 def.resize(4); // index by component, the ones we need will be non-NULL
2709 for (c = 0; c < 4; ++c) {
2710 if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2711 def[c] = dst0[c];
2712 else
2713 if (mask & (1 << c))
2714 def[c] = getScratch();
2715 }
2716
2717 const bool useLd = isResourceSpecial(r) ||
2718 (info->io.nv50styleSurfaces &&
2719 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2720
2721 for (int i = 0; i < n; ++i) {
2722 ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2723
2724 if (comp[i]) // adjust x component of source address if necessary
2725 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2726 off[0], mkImm(comp[i] * 4));
2727 else
2728 src[0] = off[0];
2729
2730 if (useLd) {
2731 Instruction *ld =
2732 mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2733 for (size_t c = 1; c < ldv.size(); ++c)
2734 ld->setDef(c, ldv[c]);
2735 } else {
2736 mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2737 0, ldv, src)->dType = typeOfSize(size[i] * 4);
2738 }
2739 }
2740 } else {
2741 def.resize(4);
2742 for (c = 0; c < 4; ++c) {
2743 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2744 def[c] = getScratch();
2745 else
2746 def[c] = dst0[c];
2747 }
2748
2749 mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2750 def, off);
2751 }
2752 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2753 if (dst0[c] != def[c])
2754 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2755 */
2756 }
2757
2758 // For formatted stores, the write mask on OP_SUSTP can be used.
2759 // Raw stores have to be split.
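// Buffer/memory stores below are emitted as one 32-bit OP_STORE per enabled
// component; image stores use a single OP_SUSTP with the TGSI write mask.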
2760 void
2761 Converter::handleSTORE()
2762 {
2763 const int r = tgsi.getDst(0).getIndex(0);
2764 int c;
2765 std::vector<Value *> off, src, dummy;
2766 Value *ind = NULL;
2767
2768 if (tgsi.getDst(0).isIndirect(0))
2769 ind = fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0);
2770
2771 switch (tgsi.getDst(0).getFile()) {
2772 case TGSI_FILE_BUFFER:
2773 case TGSI_FILE_MEMORY:
2774 for (c = 0; c < 4; ++c) {
2775 if (!(tgsi.getDst(0).getMask() & (1 << c)))
2776 continue;
2777
2778 Symbol *sym;
2779 Value *off;
2780 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2781 off = NULL;
2782 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2783 tgsi.getSrc(0).getValueU32(0, info) + 4 * c);
2784 } else {
2785 // yzw are ignored for buffers
2786 off = fetchSrc(0, 0);
2787 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2788 }
2789
2790 Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2791 st->cache = tgsi.getCacheMode();
2792 if (ind)
2793 st->setIndirect(0, 1, ind);
2794 }
2795 break;
2796 case TGSI_FILE_IMAGE: {
2797 assert(!code->images[r].raw);
2798
2799 getImageCoords(off, r, 0);
2800 src = off;
2801
2802 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2803 src.push_back(fetchSrc(1, c));
2804
2805 TexInstruction *st =
2806 mkTex(OP_SUSTP, getImageTarget(code, r), code->images[r].slot,
2807 0, dummy, src);
2808 st->tex.mask = tgsi.getDst(0).getMask();
2809 st->tex.format = getImageFormat(code, r);
2810 st->cache = tgsi.getCacheMode();
2811 if (ind)
2812 st->setIndirectR(ind);
2813 }
2814 break;
2815 default:
2816 assert(!"Unsupported dstFile for STORE");
2817 }
2818
2819 /* Keep this around for now as reference when adding img support
2820 getResourceCoords(off, r, 0);
2821 src = off;
2822 const int s = src.size();
2823
2824 if (isResourceRaw(code, r)) {
2825 uint8_t comp[2] = { 0, 0 };
2826 uint8_t size[2] = { 0, 0 };
2827
2828 int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2829
2830 Symbol *base = getResourceBase(r);
2831
2832 const bool useSt = isResourceSpecial(r) ||
2833 (info->io.nv50styleSurfaces &&
2834 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2835
2836 for (int i = 0; i < n; ++i) {
2837 if (comp[i]) // adjust x component of source address if necessary
2838 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2839 off[0], mkImm(comp[i] * 4));
2840 else
2841 src[0] = off[0];
2842
2843 const DataType stTy = typeOfSize(size[i] * 4);
2844
2845 if (useSt) {
2846 Instruction *st =
2847 mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2848 for (c = 1; c < size[i]; ++c)
2849 st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2850 st->setIndirect(0, 0, src[0]);
2851 } else {
2852 // attach values to be stored
2853 src.resize(s + size[i]);
2854 for (c = 0; c < size[i]; ++c)
2855 src[s + c] = fetchSrc(1, comp[i] + c);
2856 mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2857 0, dummy, src)->setType(stTy);
2858 }
2859 }
2860 } else {
2861 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2862 src.push_back(fetchSrc(1, c));
2863
2864 mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2865 dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2866 }
2867 */
2868 }
2869
2870 // XXX: These only work on resources with the single-component u32/s32 formats.
2871 // Therefore the result is replicated. This might not be intended by TGSI, but
2872 // operating on more than 1 component would produce undefined results because
2873 // those components do not exist.
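// The atomic result is computed into a single scratch value and then
// replicated to every enabled destination component (dst0[c] = dst below), so
// handleInstruction emits the final moves.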
2874 void
2875 Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2876 {
2877 const int r = tgsi.getSrc(0).getIndex(0);
2878 std::vector<Value *> srcv;
2879 std::vector<Value *> defv;
2880 LValue *dst = getScratch();
2881 Value *ind = NULL;
2882
2883 if (tgsi.getSrc(0).isIndirect(0))
2884 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2885
2886 switch (tgsi.getSrc(0).getFile()) {
2887 case TGSI_FILE_BUFFER:
2888 case TGSI_FILE_MEMORY:
2889 for (int c = 0; c < 4; ++c) {
2890 if (!dst0[c])
2891 continue;
2892
2893 Instruction *insn;
2894 Value *off = fetchSrc(1, c);
2895 Value *sym;
2896 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2897 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2898 tgsi.getSrc(1).getValueU32(c, info));
2899 else
2900 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2901 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2902 insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2903 else
2904 insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2905 if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2906 insn->setIndirect(0, 0, off);
2907 if (ind)
2908 insn->setIndirect(0, 1, ind);
2909 insn->subOp = subOp;
2910 }
2911 for (int c = 0; c < 4; ++c)
2912 if (dst0[c])
2913 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2914 break;
2915 case TGSI_FILE_IMAGE: {
2916 assert(!code->images[r].raw);
2917
2918 getImageCoords(srcv, r, 1);
2919 defv.push_back(dst);
2920 srcv.push_back(fetchSrc(2, 0));
2921
2922 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2923 srcv.push_back(fetchSrc(3, 0));
2924
2925 TexInstruction *tex = mkTex(OP_SUREDP, getImageTarget(code, r),
2926 code->images[r].slot, 0, defv, srcv);
2927 tex->subOp = subOp;
2928 tex->tex.mask = 1;
2929 tex->tex.format = getImageFormat(code, r);
2930 tex->setType(ty);
2931 if (ind)
2932 tex->setIndirectR(ind);
2933
2934 for (int c = 0; c < 4; ++c)
2935 if (dst0[c])
2936 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2937 }
2938 break;
2939 default:
2940 assert(!"Unsupported srcFile for ATOM");
2941 }
2942
2943 /* Keep this around for now as reference when adding img support
2944 getResourceCoords(srcv, r, 1);
2945
2946 if (isResourceSpecial(r)) {
2947 assert(r != TGSI_RESOURCE_INPUT);
2948 Instruction *insn;
2949 insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
2950 insn->subOp = subOp;
2951 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2952 insn->setSrc(2, fetchSrc(3, 0));
2953 insn->setIndirect(0, 0, srcv.at(0));
2954 } else {
2955 operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
2956 TexTarget targ = getResourceTarget(code, r);
2957 int idx = code->resources[r].slot;
2958 defv.push_back(dst);
2959 srcv.push_back(fetchSrc(2, 0));
2960 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2961 srcv.push_back(fetchSrc(3, 0));
2962 TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
2963 tex->subOp = subOp;
2964 tex->tex.mask = 1;
2965 tex->setType(ty);
2966 }
2967
2968 for (int c = 0; c < 4; ++c)
2969 if (dst0[c])
2970 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2971 */
2972 }
2973
2974 void
2975 Converter::handleINTERP(Value *dst[4])
2976 {
2977 // Check whether the input is linear. All other attributes ignored.
2978 Instruction *insn;
2979 Value *offset = NULL, *ptr = NULL, *w = NULL;
2980 Symbol *sym[4] = { NULL };
2981 bool linear;
2982 operation op = OP_NOP;
2983 int c, mode = 0;
2984
2985 tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
2986
2987 // In some odd cases, in large part due to varying packing, the source
2988 // might not actually be an input. This is illegal TGSI, but it's easier to
2989 // account for it here than it is to fix it where the TGSI is being
2990 // generated. In that case, it's going to be a straight up mov (or sequence
2991 // of mov's) from the input in question. We follow the mov chain to see
2992 // which input we need to use.
2993 if (src.getFile() != TGSI_FILE_INPUT) {
2994 if (src.isIndirect(0)) {
2995 ERROR("Ignoring indirect input interpolation\n");
2996 return;
2997 }
2998 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
2999 Value *val = fetchSrc(0, c);
3000 assert(val->defs.size() == 1);
3001 insn = val->getInsn();
3002 while (insn->op == OP_MOV) {
3003 assert(insn->getSrc(0)->defs.size() == 1);
3004 insn = insn->getSrc(0)->getInsn();
3005 if (!insn) {
3006 ERROR("Miscompiling shader due to unhandled INTERP\n");
3007 return;
3008 }
3009 }
3010 if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
3011 ERROR("Trying to interpolate non-input, this is not allowed.\n");
3012 return;
3013 }
3014 sym[c] = insn->getSrc(0)->asSym();
3015 assert(sym[c]);
3016 op = insn->op;
3017 mode = insn->ipa;
3018 }
3019 } else {
3020 if (src.isIndirect(0))
3021 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
3022
3023 // We can assume that the fixed index will point to an input of the same
3024 // interpolation type in the case of an indirect access.
3025 // TODO: Make use of ArrayID.
3026 linear = info->in[src.getIndex(0)].linear;
3027 if (linear) {
3028 op = OP_LINTERP;
3029 mode = NV50_IR_INTERP_LINEAR;
3030 } else {
3031 op = OP_PINTERP;
3032 mode = NV50_IR_INTERP_PERSPECTIVE;
3033 }
3034 }
3035
3036 switch (tgsi.getOpcode()) {
3037 case TGSI_OPCODE_INTERP_CENTROID:
3038 mode |= NV50_IR_INTERP_CENTROID;
3039 break;
3040 case TGSI_OPCODE_INTERP_SAMPLE:
3041 insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), fetchSrc(1, 0));
3042 insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
3043 mode |= NV50_IR_INTERP_OFFSET;
3044 break;
3045 case TGSI_OPCODE_INTERP_OFFSET: {
3046 // The input in src1.xy is float, but we need a single 32-bit value
3047 // where the upper and lower 16 bits are encoded in S0.12 format. We need
3048 // to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
3049 // and then convert to s32.
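// e.g. an x offset of 0.25 becomes 0.25 * 4096 = 1024 (0x400); the y offset is
// packed into the upper 16 bits by the OP_INSBF below.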
3050 Value *offs[2];
3051 for (c = 0; c < 2; c++) {
3052 offs[c] = getScratch();
3053 mkOp2(OP_MIN, TYPE_F32, offs[c], fetchSrc(1, c), loadImm(NULL, 0.4375f));
3054 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
3055 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
3056 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
3057 }
3058 offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
3059 offs[1], mkImm(0x1010), offs[0]);
3060 mode |= NV50_IR_INTERP_OFFSET;
3061 break;
3062 }
3063 }
3064
3065 if (op == OP_PINTERP) {
3066 if (offset) {
3067 w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
3068 mkOp1(OP_RCP, TYPE_F32, w, w);
3069 } else {
3070 w = fragCoord[3];
3071 }
3072 }
3073
3074
3075 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3076 insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
3077 if (op == OP_PINTERP)
3078 insn->setSrc(1, w);
3079 if (ptr)
3080 insn->setIndirect(0, 0, ptr);
3081 if (offset)
3082 insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
3083
3084 insn->setInterpolate(mode);
3085 }
3086 }
3087
3088 Converter::Subroutine *
3089 Converter::getSubroutine(unsigned ip)
3090 {
3091 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
3092
3093 if (it == sub.map.end())
3094 it = sub.map.insert(std::make_pair(
3095 ip, Subroutine(new Function(prog, "SUB", ip)))).first;
3096
3097 return &it->second;
3098 }
3099
3100 Converter::Subroutine *
3101 Converter::getSubroutine(Function *f)
3102 {
3103 unsigned ip = f->getLabel();
3104 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
3105
3106 if (it == sub.map.end())
3107 it = sub.map.insert(std::make_pair(ip, Subroutine(f))).first;
3108
3109 return &it->second;
3110 }
3111
3112 bool
3113 Converter::isEndOfSubroutine(uint ip)
3114 {
3115 assert(ip < code->scan.num_instructions);
3116 tgsi::Instruction insn(&code->insns[ip]);
3117 return (insn.getOpcode() == TGSI_OPCODE_END ||
3118 insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
3119 // does END occur at the end of main or at the very end?
3120 insn.getOpcode() == TGSI_OPCODE_BGNSUB);
3121 }
3122
3123 bool
3124 Converter::handleInstruction(const struct tgsi_full_instruction *insn)
3125 {
3126 Instruction *geni;
3127
3128 Value *dst0[4], *rDst0[4];
3129 Value *src0, *src1, *src2, *src3;
3130 Value *val0, *val1;
3131 int c;
3132
3133 tgsi = tgsi::Instruction(insn);
3134
3135 bool useScratchDst = tgsi.checkDstSrcAliasing();
3136
3137 operation op = tgsi.getOP();
3138 dstTy = tgsi.inferDstType();
3139 srcTy = tgsi.inferSrcType();
3140
3141 unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
3142
3143 if (tgsi.dstCount()) {
3144 for (c = 0; c < 4; ++c) {
3145 rDst0[c] = acquireDst(0, c);
3146 dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
3147 }
3148 }
3149
3150 switch (tgsi.getOpcode()) {
3151 case TGSI_OPCODE_ADD:
3152 case TGSI_OPCODE_UADD:
3153 case TGSI_OPCODE_AND:
3154 case TGSI_OPCODE_DIV:
3155 case TGSI_OPCODE_IDIV:
3156 case TGSI_OPCODE_UDIV:
3157 case TGSI_OPCODE_MAX:
3158 case TGSI_OPCODE_MIN:
3159 case TGSI_OPCODE_IMAX:
3160 case TGSI_OPCODE_IMIN:
3161 case TGSI_OPCODE_UMAX:
3162 case TGSI_OPCODE_UMIN:
3163 case TGSI_OPCODE_MOD:
3164 case TGSI_OPCODE_UMOD:
3165 case TGSI_OPCODE_MUL:
3166 case TGSI_OPCODE_UMUL:
3167 case TGSI_OPCODE_IMUL_HI:
3168 case TGSI_OPCODE_UMUL_HI:
3169 case TGSI_OPCODE_OR:
3170 case TGSI_OPCODE_SHL:
3171 case TGSI_OPCODE_ISHR:
3172 case TGSI_OPCODE_USHR:
3173 case TGSI_OPCODE_XOR:
3174 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3175 src0 = fetchSrc(0, c);
3176 src1 = fetchSrc(1, c);
3177 geni = mkOp2(op, dstTy, dst0[c], src0, src1);
3178 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3179 if (op == OP_MUL && dstTy == TYPE_F32)
3180 geni->dnz = info->io.mul_zero_wins;
3181 geni->precise = insn->Instruction.Precise;
3182 }
3183 break;
3184 case TGSI_OPCODE_MAD:
3185 case TGSI_OPCODE_UMAD:
3186 case TGSI_OPCODE_FMA:
3187 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3188 src0 = fetchSrc(0, c);
3189 src1 = fetchSrc(1, c);
3190 src2 = fetchSrc(2, c);
3191 geni = mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3192 if (dstTy == TYPE_F32)
3193 geni->dnz = info->io.mul_zero_wins;
3194 geni->precise = insn->Instruction.Precise;
3195 }
3196 break;
3197 case TGSI_OPCODE_MOV:
3198 case TGSI_OPCODE_CEIL:
3199 case TGSI_OPCODE_FLR:
3200 case TGSI_OPCODE_TRUNC:
3201 case TGSI_OPCODE_RCP:
3202 case TGSI_OPCODE_SQRT:
3203 case TGSI_OPCODE_IABS:
3204 case TGSI_OPCODE_INEG:
3205 case TGSI_OPCODE_NOT:
3206 case TGSI_OPCODE_DDX:
3207 case TGSI_OPCODE_DDY:
3208 case TGSI_OPCODE_DDX_FINE:
3209 case TGSI_OPCODE_DDY_FINE:
3210 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3211 mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3212 break;
3213 case TGSI_OPCODE_RSQ:
3214 src0 = fetchSrc(0, 0);
3215 val0 = getScratch();
3216 mkOp1(OP_ABS, TYPE_F32, val0, src0);
3217 mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3218 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3219 mkMov(dst0[c], val0);
3220 break;
3221 case TGSI_OPCODE_ARL:
3222 case TGSI_OPCODE_ARR:
3223 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3224 const RoundMode rnd =
3225 tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3226 src0 = fetchSrc(0, c);
3227 mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3228 }
3229 break;
3230 case TGSI_OPCODE_UARL:
3231 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3232 mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3233 break;
3234 case TGSI_OPCODE_POW:
3235 val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3236 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3237 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3238 break;
3239 case TGSI_OPCODE_EX2:
3240 case TGSI_OPCODE_LG2:
3241 val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3242 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3243 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3244 break;
3245 case TGSI_OPCODE_COS:
3246 case TGSI_OPCODE_SIN:
3247 val0 = getScratch();
3248 if (mask & 7) {
3249 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3250 mkOp1(op, TYPE_F32, val0, val0);
3251 for (c = 0; c < 3; ++c)
3252 if (dst0[c])
3253 mkMov(dst0[c], val0);
3254 }
3255 if (dst0[3]) {
3256 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3257 mkOp1(op, TYPE_F32, dst0[3], val0);
3258 }
3259 break;
3260 case TGSI_OPCODE_EXP:
3261 src0 = fetchSrc(0, 0);
3262 val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3263 if (dst0[1])
3264 mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3265 if (dst0[0])
3266 mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3267 if (dst0[2])
3268 mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3269 if (dst0[3])
3270 loadImm(dst0[3], 1.0f);
3271 break;
3272 case TGSI_OPCODE_LOG:
3273 src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3274 val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3275 if (dst0[0] || dst0[1])
3276 val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3277 if (dst0[1]) {
3278 mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3279 mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3280 mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0)
3281 ->dnz = info->io.mul_zero_wins;
3282 }
3283 if (dst0[3])
3284 loadImm(dst0[3], 1.0f);
3285 break;
3286 case TGSI_OPCODE_DP2:
3287 val0 = buildDot(2);
3288 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3289 mkMov(dst0[c], val0);
3290 break;
3291 case TGSI_OPCODE_DP3:
3292 val0 = buildDot(3);
3293 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3294 mkMov(dst0[c], val0);
3295 break;
3296 case TGSI_OPCODE_DP4:
3297 val0 = buildDot(4);
3298 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3299 mkMov(dst0[c], val0);
3300 break;
3301 case TGSI_OPCODE_DST:
3302 if (dst0[0])
3303 loadImm(dst0[0], 1.0f);
3304 if (dst0[1]) {
3305 src0 = fetchSrc(0, 1);
3306 src1 = fetchSrc(1, 1);
3307 mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1)
3308 ->dnz = info->io.mul_zero_wins;
3309 }
3310 if (dst0[2])
3311 mkMov(dst0[2], fetchSrc(0, 2));
3312 if (dst0[3])
3313 mkMov(dst0[3], fetchSrc(1, 3));
3314 break;
3315 case TGSI_OPCODE_LRP:
3316 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3317 src0 = fetchSrc(0, c);
3318 src1 = fetchSrc(1, c);
3319 src2 = fetchSrc(2, c);
3320 mkOp3(OP_MAD, TYPE_F32, dst0[c],
3321 mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2)
3322 ->dnz = info->io.mul_zero_wins;
3323 }
3324 break;
3325 case TGSI_OPCODE_LIT:
3326 handleLIT(dst0);
3327 break;
3328 case TGSI_OPCODE_ISSG:
3329 case TGSI_OPCODE_SSG:
3330 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3331 src0 = fetchSrc(0, c);
3332 val0 = getScratch();
3333 val1 = getScratch();
3334 mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3335 mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3336 if (srcTy == TYPE_F32)
3337 mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3338 else
3339 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3340 }
3341 break;
3342 case TGSI_OPCODE_UCMP:
3343 srcTy = TYPE_U32;
3344 /* fallthrough */
3345 case TGSI_OPCODE_CMP:
3346 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3347 src0 = fetchSrc(0, c);
3348 src1 = fetchSrc(1, c);
3349 src2 = fetchSrc(2, c);
3350 if (src1 == src2)
3351 mkMov(dst0[c], src1);
3352 else
3353 mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3354 srcTy, dst0[c], srcTy, src1, src2, src0);
3355 }
3356 break;
3357 case TGSI_OPCODE_FRC:
3358 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3359 src0 = fetchSrc(0, c);
3360 val0 = getScratch();
3361 mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3362 mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3363 }
3364 break;
3365 case TGSI_OPCODE_ROUND:
3366 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3367 mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3368 ->rnd = ROUND_NI;
3369 break;
3370 case TGSI_OPCODE_SLT:
3371 case TGSI_OPCODE_SGE:
3372 case TGSI_OPCODE_SEQ:
3373 case TGSI_OPCODE_SGT:
3374 case TGSI_OPCODE_SLE:
3375 case TGSI_OPCODE_SNE:
3376 case TGSI_OPCODE_FSEQ:
3377 case TGSI_OPCODE_FSGE:
3378 case TGSI_OPCODE_FSLT:
3379 case TGSI_OPCODE_FSNE:
3380 case TGSI_OPCODE_ISGE:
3381 case TGSI_OPCODE_ISLT:
3382 case TGSI_OPCODE_USEQ:
3383 case TGSI_OPCODE_USGE:
3384 case TGSI_OPCODE_USLT:
3385 case TGSI_OPCODE_USNE:
3386 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3387 src0 = fetchSrc(0, c);
3388 src1 = fetchSrc(1, c);
3389 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3390 }
3391 break;
3392 case TGSI_OPCODE_VOTE_ALL:
3393 case TGSI_OPCODE_VOTE_ANY:
3394 case TGSI_OPCODE_VOTE_EQ:
3395 val0 = new_LValue(func, FILE_PREDICATE);
3396 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3397 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3398 mkOp1(op, dstTy, val0, val0)
3399 ->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3400 mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3401 }
3402 break;
3403 case TGSI_OPCODE_BALLOT:
3404 if (!tgsi.getDst(0).isMasked(0)) {
3405 val0 = new_LValue(func, FILE_PREDICATE);
3406 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, 0), zero);
3407 mkOp1(op, TYPE_U32, dst0[0], val0)->subOp = NV50_IR_SUBOP_VOTE_ANY;
3408 }
3409 if (!tgsi.getDst(0).isMasked(1))
3410 mkMov(dst0[1], zero, TYPE_U32);
3411 break;
3412 case TGSI_OPCODE_READ_FIRST:
3413 // ReadFirstInvocationARB(src) is implemented as
3414 // ReadInvocationARB(src, findLSB(ballot(true)))
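// findLSB is open-coded as a bit-reverse (OP_EXTBF with the EXTBF_REV subOp)
// followed by OP_BFIND in shift-amount mode, the same sequence used for
// TGSI_OPCODE_LSB further below.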
3415 val0 = getScratch();
3416 mkOp1(OP_VOTE, TYPE_U32, val0, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
3417 mkOp2(OP_EXTBF, TYPE_U32, val0, val0, mkImm(0x2000))
3418 ->subOp = NV50_IR_SUBOP_EXTBF_REV;
3419 mkOp1(OP_BFIND, TYPE_U32, val0, val0)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3420 src1 = val0;
3421 /* fallthrough */
3422 case TGSI_OPCODE_READ_INVOC:
3423 if (tgsi.getOpcode() == TGSI_OPCODE_READ_INVOC)
3424 src1 = fetchSrc(1, 0);
3425 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3426 geni = mkOp3(op, dstTy, dst0[c], fetchSrc(0, c), src1, mkImm(0x1f));
3427 geni->subOp = NV50_IR_SUBOP_SHFL_IDX;
3428 }
3429 break;
3430 case TGSI_OPCODE_CLOCK:
3431 // Stick the 32-bit clock into the high dword of the logical result.
3432 if (!tgsi.getDst(0).isMasked(0))
3433 mkOp1(OP_MOV, TYPE_U32, dst0[0], zero);
3434 if (!tgsi.getDst(0).isMasked(1))
3435 mkOp1(OP_RDSV, TYPE_U32, dst0[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
3436 break;
3437 case TGSI_OPCODE_KILL_IF:
3438 val0 = new_LValue(func, FILE_PREDICATE);
3439 mask = 0;
3440 for (c = 0; c < 4; ++c) {
3441 const int s = tgsi.getSrc(0).getSwizzle(c);
3442 if (mask & (1 << s))
3443 continue;
3444 mask |= 1 << s;
3445 mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3446 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3447 }
3448 break;
3449 case TGSI_OPCODE_KILL:
3450 mkOp(OP_DISCARD, TYPE_NONE, NULL);
3451 break;
3452 case TGSI_OPCODE_TEX:
3453 case TGSI_OPCODE_TEX_LZ:
3454 case TGSI_OPCODE_TXB:
3455 case TGSI_OPCODE_TXL:
3456 case TGSI_OPCODE_TXP:
3457 case TGSI_OPCODE_LODQ:
3458 // R S L C Dx Dy
3459 handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3460 break;
3461 case TGSI_OPCODE_TXD:
3462 handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3463 break;
3464 case TGSI_OPCODE_TG4:
3465 handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3466 break;
3467 case TGSI_OPCODE_TEX2:
3468 handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3469 break;
3470 case TGSI_OPCODE_TXB2:
3471 case TGSI_OPCODE_TXL2:
3472 handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3473 break;
3474 case TGSI_OPCODE_SAMPLE:
3475 case TGSI_OPCODE_SAMPLE_B:
3476 case TGSI_OPCODE_SAMPLE_D:
3477 case TGSI_OPCODE_SAMPLE_L:
3478 case TGSI_OPCODE_SAMPLE_C:
3479 case TGSI_OPCODE_SAMPLE_C_LZ:
3480 handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3481 break;
3482 case TGSI_OPCODE_TXF_LZ:
3483 case TGSI_OPCODE_TXF:
3484 handleTXF(dst0, 1, 0x03);
3485 break;
3486 case TGSI_OPCODE_SAMPLE_I:
3487 handleTXF(dst0, 1, 0x03);
3488 break;
3489 case TGSI_OPCODE_SAMPLE_I_MS:
3490 handleTXF(dst0, 1, 0x20);
3491 break;
3492 case TGSI_OPCODE_TXQ:
3493 case TGSI_OPCODE_SVIEWINFO:
3494 handleTXQ(dst0, TXQ_DIMS, 1);
3495 break;
3496 case TGSI_OPCODE_TXQS:
3497 // The TXQ_TYPE query returns samples in its 3rd arg, but we need it to
3498 // be in .x
3499 dst0[1] = dst0[2] = dst0[3] = NULL;
3500 std::swap(dst0[0], dst0[2]);
3501 handleTXQ(dst0, TXQ_TYPE, 0);
3502 std::swap(dst0[0], dst0[2]);
3503 break;
3504 case TGSI_OPCODE_FBFETCH:
3505 handleFBFETCH(dst0);
3506 break;
3507 case TGSI_OPCODE_F2I:
3508 case TGSI_OPCODE_F2U:
3509 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3510 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3511 break;
3512 case TGSI_OPCODE_I2F:
3513 case TGSI_OPCODE_U2F:
3514 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3515 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3516 break;
3517 case TGSI_OPCODE_PK2H:
3518 val0 = getScratch();
3519 val1 = getScratch();
3520 mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3521 mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3522 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3523 mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3524 break;
3525 case TGSI_OPCODE_UP2H:
3526 src0 = fetchSrc(0, 0);
3527 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3528 geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3529 geni->subOp = c & 1;
3530 }
3531 break;
3532 case TGSI_OPCODE_EMIT:
3533 /* export the saved viewport index */
3534 if (viewport != NULL) {
3535 Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3536 info->out[info->io.viewportId].slot[0] * 4);
3537 mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3538 }
3539 /* fallthrough */
3540 case TGSI_OPCODE_ENDPRIM:
3541 {
3542 // get vertex stream (must be immediate)
3543 unsigned int stream = tgsi.getSrc(0).getValueU32(0, info);
3544 if (stream && op == OP_RESTART)
3545 break;
3546 if (info->prop.gp.maxVertices == 0)
3547 break;
3548 src0 = mkImm(stream);
3549 mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3550 break;
3551 }
3552 case TGSI_OPCODE_IF:
3553 case TGSI_OPCODE_UIF:
3554 {
3555 BasicBlock *ifBB = new BasicBlock(func);
3556
3557 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3558 condBBs.push(bb);
3559 joinBBs.push(bb);
3560
3561 mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3562
3563 setPosition(ifBB, true);
3564 }
3565 break;
3566 case TGSI_OPCODE_ELSE:
3567 {
3568 BasicBlock *elseBB = new BasicBlock(func);
3569 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3570
3571 forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3572 condBBs.push(bb);
3573
3574 forkBB->getExit()->asFlow()->target.bb = elseBB;
3575 if (!bb->isTerminated())
3576 mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3577
3578 setPosition(elseBB, true);
3579 }
3580 break;
3581 case TGSI_OPCODE_ENDIF:
3582 {
3583 BasicBlock *convBB = new BasicBlock(func);
3584 BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3585 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3586
3587 if (!bb->isTerminated()) {
3588 // we only want join if none of the clauses ended with CONT/BREAK/RET
3589 if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3590 insertConvergenceOps(convBB, forkBB);
3591 mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3592 bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3593 }
3594
3595 if (prevBB->getExit()->op == OP_BRA) {
3596 prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3597 prevBB->getExit()->asFlow()->target.bb = convBB;
3598 }
3599 setPosition(convBB, true);
3600 }
3601 break;
3602 case TGSI_OPCODE_BGNLOOP:
3603 {
3604 BasicBlock *lbgnBB = new BasicBlock(func);
3605 BasicBlock *lbrkBB = new BasicBlock(func);
3606
3607 loopBBs.push(lbgnBB);
3608 breakBBs.push(lbrkBB);
3609 if (loopBBs.getSize() > func->loopNestingBound)
3610 func->loopNestingBound++;
3611
3612 mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3613
3614 bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3615 setPosition(lbgnBB, true);
3616 mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3617 }
3618 break;
3619 case TGSI_OPCODE_ENDLOOP:
3620 {
3621 BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3622
3623 if (!bb->isTerminated()) {
3624 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3625 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3626 }
3627 setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3628
3629 // If the loop never breaks (e.g. only has RET's inside), then there
3630 // will be no way to get to the break bb. However BGNLOOP will have
3631 // already made a PREBREAK to it, so it must be in the CFG.
3632 if (getBB()->cfg.incidentCount() == 0)
3633 loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3634 }
3635 break;
3636 case TGSI_OPCODE_BRK:
3637 {
3638 if (bb->isTerminated())
3639 break;
3640 BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3641 mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3642 bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3643 }
3644 break;
3645 case TGSI_OPCODE_CONT:
3646 {
3647 if (bb->isTerminated())
3648 break;
3649 BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3650 mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3651 contBB->explicitCont = true;
3652 bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3653 }
3654 break;
3655 case TGSI_OPCODE_BGNSUB:
3656 {
3657 Subroutine *s = getSubroutine(ip);
3658 BasicBlock *entry = new BasicBlock(s->f);
3659 BasicBlock *leave = new BasicBlock(s->f);
3660
3661 // multiple entrypoints possible, keep the graph connected
3662 if (prog->getType() == Program::TYPE_COMPUTE)
3663 prog->main->call.attach(&s->f->call, Graph::Edge::TREE);
3664
3665 sub.cur = s;
3666 s->f->setEntry(entry);
3667 s->f->setExit(leave);
3668 setPosition(entry, true);
3669 return true;
3670 }
3671 case TGSI_OPCODE_ENDSUB:
3672 {
3673 sub.cur = getSubroutine(prog->main);
3674 setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
3675 return true;
3676 }
3677 case TGSI_OPCODE_CAL:
3678 {
3679 Subroutine *s = getSubroutine(tgsi.getLabel());
3680 mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
3681 func->call.attach(&s->f->call, Graph::Edge::TREE);
3682 return true;
3683 }
3684 case TGSI_OPCODE_RET:
3685 {
3686 if (bb->isTerminated())
3687 return true;
3688 BasicBlock *leave = BasicBlock::get(func->cfgExit);
3689
3690 if (!isEndOfSubroutine(ip + 1)) {
3691 // insert a PRERET at the entry if this is an early return
3692 // (only needed for sharing code in the epilogue)
3693 BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
3694 if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
3695 BasicBlock *pos = getBB();
3696 setPosition(root, false);
3697 mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
3698 setPosition(pos, true);
3699 }
3700 }
3701 mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
3702 bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
3703 }
3704 break;
3705 case TGSI_OPCODE_END:
3706 {
3707 // attach and generate epilogue code
3708 BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
3709 bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
3710 setPosition(epilogue, true);
3711 if (prog->getType() == Program::TYPE_FRAGMENT)
3712 exportOutputs();
3713 if (info->io.genUserClip > 0)
3714 handleUserClipPlanes();
3715 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
3716 }
3717 break;
3718 case TGSI_OPCODE_SWITCH:
3719 case TGSI_OPCODE_CASE:
3720 ERROR("switch/case opcode encountered, should have been lowered\n");
3721 abort();
3722 break;
3723 case TGSI_OPCODE_LOAD:
3724 handleLOAD(dst0);
3725 break;
3726 case TGSI_OPCODE_STORE:
3727 handleSTORE();
3728 break;
3729 case TGSI_OPCODE_BARRIER:
3730 geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
3731 geni->fixed = 1;
3732 geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
3733 break;
3734 case TGSI_OPCODE_MEMBAR:
3735 {
3736 uint32_t level = tgsi.getSrc(0).getValueU32(0, info);
3737 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3738 geni->fixed = 1;
3739 if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
3740 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
3741 else
3742 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
3743 }
3744 break;
3745 case TGSI_OPCODE_ATOMUADD:
3746 case TGSI_OPCODE_ATOMXCHG:
3747 case TGSI_OPCODE_ATOMCAS:
3748 case TGSI_OPCODE_ATOMAND:
3749 case TGSI_OPCODE_ATOMOR:
3750 case TGSI_OPCODE_ATOMXOR:
3751 case TGSI_OPCODE_ATOMUMIN:
3752 case TGSI_OPCODE_ATOMIMIN:
3753 case TGSI_OPCODE_ATOMUMAX:
3754 case TGSI_OPCODE_ATOMIMAX:
3755 handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
3756 break;
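// RESQ: buffer resources are queried with OP_BUFQ on the buffer symbol
// (optionally indirect), while images go through an OP_SUQ texture query
// whose mask is built from the enabled destination channels.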
3757 case TGSI_OPCODE_RESQ:
3758 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
3759 Value *ind = NULL;
3760 if (tgsi.getSrc(0).isIndirect(0))
3761 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
3762 geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
3763 makeSym(tgsi.getSrc(0).getFile(),
3764 tgsi.getSrc(0).getIndex(0), -1, 0, 0));
3765 if (ind)
3766 geni->setIndirect(0, 1, ind);
3767 } else {
3768 assert(tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE);
3769
3770 TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
3771 for (int c = 0, d = 0; c < 4; ++c) {
3772 if (dst0[c]) {
3773 texi->setDef(d++, dst0[c]);
3774 texi->tex.mask |= 1 << c;
3775 }
3776 }
3777 texi->tex.r = tgsi.getSrc(0).getIndex(0);
3778 texi->tex.target = getImageTarget(code, texi->tex.r);
3779
3780 if (tgsi.getSrc(0).isIndirect(0))
3781 texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
3782
3783 bb->insertTail(texi);
3784 }
3785 break;
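// IBFE/UBFE: the bitfield-extract operand packs offset and width into one
// value as (width << 8) | offset. With immediate sources the packed value is
// computed at compile time; otherwise INSBF with 0x808 (size 8 at position 8)
// builds it at run time by inserting the width above the offset.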
3786 case TGSI_OPCODE_IBFE:
3787 case TGSI_OPCODE_UBFE:
3788 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3789 src0 = fetchSrc(0, c);
3790 val0 = getScratch();
3791 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
3792 tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
3793 loadImm(val0, (tgsi.getSrc(2).getValueU32(c, info) << 8) |
3794 tgsi.getSrc(1).getValueU32(c, info));
3795 } else {
3796 src1 = fetchSrc(1, c);
3797 src2 = fetchSrc(2, c);
3798 mkOp3(OP_INSBF, TYPE_U32, val0, src2, mkImm(0x808), src1);
3799 }
3800 mkOp2(OP_EXTBF, dstTy, dst0[c], src0, val0);
3801 }
3802 break;
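// BFI: same packed (width << 8) | offset operand as for BFE above; the first
// INSBF builds it, the second performs the actual bitfield insert.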
3803 case TGSI_OPCODE_BFI:
3804 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3805 src0 = fetchSrc(0, c);
3806 src1 = fetchSrc(1, c);
3807 src2 = fetchSrc(2, c);
3808 src3 = fetchSrc(3, c);
3809 val0 = getScratch();
3810 mkOp3(OP_INSBF, TYPE_U32, val0, src3, mkImm(0x808), src2);
3811 mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, val0, src0);
3812 }
3813 break;
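// LSB: bit-reverse the source (EXTBF over all 32 bits with the REV subop),
// then find the most significant set bit, which now corresponds to the
// original least significant one.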
3814 case TGSI_OPCODE_LSB:
3815 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3816 src0 = fetchSrc(0, c);
3817 val0 = getScratch();
3818 geni = mkOp2(OP_EXTBF, TYPE_U32, val0, src0, mkImm(0x2000));
3819 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3820 geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], val0);
3821 geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3822 }
3823 break;
3824 case TGSI_OPCODE_IMSB:
3825 case TGSI_OPCODE_UMSB:
3826 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3827 src0 = fetchSrc(0, c);
3828 mkOp1(OP_BFIND, srcTy, dst0[c], src0);
3829 }
3830 break;
3831 case TGSI_OPCODE_BREV:
3832 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3833 src0 = fetchSrc(0, c);
3834 geni = mkOp2(OP_EXTBF, TYPE_U32, dst0[c], src0, mkImm(0x2000));
3835 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3836 }
3837 break;
3838 case TGSI_OPCODE_POPC:
3839 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3840 src0 = fetchSrc(0, c);
3841 mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
3842 }
3843 break;
3844 case TGSI_OPCODE_INTERP_CENTROID:
3845 case TGSI_OPCODE_INTERP_SAMPLE:
3846 case TGSI_OPCODE_INTERP_OFFSET:
3847 handleINTERP(dst0);
3848 break;
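// 64-bit to 32-bit conversions: each enabled destination channel consumes a
// pair of source channels that is merged into one 64-bit value before the
// CVT; integer results are truncated (round towards zero).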
3849 case TGSI_OPCODE_I642F:
3850 case TGSI_OPCODE_U642F:
3851 case TGSI_OPCODE_D2I:
3852 case TGSI_OPCODE_D2U:
3853 case TGSI_OPCODE_D2F: {
3854 int pos = 0;
3855 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3856 Value *dreg = getSSA(8);
3857 src0 = fetchSrc(0, pos);
3858 src1 = fetchSrc(0, pos + 1);
3859 mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
3860 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
3861 if (!isFloatType(dstTy))
3862 cvt->rnd = ROUND_Z;
3863 pos += 2;
3864 }
3865 break;
3866 }
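// I2I64 sign-extends by pairing each result channel with an arithmetic
// shift right by 31 of the low word; U2I64 just zero-fills the high word.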
3867 case TGSI_OPCODE_I2I64:
3868 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3869 dst0[c] = fetchSrc(0, c / 2);
3870 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
3871 c++;
3872 }
3873 break;
3874 case TGSI_OPCODE_U2I64:
3875 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3876 dst0[c] = fetchSrc(0, c / 2);
3877 dst0[c + 1] = zero;
3878 c++;
3879 }
3880 break;
3881 case TGSI_OPCODE_F2I64:
3882 case TGSI_OPCODE_F2U64:
3883 case TGSI_OPCODE_I2D:
3884 case TGSI_OPCODE_U2D:
3885 case TGSI_OPCODE_F2D:
3886 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3887 Value *dreg = getSSA(8);
3888 Instruction *cvt = mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
3889 if (!isFloatType(dstTy))
3890 cvt->rnd = ROUND_Z;
3891 mkSplit(&dst0[c], 4, dreg);
3892 c++;
3893 }
3894 break;
3895 case TGSI_OPCODE_D2I64:
3896 case TGSI_OPCODE_D2U64:
3897 case TGSI_OPCODE_I642D:
3898 case TGSI_OPCODE_U642D:
3899 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3900 src0 = getSSA(8);
3901 Value *dst = getSSA(8), *tmp[2];
3902 tmp[0] = fetchSrc(0, c);
3903 tmp[1] = fetchSrc(0, c + 1);
3904 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3905 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst, srcTy, src0);
3906 if (!isFloatType(dstTy))
3907 cvt->rnd = ROUND_Z;
3908 mkSplit(&dst0[c], 4, dst);
3909 c++;
3910 }
3911 break;
3912 case TGSI_OPCODE_I64NEG:
3913 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3914 src0 = getSSA(8);
3915 Value *dst = getSSA(8), *tmp[2];
3916 tmp[0] = fetchSrc(0, c);
3917 tmp[1] = fetchSrc(0, c + 1);
3918 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3919 mkOp2(OP_SUB, dstTy, dst, zero, src0);
3920 mkSplit(&dst0[c], 4, dst);
3921 c++;
3922 }
3923 break;
3924 case TGSI_OPCODE_I64ABS:
3925 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3926 src0 = getSSA(8);
3927 Value *neg = getSSA(8), *srcComp[2], *negComp[2];
3928 srcComp[0] = fetchSrc(0, c);
3929 srcComp[1] = fetchSrc(0, c + 1);
3930 mkOp2(OP_MERGE, TYPE_U64, src0, srcComp[0], srcComp[1]);
3931 mkOp2(OP_SUB, dstTy, neg, zero, src0);
3932 mkSplit(negComp, 4, neg);
3933 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c], TYPE_S32,
3934 negComp[0], srcComp[0], srcComp[1]);
3935 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c + 1], TYPE_S32,
3936 negComp[1], srcComp[1], srcComp[1]);
3937 c++;
3938 }
3939 break;
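// Most of the 64-bit cases below follow the same lowering pattern: merge the
// two 32-bit halves of each operand into a 64-bit SSA value, emit the
// operation at 64-bit width, then split the result back into the two
// destination channels, roughly:
//   merge(lo, hi) -> a;  op(a, ...) -> d;  split(d) -> (dst[c], dst[c + 1])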
3940 case TGSI_OPCODE_DABS:
3941 case TGSI_OPCODE_DNEG:
3942 case TGSI_OPCODE_DRCP:
3943 case TGSI_OPCODE_DSQRT:
3944 case TGSI_OPCODE_DRSQ:
3945 case TGSI_OPCODE_DTRUNC:
3946 case TGSI_OPCODE_DCEIL:
3947 case TGSI_OPCODE_DFLR:
3948 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3949 src0 = getSSA(8);
3950 Value *dst = getSSA(8), *tmp[2];
3951 tmp[0] = fetchSrc(0, c);
3952 tmp[1] = fetchSrc(0, c + 1);
3953 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3954 mkOp1(op, dstTy, dst, src0);
3955 mkSplit(&dst0[c], 4, dst);
3956 c++;
3957 }
3958 break;
3959 case TGSI_OPCODE_DFRAC:
3960 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3961 src0 = getSSA(8);
3962 Value *dst = getSSA(8), *tmp[2];
3963 tmp[0] = fetchSrc(0, c);
3964 tmp[1] = fetchSrc(0, c + 1);
3965 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3966 mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
3967 mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
3968 mkSplit(&dst0[c], 4, dst);
3969 c++;
3970 }
3971 break;
3972 case TGSI_OPCODE_U64SEQ:
3973 case TGSI_OPCODE_U64SNE:
3974 case TGSI_OPCODE_U64SLT:
3975 case TGSI_OPCODE_U64SGE:
3976 case TGSI_OPCODE_I64SLT:
3977 case TGSI_OPCODE_I64SGE:
3978 case TGSI_OPCODE_DSLT:
3979 case TGSI_OPCODE_DSGE:
3980 case TGSI_OPCODE_DSEQ:
3981 case TGSI_OPCODE_DSNE: {
3982 int pos = 0;
3983 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3984 Value *tmp[2];
3985
3986 src0 = getSSA(8);
3987 src1 = getSSA(8);
3988 tmp[0] = fetchSrc(0, pos);
3989 tmp[1] = fetchSrc(0, pos + 1);
3990 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3991 tmp[0] = fetchSrc(1, pos);
3992 tmp[1] = fetchSrc(1, pos + 1);
3993 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
3994 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3995 pos += 2;
3996 }
3997 break;
3998 }
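// 64-bit min/max is composed of two 32-bit min/max ops: the high words are
// handled first (MINMAX_HIGH) and produce a flags result that the low-word
// op (MINMAX_LOW) consumes to select the matching low half.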
3999 case TGSI_OPCODE_U64MIN:
4000 case TGSI_OPCODE_U64MAX:
4001 case TGSI_OPCODE_I64MIN:
4002 case TGSI_OPCODE_I64MAX: {
4003 dstTy = isSignedIntType(dstTy) ? TYPE_S32 : TYPE_U32;
4004 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4005 Value *flag = getSSA(1, FILE_FLAGS);
4006 src0 = fetchSrc(0, c + 1);
4007 src1 = fetchSrc(1, c + 1);
4008 geni = mkOp2(op, dstTy, dst0[c + 1], src0, src1);
4009 geni->subOp = NV50_IR_SUBOP_MINMAX_HIGH;
4010 geni->setFlagsDef(1, flag);
4011
4012 src0 = fetchSrc(0, c);
4013 src1 = fetchSrc(1, c);
4014 geni = mkOp2(op, TYPE_U32, dst0[c], src0, src1);
4015 geni->subOp = NV50_IR_SUBOP_MINMAX_LOW;
4016 geni->setFlagsSrc(2, flag);
4017
4018 c++;
4019 }
4020 break;
4021 }
4022 case TGSI_OPCODE_U64SHL:
4023 case TGSI_OPCODE_I64SHR:
4024 case TGSI_OPCODE_U64SHR:
4025 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4026 src0 = getSSA(8);
4027 Value *dst = getSSA(8), *tmp[2];
4028 tmp[0] = fetchSrc(0, c);
4029 tmp[1] = fetchSrc(0, c + 1);
4030 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4031 // Theoretically src1 is a 64-bit value but in practice only the low
4032 // bits matter. The IR expects this to be a 32-bit value.
4033 src1 = fetchSrc(1, c);
4034 mkOp2(op, dstTy, dst, src0, src1);
4035 mkSplit(&dst0[c], 4, dst);
4036 c++;
4037 }
4038 break;
4039 case TGSI_OPCODE_U64ADD:
4040 case TGSI_OPCODE_U64MUL:
4041 case TGSI_OPCODE_DADD:
4042 case TGSI_OPCODE_DMUL:
4043 case TGSI_OPCODE_DDIV:
4044 case TGSI_OPCODE_DMAX:
4045 case TGSI_OPCODE_DMIN:
4046 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4047 src0 = getSSA(8);
4048 src1 = getSSA(8);
4049 Value *dst = getSSA(8), *tmp[2];
4050 tmp[0] = fetchSrc(0, c);
4051 tmp[1] = fetchSrc(0, c + 1);
4052 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4053 tmp[0] = fetchSrc(1, c);
4054 tmp[1] = fetchSrc(1, c + 1);
4055 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4056 mkOp2(op, dstTy, dst, src0, src1);
4057 mkSplit(&dst0[c], 4, dst);
4058 c++;
4059 }
4060 break;
4061 case TGSI_OPCODE_DMAD:
4062 case TGSI_OPCODE_DFMA:
4063 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4064 src0 = getSSA(8);
4065 src1 = getSSA(8);
4066 src2 = getSSA(8);
4067 Value *dst = getSSA(8), *tmp[2];
4068 tmp[0] = fetchSrc(0, c);
4069 tmp[1] = fetchSrc(0, c + 1);
4070 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4071 tmp[0] = fetchSrc(1, c);
4072 tmp[1] = fetchSrc(1, c + 1);
4073 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4074 tmp[0] = fetchSrc(2, c);
4075 tmp[1] = fetchSrc(2, c + 1);
4076 mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
4077 mkOp3(op, dstTy, dst, src0, src1, src2);
4078 mkSplit(&dst0[c], 4, dst);
4079 c++;
4080 }
4081 break;
4082 case TGSI_OPCODE_DROUND:
4083 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4084 src0 = getSSA(8);
4085 Value *dst = getSSA(8), *tmp[2];
4086 tmp[0] = fetchSrc(0, c);
4087 tmp[1] = fetchSrc(0, c + 1);
4088 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4089 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
4090 ->rnd = ROUND_NI;
4091 mkSplit(&dst0[c], 4, dst);
4092 c++;
4093 }
4094 break;
4095 case TGSI_OPCODE_DSSG:
4096 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4097 src0 = getSSA(8);
4098 Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
4099 tmp[0] = fetchSrc(0, c);
4100 tmp[1] = fetchSrc(0, c + 1);
4101 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4102
4103 val0 = getScratch();
4104 val1 = getScratch();
4105 // The zero is wrong here since it's only 32-bit, but it works out in
4106 // the end since it gets replaced with $r63.
4107 mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
4108 mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
4109 mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
4110 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
4111 mkSplit(&dst0[c], 4, dst);
4112 c++;
4113 }
4114 break;
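// I64SSG: the two signed compares yield 0 / ~0 masks for "greater than zero"
// and "less than zero"; their difference gives -1, 0 or 1 in the low word,
// and the high word is its sign extension (arithmetic shift right by 31).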
4115 case TGSI_OPCODE_I64SSG:
4116 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4117 src0 = getSSA(8);
4118 Value *tmp[2];
4119 tmp[0] = fetchSrc(0, c);
4120 tmp[1] = fetchSrc(0, c + 1);
4121 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4122
4123 val0 = getScratch();
4124 val1 = getScratch();
4125 mkCmp(OP_SET, CC_GT, TYPE_U32, val0, TYPE_S64, src0, zero);
4126 mkCmp(OP_SET, CC_LT, TYPE_U32, val1, TYPE_S64, src0, zero);
4127 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
4128 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
4129 c++;
4130 }
4131 break;
4132 default:
4133 ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
4134 assert(0);
4135 break;
4136 }
4137
4138 if (tgsi.dstCount()) {
4139 for (c = 0; c < 4; ++c) {
4140 if (!dst0[c])
4141 continue;
4142 if (dst0[c] != rDst0[c])
4143 mkMov(rDst0[c], dst0[c]);
4144 storeDst(0, c, rDst0[c]);
4145 }
4146 }
4147 vtxBaseValid = 0;
4148
4149 return true;
4150 }
4151
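// Emit the user clip distances: for each enabled user clip plane the
// coefficients are loaded from the driver's auxiliary constant buffer and
// dotted with the clip vertex stored in clipVtx[]; the results go to the
// clip distance outputs appended at the end of the output array.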
4152 void
4153 Converter::handleUserClipPlanes()
4154 {
4155 Value *res[8];
4156 int n, i, c;
4157
4158 for (c = 0; c < 4; ++c) {
4159 for (i = 0; i < info->io.genUserClip; ++i) {
4160 Symbol *sym = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4161 TYPE_F32, info->io.ucpBase + i * 16 + c * 4);
4162 Value *ucp = mkLoadv(TYPE_F32, sym, NULL);
4163 if (c == 0)
4164 res[i] = mkOp2v(OP_MUL, TYPE_F32, getScratch(), clipVtx[c], ucp);
4165 else
4166 mkOp3(OP_MAD, TYPE_F32, res[i], clipVtx[c], ucp, res[i]);
4167 }
4168 }
4169
4170 const int first = info->numOutputs - (info->io.genUserClip + 3) / 4;
4171
4172 for (i = 0; i < info->io.genUserClip; ++i) {
4173 n = i / 4 + first;
4174 c = i % 4;
4175 Symbol *sym =
4176 mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32, info->out[n].slot[c] * 4);
4177 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, res[i]);
4178 }
4179 }
4180
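// Fragment shader epilogue. If the driver requested an alpha test
// (alphaRefBase set), COLOR[0].w is compared against the alpha reference
// loaded from the auxiliary constant buffer and the fragment is discarded
// when the test fails; the CC_TR / subOp = 1 combination is presumably
// fixed up with the real compare function by the driver. All written
// outputs are then exported, saturating the position (depth) output.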
4181 void
4182 Converter::exportOutputs()
4183 {
4184 if (info->io.alphaRefBase) {
4185 for (unsigned int i = 0; i < info->numOutputs; ++i) {
4186 if (info->out[i].sn != TGSI_SEMANTIC_COLOR ||
4187 info->out[i].si != 0)
4188 continue;
4189 const unsigned int c = 3;
4190 if (!oData.exists(sub.cur->values, i, c))
4191 continue;
4192 Value *val = oData.load(sub.cur->values, i, c, NULL);
4193 if (!val)
4194 continue;
4195
4196 Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4197 TYPE_U32, info->io.alphaRefBase);
4198 Value *pred = new_LValue(func, FILE_PREDICATE);
4199 mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
4200 mkLoadv(TYPE_U32, ref, NULL))
4201 ->subOp = 1;
4202 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
4203 }
4204 }
4205
4206 for (unsigned int i = 0; i < info->numOutputs; ++i) {
4207 for (unsigned int c = 0; c < 4; ++c) {
4208 if (!oData.exists(sub.cur->values, i, c))
4209 continue;
4210 Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
4211 info->out[i].slot[c] * 4);
4212 Value *val = oData.load(sub.cur->values, i, c, NULL);
4213 if (val) {
4214 if (info->out[i].sn == TGSI_SEMANTIC_POSITION)
4215 mkOp1(OP_SAT, TYPE_F32, val, val);
4216 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
4217 }
4218 }
4219 }
4220 }
4221
4222 Converter::Converter(Program *ir, const tgsi::Source *code) : BuildUtil(ir),
4223 code(code),
4224 tgsi(NULL),
4225 tData(this), lData(this), aData(this), oData(this)
4226 {
4227 info = code->info;
4228
4229 const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
4230 const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
4231 const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);
4232
4233 tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
4234 lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
4235 aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
4236 oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);
4237
4238 zero = mkImm((uint32_t)0);
4239
4240 vtxBaseValid = 0;
4241 }
4242
4243 Converter::~Converter()
4244 {
4245 }
4246
4247 inline const Converter::Location *
4248 Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
4249 {
4250 ValueMap::l_iterator it = s->values.l.find(v);
4251 return it == s->values.l.end() ? NULL : &it->second;
4252 }
4253
4254 template<typename T> inline void
4255 Converter::BindArgumentsPass::updateCallArgs(
4256 Instruction *i, void (Instruction::*setArg)(int, Value *),
4257 T (Function::*proto))
4258 {
4259 Function *g = i->asFlow()->target.fn;
4260 Subroutine *subg = conv.getSubroutine(g);
4261
4262 for (unsigned a = 0; a < (g->*proto).size(); ++a) {
4263 Value *v = (g->*proto)[a].get();
4264 const Converter::Location &l = *getValueLocation(subg, v);
4265 Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);
4266
4267 (i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
4268 }
4269 }
4270
4271 template<typename T> inline void
4272 Converter::BindArgumentsPass::updatePrototype(
4273 BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
4274 {
4275 (func->*updateSet)();
4276
4277 for (unsigned i = 0; i < set->getSize(); ++i) {
4278 Value *v = func->getLValue(i);
4279 const Converter::Location *l = getValueLocation(sub, v);
4280
4281 // only include values with a matching TGSI register
4282 if (set->test(i) && l && !conv.code->locals.count(*l))
4283 (func->*proto).push_back(v);
4284 }
4285 }
4286
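// For every non-builtin CALL, bind the caller-side values matching the
// callee's ins/outs prototypes as the call's sources and definitions. Then
// derive this function's own prototypes: values live at the entry block
// become inputs, values defined at the exit block become outputs, skipping
// values whose TGSI register is declared local. The main function only
// gets a prototype for compute programs.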
4287 bool
4288 Converter::BindArgumentsPass::visit(Function *f)
4289 {
4290 sub = conv.getSubroutine(f);
4291
4292 for (ArrayList::Iterator bi = f->allBBlocks.iterator();
4293 !bi.end(); bi.next()) {
4294 for (Instruction *i = BasicBlock::get(bi)->getFirst();
4295 i; i = i->next) {
4296 if (i->op == OP_CALL && !i->asFlow()->builtin) {
4297 updateCallArgs(i, &Instruction::setSrc, &Function::ins);
4298 updateCallArgs(i, &Instruction::setDef, &Function::outs);
4299 }
4300 }
4301 }
4302
4303 if (func == prog->main && prog->getType() != Program::TYPE_COMPUTE)
4304 return true;
4305 updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
4306 &Function::buildLiveSets, &Function::ins);
4307 updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
4308 &Function::buildDefSets, &Function::outs);
4309
4310 return true;
4311 }
4312
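// Translation entry point: set up the main function's entry/exit blocks and
// some per-shader-type state (for tessellation control shaders outBase is
// lane id minus invocation id, presumably the first lane of the current
// patch; for fragment shaders 1 / position.w is cached in fragCoord[3]),
// then convert the TGSI instructions one by one and finally bind subroutine
// call arguments via BindArgumentsPass.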
4313 bool
4314 Converter::run()
4315 {
4316 BasicBlock *entry = new BasicBlock(prog->main);
4317 BasicBlock *leave = new BasicBlock(prog->main);
4318
4319 prog->main->setEntry(entry);
4320 prog->main->setExit(leave);
4321
4322 setPosition(entry, true);
4323 sub.cur = getSubroutine(prog->main);
4324
4325 if (info->io.genUserClip > 0) {
4326 for (int c = 0; c < 4; ++c)
4327 clipVtx[c] = getScratch();
4328 }
4329
4330 switch (prog->getType()) {
4331 case Program::TYPE_TESSELLATION_CONTROL:
4332 outBase = mkOp2v(
4333 OP_SUB, TYPE_U32, getSSA(),
4334 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
4335 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
4336 break;
4337 case Program::TYPE_FRAGMENT: {
4338 Symbol *sv = mkSysVal(SV_POSITION, 3);
4339 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
4340 mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
4341 break;
4342 }
4343 default:
4344 break;
4345 }
4346
4347 if (info->io.viewportId >= 0)
4348 viewport = getScratch();
4349 else
4350 viewport = NULL;
4351
4352 for (ip = 0; ip < code->scan.num_instructions; ++ip) {
4353 if (!handleInstruction(&code->insns[ip]))
4354 return false;
4355 }
4356
4357 if (!BindArgumentsPass(*this).run(prog))
4358 return false;
4359
4360 return true;
4361 }
4362
4363 } // unnamed namespace
4364
4365 namespace nv50_ir {
4366
4367 bool
4368 Program::makeFromTGSI(struct nv50_ir_prog_info *info)
4369 {
4370 tgsi::Source src(info);
4371 if (!src.scanSource())
4372 return false;
4373 tlsSize = info->bin.tlsSpace;
4374
4375 Converter builder(this, &src);
4376 return builder.run();
4377 }
4378
4379 } // namespace nv50_ir