gallium: remove TGSI_OPCODE_ABS
[mesa.git] / src/gallium/drivers/nouveau/codegen/nv50_ir_from_tgsi.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "tgsi/tgsi_dump.h"
24 #include "tgsi/tgsi_scan.h"
25 #include "tgsi/tgsi_util.h"
26
27 #include <set>
28
29 #include "codegen/nv50_ir.h"
30 #include "codegen/nv50_ir_util.h"
31 #include "codegen/nv50_ir_build_util.h"
32
33 namespace tgsi {
34
35 class Source;
36
37 static nv50_ir::operation translateOpcode(uint opcode);
38 static nv50_ir::DataFile translateFile(uint file);
39 static nv50_ir::TexTarget translateTexture(uint texTarg);
40 static nv50_ir::SVSemantic translateSysVal(uint sysval);
41 static nv50_ir::CacheMode translateCacheMode(uint qualifier);
42 static nv50_ir::ImgFormat translateImgFormat(uint format);
43
44 class Instruction
45 {
46 public:
47 Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49 class SrcRegister
50 {
51 public:
52 SrcRegister(const struct tgsi_full_src_register *src)
53 : reg(src->Register),
54 fsr(src)
55 { }
56
57 SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59 SrcRegister(const struct tgsi_ind_register& ind)
60 : reg(tgsi_util_get_src_from_ind(&ind)),
61 fsr(NULL)
62 { }
63
64 struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65 {
66 struct tgsi_src_register reg;
67 memset(&reg, 0, sizeof(reg));
68 reg.Index = off.Index;
69 reg.File = off.File;
70 reg.SwizzleX = off.SwizzleX;
71 reg.SwizzleY = off.SwizzleY;
72 reg.SwizzleZ = off.SwizzleZ;
73 return reg;
74 }
75
76 SrcRegister(const struct tgsi_texture_offset& off) :
77 reg(offsetToSrc(off)),
78 fsr(NULL)
79 { }
80
81 uint getFile() const { return reg.File; }
82
83 bool is2D() const { return reg.Dimension; }
84
85 bool isIndirect(int dim) const
86 {
87 return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88 }
89
90 int getIndex(int dim) const
91 {
92 return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93 }
94
95 int getSwizzle(int chan) const
96 {
97 return tgsi_util_get_src_register_swizzle(&reg, chan);
98 }
99
100 int getArrayId() const
101 {
102 if (isIndirect(0))
103 return fsr->Indirect.ArrayID;
104 return 0;
105 }
106
107 nv50_ir::Modifier getMod(int chan) const;
108
109 SrcRegister getIndirect(int dim) const
110 {
111 assert(fsr && isIndirect(dim));
112 if (dim)
113 return SrcRegister(fsr->DimIndirect);
114 return SrcRegister(fsr->Indirect);
115 }
116
117 uint32_t getValueU32(int c, const struct nv50_ir_prog_info *info) const
118 {
119 assert(reg.File == TGSI_FILE_IMMEDIATE);
120 assert(!reg.Absolute);
121 assert(!reg.Negate);
122 return info->immd.data[reg.Index * 4 + getSwizzle(c)];
123 }
124
125 private:
126 const struct tgsi_src_register reg;
127 const struct tgsi_full_src_register *fsr;
128 };
129
130 class DstRegister
131 {
132 public:
133 DstRegister(const struct tgsi_full_dst_register *dst)
134 : reg(dst->Register),
135 fdr(dst)
136 { }
137
138 DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140 uint getFile() const { return reg.File; }
141
142 bool is2D() const { return reg.Dimension; }
143
144 bool isIndirect(int dim) const
145 {
146 return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147 }
148
149 int getIndex(int dim) const
150 {
151          return (dim && fdr) ? fdr->Dimension.Index : reg.Index;
152 }
153
154 unsigned int getMask() const { return reg.WriteMask; }
155
156 bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158 SrcRegister getIndirect(int dim) const
159 {
160 assert(fdr && isIndirect(dim));
161 if (dim)
162 return SrcRegister(fdr->DimIndirect);
163 return SrcRegister(fdr->Indirect);
164 }
165
166 int getArrayId() const
167 {
168 if (isIndirect(0))
169 return fdr->Indirect.ArrayID;
170 return 0;
171 }
172
173 private:
174 const struct tgsi_dst_register reg;
175 const struct tgsi_full_dst_register *fdr;
176 };
177
178 inline uint getOpcode() const { return insn->Instruction.Opcode; }
179
180 unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
181 unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
182
183 // mask of used components of source s
184 unsigned int srcMask(unsigned int s) const;
185 unsigned int texOffsetMask() const;
186
187 SrcRegister getSrc(unsigned int s) const
188 {
189 assert(s < srcCount());
190 return SrcRegister(&insn->Src[s]);
191 }
192
193 DstRegister getDst(unsigned int d) const
194 {
195 assert(d < dstCount());
196 return DstRegister(&insn->Dst[d]);
197 }
198
199 SrcRegister getTexOffset(unsigned int i) const
200 {
201 assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
202 return SrcRegister(insn->TexOffsets[i]);
203 }
204
205 unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
206
207 bool checkDstSrcAliasing() const;
208
209 inline nv50_ir::operation getOP() const {
210 return translateOpcode(getOpcode()); }
211
212 nv50_ir::DataType inferSrcType() const;
213 nv50_ir::DataType inferDstType() const;
214
215 nv50_ir::CondCode getSetCond() const;
216
217 nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
218
219 nv50_ir::CacheMode getCacheMode() const {
220 if (!insn->Instruction.Memory)
221 return nv50_ir::CACHE_CA;
222 return translateCacheMode(insn->Memory.Qualifier);
223 }
224
225 inline uint getLabel() { return insn->Label.Label; }
226
227 unsigned getSaturate() const { return insn->Instruction.Saturate; }
228
229 void print() const
230 {
231 tgsi_dump_instruction(insn, 1);
232 }
233
234 private:
235 const struct tgsi_full_instruction *insn;
236 };
237
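// Mask of coordinate components a texel offset applies to, derived from the
// dimensionality of the sampling target (1D -> x, 2D/rect -> xy, 3D -> xyz).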
238 unsigned int Instruction::texOffsetMask() const
239 {
240 const struct tgsi_instruction_texture *tex = &insn->Texture;
241 assert(insn->Instruction.Texture);
242
243 switch (tex->Texture) {
244 case TGSI_TEXTURE_BUFFER:
245 case TGSI_TEXTURE_1D:
246 case TGSI_TEXTURE_SHADOW1D:
247 case TGSI_TEXTURE_1D_ARRAY:
248 case TGSI_TEXTURE_SHADOW1D_ARRAY:
249 return 0x1;
250 case TGSI_TEXTURE_2D:
251 case TGSI_TEXTURE_SHADOW2D:
252 case TGSI_TEXTURE_2D_ARRAY:
253 case TGSI_TEXTURE_SHADOW2D_ARRAY:
254 case TGSI_TEXTURE_RECT:
255 case TGSI_TEXTURE_SHADOWRECT:
256 case TGSI_TEXTURE_2D_MSAA:
257 case TGSI_TEXTURE_2D_ARRAY_MSAA:
258 return 0x3;
259 case TGSI_TEXTURE_3D:
260 return 0x7;
261 default:
262 assert(!"Unexpected texture target");
263 return 0xf;
264 }
265 }
266
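// Which components of source operand s are actually read, starting from the
// destination write mask and applying opcode-specific rules (dot products,
// texture coordinates, 64-bit component pairs, ...).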
267 unsigned int Instruction::srcMask(unsigned int s) const
268 {
269 unsigned int mask = insn->Dst[0].Register.WriteMask;
270
271 switch (insn->Instruction.Opcode) {
272 case TGSI_OPCODE_COS:
273 case TGSI_OPCODE_SIN:
274 return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
275 case TGSI_OPCODE_DP2:
276 return 0x3;
277 case TGSI_OPCODE_DP3:
278 return 0x7;
279 case TGSI_OPCODE_DP4:
280 case TGSI_OPCODE_DPH:
281 case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
282 return 0xf;
283 case TGSI_OPCODE_DST:
284 return mask & (s ? 0xa : 0x6);
285 case TGSI_OPCODE_EX2:
286 case TGSI_OPCODE_EXP:
287 case TGSI_OPCODE_LG2:
288 case TGSI_OPCODE_LOG:
289 case TGSI_OPCODE_POW:
290 case TGSI_OPCODE_RCP:
291 case TGSI_OPCODE_RSQ:
292 case TGSI_OPCODE_SCS:
293 return 0x1;
294 case TGSI_OPCODE_IF:
295 case TGSI_OPCODE_UIF:
296 return 0x1;
297 case TGSI_OPCODE_LIT:
298 return 0xb;
299 case TGSI_OPCODE_TEX2:
300 case TGSI_OPCODE_TXB2:
301 case TGSI_OPCODE_TXL2:
302 return (s == 0) ? 0xf : 0x3;
303 case TGSI_OPCODE_TEX:
304 case TGSI_OPCODE_TXB:
305 case TGSI_OPCODE_TXD:
306 case TGSI_OPCODE_TXL:
307 case TGSI_OPCODE_TXP:
308 case TGSI_OPCODE_LODQ:
309 {
310 const struct tgsi_instruction_texture *tex = &insn->Texture;
311
312 assert(insn->Instruction.Texture);
313
314 mask = 0x7;
315 if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
316 insn->Instruction.Opcode != TGSI_OPCODE_TXD)
317 mask |= 0x8; /* bias, lod or proj */
318
319 switch (tex->Texture) {
320 case TGSI_TEXTURE_1D:
321 mask &= 0x9;
322 break;
323 case TGSI_TEXTURE_SHADOW1D:
324 mask &= 0xd;
325 break;
326 case TGSI_TEXTURE_1D_ARRAY:
327 case TGSI_TEXTURE_2D:
328 case TGSI_TEXTURE_RECT:
329 mask &= 0xb;
330 break;
331 case TGSI_TEXTURE_CUBE_ARRAY:
332 case TGSI_TEXTURE_SHADOW2D_ARRAY:
333 case TGSI_TEXTURE_SHADOWCUBE:
334 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
335 mask |= 0x8;
336 break;
337 default:
338 break;
339 }
340 }
341 return mask;
342 case TGSI_OPCODE_XPD:
343 {
344 unsigned int x = 0;
345 if (mask & 1) x |= 0x6;
346 if (mask & 2) x |= 0x5;
347 if (mask & 4) x |= 0x3;
348 return x;
349 }
350 case TGSI_OPCODE_D2I:
351 case TGSI_OPCODE_D2U:
352 case TGSI_OPCODE_D2F:
353 case TGSI_OPCODE_DSLT:
354 case TGSI_OPCODE_DSGE:
355 case TGSI_OPCODE_DSEQ:
356 case TGSI_OPCODE_DSNE:
357 switch (util_bitcount(mask)) {
358 case 1: return 0x3;
359 case 2: return 0xf;
360 default:
361 assert(!"unexpected mask");
362 return 0xf;
363 }
364 case TGSI_OPCODE_I2D:
365 case TGSI_OPCODE_U2D:
366 case TGSI_OPCODE_F2D: {
367 unsigned int x = 0;
368 if ((mask & 0x3) == 0x3)
369 x |= 1;
370 if ((mask & 0xc) == 0xc)
371 x |= 2;
372 return x;
373 }
374 case TGSI_OPCODE_PK2H:
375 return 0x3;
376 case TGSI_OPCODE_UP2H:
377 return 0x1;
378 default:
379 break;
380 }
381
382 return mask;
383 }
384
385 nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
386 {
387 nv50_ir::Modifier m(0);
388
389 if (reg.Absolute)
390 m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
391 if (reg.Negate)
392 m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
393 return m;
394 }
395
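// Map TGSI register files onto nv50 IR files. TGSI_FILE_MEMORY starts out as
// global memory and is narrowed to shared/input in makeSym() based on the
// declared memory type.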
396 static nv50_ir::DataFile translateFile(uint file)
397 {
398 switch (file) {
399 case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
400 case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
401 case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
402 case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
403 case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
404 case TGSI_FILE_PREDICATE: return nv50_ir::FILE_PREDICATE;
405 case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
406 case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
407 case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
408 case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
409 case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
410 case TGSI_FILE_SAMPLER:
411 case TGSI_FILE_NULL:
412 default:
413 return nv50_ir::FILE_NULL;
414 }
415 }
416
417 static nv50_ir::SVSemantic translateSysVal(uint sysval)
418 {
419 switch (sysval) {
420 case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
421 case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
422 case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
423 case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
424 case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
425 case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
426 case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
427 case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
428 case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
429 case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
430 case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
431 case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
432 case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
433 case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
434 case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
435 case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
436 case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
437 case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
438 case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
439 case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
440 case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
441 case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
442 default:
443 assert(0);
444 return nv50_ir::SV_CLOCK;
445 }
446 }
447
448 #define NV50_IR_TEX_TARG_CASE(a, b) \
449 case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
450
451 static nv50_ir::TexTarget translateTexture(uint tex)
452 {
453 switch (tex) {
454 NV50_IR_TEX_TARG_CASE(1D, 1D);
455 NV50_IR_TEX_TARG_CASE(2D, 2D);
456 NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
457 NV50_IR_TEX_TARG_CASE(3D, 3D);
458 NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
459 NV50_IR_TEX_TARG_CASE(RECT, RECT);
460 NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
461 NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
462 NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
463 NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
464 NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
465 NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
466 NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
467 NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
468 NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
469 NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
470 NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
471 NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
472
473 case TGSI_TEXTURE_UNKNOWN:
474 default:
475 assert(!"invalid texture target");
476 return nv50_ir::TEX_TARGET_2D;
477 }
478 }
479
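// Map TGSI memory qualifiers to nv50 IR cache modes: volatile accesses get
// CACHE_CV, coherent ones CACHE_CG, everything else the default CACHE_CA.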
480 static nv50_ir::CacheMode translateCacheMode(uint qualifier)
481 {
482 if (qualifier & TGSI_MEMORY_VOLATILE)
483 return nv50_ir::CACHE_CV;
484 if (qualifier & TGSI_MEMORY_COHERENT)
485 return nv50_ir::CACHE_CG;
486 return nv50_ir::CACHE_CA;
487 }
488
489 static nv50_ir::ImgFormat translateImgFormat(uint format)
490 {
491
492 #define FMT_CASE(a, b) \
493 case PIPE_FORMAT_ ## a: return nv50_ir::FMT_ ## b
494
495 switch (format) {
496 FMT_CASE(NONE, NONE);
497
498 FMT_CASE(R32G32B32A32_FLOAT, RGBA32F);
499 FMT_CASE(R16G16B16A16_FLOAT, RGBA16F);
500 FMT_CASE(R32G32_FLOAT, RG32F);
501 FMT_CASE(R16G16_FLOAT, RG16F);
502 FMT_CASE(R11G11B10_FLOAT, R11G11B10F);
503 FMT_CASE(R32_FLOAT, R32F);
504 FMT_CASE(R16_FLOAT, R16F);
505
506 FMT_CASE(R32G32B32A32_UINT, RGBA32UI);
507 FMT_CASE(R16G16B16A16_UINT, RGBA16UI);
508 FMT_CASE(R10G10B10A2_UINT, RGB10A2UI);
509 FMT_CASE(R8G8B8A8_UINT, RGBA8UI);
510 FMT_CASE(R32G32_UINT, RG32UI);
511 FMT_CASE(R16G16_UINT, RG16UI);
512 FMT_CASE(R8G8_UINT, RG8UI);
513 FMT_CASE(R32_UINT, R32UI);
514 FMT_CASE(R16_UINT, R16UI);
515 FMT_CASE(R8_UINT, R8UI);
516
517 FMT_CASE(R32G32B32A32_SINT, RGBA32I);
518 FMT_CASE(R16G16B16A16_SINT, RGBA16I);
519 FMT_CASE(R8G8B8A8_SINT, RGBA8I);
520 FMT_CASE(R32G32_SINT, RG32I);
521 FMT_CASE(R16G16_SINT, RG16I);
522 FMT_CASE(R8G8_SINT, RG8I);
523 FMT_CASE(R32_SINT, R32I);
524 FMT_CASE(R16_SINT, R16I);
525 FMT_CASE(R8_SINT, R8I);
526
527 FMT_CASE(R16G16B16A16_UNORM, RGBA16);
528 FMT_CASE(R10G10B10A2_UNORM, RGB10A2);
529 FMT_CASE(R8G8B8A8_UNORM, RGBA8);
530 FMT_CASE(R16G16_UNORM, RG16);
531 FMT_CASE(R8G8_UNORM, RG8);
532 FMT_CASE(R16_UNORM, R16);
533 FMT_CASE(R8_UNORM, R8);
534
535 FMT_CASE(R16G16B16A16_SNORM, RGBA16_SNORM);
536 FMT_CASE(R8G8B8A8_SNORM, RGBA8_SNORM);
537 FMT_CASE(R16G16_SNORM, RG16_SNORM);
538 FMT_CASE(R8G8_SNORM, RG8_SNORM);
539 FMT_CASE(R16_SNORM, R16_SNORM);
540 FMT_CASE(R8_SNORM, R8_SNORM);
541
542 FMT_CASE(B8G8R8A8_UNORM, BGRA8);
543 }
544
545 assert(!"Unexpected format");
546 return nv50_ir::FMT_NONE;
547 }
548
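// TGSI registers are untyped, so the source data type has to be inferred from
// the opcode; anything not explicitly integer or double is treated as F32.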
549 nv50_ir::DataType Instruction::inferSrcType() const
550 {
551 switch (getOpcode()) {
552 case TGSI_OPCODE_UIF:
553 case TGSI_OPCODE_AND:
554 case TGSI_OPCODE_OR:
555 case TGSI_OPCODE_XOR:
556 case TGSI_OPCODE_NOT:
557 case TGSI_OPCODE_SHL:
558 case TGSI_OPCODE_U2F:
559 case TGSI_OPCODE_U2D:
560 case TGSI_OPCODE_UADD:
561 case TGSI_OPCODE_UDIV:
562 case TGSI_OPCODE_UMOD:
563 case TGSI_OPCODE_UMAD:
564 case TGSI_OPCODE_UMUL:
565 case TGSI_OPCODE_UMUL_HI:
566 case TGSI_OPCODE_UMAX:
567 case TGSI_OPCODE_UMIN:
568 case TGSI_OPCODE_USEQ:
569 case TGSI_OPCODE_USGE:
570 case TGSI_OPCODE_USLT:
571 case TGSI_OPCODE_USNE:
572 case TGSI_OPCODE_USHR:
573 case TGSI_OPCODE_ATOMUADD:
574 case TGSI_OPCODE_ATOMXCHG:
575 case TGSI_OPCODE_ATOMCAS:
576 case TGSI_OPCODE_ATOMAND:
577 case TGSI_OPCODE_ATOMOR:
578 case TGSI_OPCODE_ATOMXOR:
579 case TGSI_OPCODE_ATOMUMIN:
580 case TGSI_OPCODE_ATOMUMAX:
581 case TGSI_OPCODE_UBFE:
582 case TGSI_OPCODE_UMSB:
583 case TGSI_OPCODE_UP2H:
584 case TGSI_OPCODE_VOTE_ALL:
585 case TGSI_OPCODE_VOTE_ANY:
586 case TGSI_OPCODE_VOTE_EQ:
587 return nv50_ir::TYPE_U32;
588 case TGSI_OPCODE_I2F:
589 case TGSI_OPCODE_I2D:
590 case TGSI_OPCODE_IDIV:
591 case TGSI_OPCODE_IMUL_HI:
592 case TGSI_OPCODE_IMAX:
593 case TGSI_OPCODE_IMIN:
594 case TGSI_OPCODE_IABS:
595 case TGSI_OPCODE_INEG:
596 case TGSI_OPCODE_ISGE:
597 case TGSI_OPCODE_ISHR:
598 case TGSI_OPCODE_ISLT:
599 case TGSI_OPCODE_ISSG:
600 case TGSI_OPCODE_SAD: // not sure about SAD, but no one has a float version
601 case TGSI_OPCODE_MOD:
602 case TGSI_OPCODE_UARL:
603 case TGSI_OPCODE_ATOMIMIN:
604 case TGSI_OPCODE_ATOMIMAX:
605 case TGSI_OPCODE_IBFE:
606 case TGSI_OPCODE_IMSB:
607 return nv50_ir::TYPE_S32;
608 case TGSI_OPCODE_D2F:
609 case TGSI_OPCODE_D2I:
610 case TGSI_OPCODE_D2U:
611 case TGSI_OPCODE_DABS:
612 case TGSI_OPCODE_DNEG:
613 case TGSI_OPCODE_DADD:
614 case TGSI_OPCODE_DMUL:
615 case TGSI_OPCODE_DMAX:
616 case TGSI_OPCODE_DMIN:
617 case TGSI_OPCODE_DSLT:
618 case TGSI_OPCODE_DSGE:
619 case TGSI_OPCODE_DSEQ:
620 case TGSI_OPCODE_DSNE:
621 case TGSI_OPCODE_DRCP:
622 case TGSI_OPCODE_DSQRT:
623 case TGSI_OPCODE_DMAD:
624 case TGSI_OPCODE_DFMA:
625 case TGSI_OPCODE_DFRAC:
626 case TGSI_OPCODE_DRSQ:
627 case TGSI_OPCODE_DTRUNC:
628 case TGSI_OPCODE_DCEIL:
629 case TGSI_OPCODE_DFLR:
630 case TGSI_OPCODE_DROUND:
631 return nv50_ir::TYPE_F64;
632 default:
633 return nv50_ir::TYPE_F32;
634 }
635 }
636
637 nv50_ir::DataType Instruction::inferDstType() const
638 {
639 switch (getOpcode()) {
640 case TGSI_OPCODE_D2U:
641 case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
642 case TGSI_OPCODE_D2I:
643 case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
644 case TGSI_OPCODE_FSEQ:
645 case TGSI_OPCODE_FSGE:
646 case TGSI_OPCODE_FSLT:
647 case TGSI_OPCODE_FSNE:
648 case TGSI_OPCODE_DSEQ:
649 case TGSI_OPCODE_DSGE:
650 case TGSI_OPCODE_DSLT:
651 case TGSI_OPCODE_DSNE:
652 case TGSI_OPCODE_PK2H:
653 return nv50_ir::TYPE_U32;
654 case TGSI_OPCODE_I2F:
655 case TGSI_OPCODE_U2F:
656 case TGSI_OPCODE_D2F:
657 case TGSI_OPCODE_UP2H:
658 return nv50_ir::TYPE_F32;
659 case TGSI_OPCODE_I2D:
660 case TGSI_OPCODE_U2D:
661 case TGSI_OPCODE_F2D:
662 return nv50_ir::TYPE_F64;
663 default:
664 return inferSrcType();
665 }
666 }
667
668 nv50_ir::CondCode Instruction::getSetCond() const
669 {
670 using namespace nv50_ir;
671
672 switch (getOpcode()) {
673 case TGSI_OPCODE_SLT:
674 case TGSI_OPCODE_ISLT:
675 case TGSI_OPCODE_USLT:
676 case TGSI_OPCODE_FSLT:
677 case TGSI_OPCODE_DSLT:
678 return CC_LT;
679 case TGSI_OPCODE_SLE:
680 return CC_LE;
681 case TGSI_OPCODE_SGE:
682 case TGSI_OPCODE_ISGE:
683 case TGSI_OPCODE_USGE:
684 case TGSI_OPCODE_FSGE:
685 case TGSI_OPCODE_DSGE:
686 return CC_GE;
687 case TGSI_OPCODE_SGT:
688 return CC_GT;
689 case TGSI_OPCODE_SEQ:
690 case TGSI_OPCODE_USEQ:
691 case TGSI_OPCODE_FSEQ:
692 case TGSI_OPCODE_DSEQ:
693 return CC_EQ;
694 case TGSI_OPCODE_SNE:
695 case TGSI_OPCODE_FSNE:
696 case TGSI_OPCODE_DSNE:
697 return CC_NEU;
698 case TGSI_OPCODE_USNE:
699 return CC_NE;
700 default:
701 return CC_ALWAYS;
702 }
703 }
704
705 #define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
706
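// Translate a TGSI opcode to its nv50 IR operation. Many-to-one mappings
// (comparisons -> SET, conversions -> CVT) are disambiguated elsewhere via
// getSetCond() and inferSrcType()/inferDstType().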
707 static nv50_ir::operation translateOpcode(uint opcode)
708 {
709 switch (opcode) {
710 NV50_IR_OPCODE_CASE(ARL, SHL);
711 NV50_IR_OPCODE_CASE(MOV, MOV);
712
713 NV50_IR_OPCODE_CASE(RCP, RCP);
714 NV50_IR_OPCODE_CASE(RSQ, RSQ);
715 NV50_IR_OPCODE_CASE(SQRT, SQRT);
716
717 NV50_IR_OPCODE_CASE(MUL, MUL);
718 NV50_IR_OPCODE_CASE(ADD, ADD);
719
720 NV50_IR_OPCODE_CASE(MIN, MIN);
721 NV50_IR_OPCODE_CASE(MAX, MAX);
722 NV50_IR_OPCODE_CASE(SLT, SET);
723 NV50_IR_OPCODE_CASE(SGE, SET);
724 NV50_IR_OPCODE_CASE(MAD, MAD);
725 NV50_IR_OPCODE_CASE(FMA, FMA);
726 NV50_IR_OPCODE_CASE(SUB, SUB);
727
728 NV50_IR_OPCODE_CASE(FLR, FLOOR);
729 NV50_IR_OPCODE_CASE(ROUND, CVT);
730 NV50_IR_OPCODE_CASE(EX2, EX2);
731 NV50_IR_OPCODE_CASE(LG2, LG2);
732 NV50_IR_OPCODE_CASE(POW, POW);
733
734 NV50_IR_OPCODE_CASE(COS, COS);
735 NV50_IR_OPCODE_CASE(DDX, DFDX);
736 NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
737 NV50_IR_OPCODE_CASE(DDY, DFDY);
738 NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
739 NV50_IR_OPCODE_CASE(KILL, DISCARD);
740
741 NV50_IR_OPCODE_CASE(SEQ, SET);
742 NV50_IR_OPCODE_CASE(SGT, SET);
743 NV50_IR_OPCODE_CASE(SIN, SIN);
744 NV50_IR_OPCODE_CASE(SLE, SET);
745 NV50_IR_OPCODE_CASE(SNE, SET);
746 NV50_IR_OPCODE_CASE(TEX, TEX);
747 NV50_IR_OPCODE_CASE(TXD, TXD);
748 NV50_IR_OPCODE_CASE(TXP, TEX);
749
750 NV50_IR_OPCODE_CASE(CAL, CALL);
751 NV50_IR_OPCODE_CASE(RET, RET);
752 NV50_IR_OPCODE_CASE(CMP, SLCT);
753
754 NV50_IR_OPCODE_CASE(TXB, TXB);
755
756 NV50_IR_OPCODE_CASE(DIV, DIV);
757
758 NV50_IR_OPCODE_CASE(TXL, TXL);
759
760 NV50_IR_OPCODE_CASE(CEIL, CEIL);
761 NV50_IR_OPCODE_CASE(I2F, CVT);
762 NV50_IR_OPCODE_CASE(NOT, NOT);
763 NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
764 NV50_IR_OPCODE_CASE(SHL, SHL);
765
766 NV50_IR_OPCODE_CASE(AND, AND);
767 NV50_IR_OPCODE_CASE(OR, OR);
768 NV50_IR_OPCODE_CASE(MOD, MOD);
769 NV50_IR_OPCODE_CASE(XOR, XOR);
770 NV50_IR_OPCODE_CASE(SAD, SAD);
771 NV50_IR_OPCODE_CASE(TXF, TXF);
772 NV50_IR_OPCODE_CASE(TXQ, TXQ);
773 NV50_IR_OPCODE_CASE(TXQS, TXQ);
774 NV50_IR_OPCODE_CASE(TG4, TXG);
775 NV50_IR_OPCODE_CASE(LODQ, TXLQ);
776
777 NV50_IR_OPCODE_CASE(EMIT, EMIT);
778 NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
779
780 NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
781
782 NV50_IR_OPCODE_CASE(F2I, CVT);
783 NV50_IR_OPCODE_CASE(FSEQ, SET);
784 NV50_IR_OPCODE_CASE(FSGE, SET);
785 NV50_IR_OPCODE_CASE(FSLT, SET);
786 NV50_IR_OPCODE_CASE(FSNE, SET);
787 NV50_IR_OPCODE_CASE(IDIV, DIV);
788 NV50_IR_OPCODE_CASE(IMAX, MAX);
789 NV50_IR_OPCODE_CASE(IMIN, MIN);
790 NV50_IR_OPCODE_CASE(IABS, ABS);
791 NV50_IR_OPCODE_CASE(INEG, NEG);
792 NV50_IR_OPCODE_CASE(ISGE, SET);
793 NV50_IR_OPCODE_CASE(ISHR, SHR);
794 NV50_IR_OPCODE_CASE(ISLT, SET);
795 NV50_IR_OPCODE_CASE(F2U, CVT);
796 NV50_IR_OPCODE_CASE(U2F, CVT);
797 NV50_IR_OPCODE_CASE(UADD, ADD);
798 NV50_IR_OPCODE_CASE(UDIV, DIV);
799 NV50_IR_OPCODE_CASE(UMAD, MAD);
800 NV50_IR_OPCODE_CASE(UMAX, MAX);
801 NV50_IR_OPCODE_CASE(UMIN, MIN);
802 NV50_IR_OPCODE_CASE(UMOD, MOD);
803 NV50_IR_OPCODE_CASE(UMUL, MUL);
804 NV50_IR_OPCODE_CASE(USEQ, SET);
805 NV50_IR_OPCODE_CASE(USGE, SET);
806 NV50_IR_OPCODE_CASE(USHR, SHR);
807 NV50_IR_OPCODE_CASE(USLT, SET);
808 NV50_IR_OPCODE_CASE(USNE, SET);
809
810 NV50_IR_OPCODE_CASE(DABS, ABS);
811 NV50_IR_OPCODE_CASE(DNEG, NEG);
812 NV50_IR_OPCODE_CASE(DADD, ADD);
813 NV50_IR_OPCODE_CASE(DMUL, MUL);
814 NV50_IR_OPCODE_CASE(DMAX, MAX);
815 NV50_IR_OPCODE_CASE(DMIN, MIN);
816 NV50_IR_OPCODE_CASE(DSLT, SET);
817 NV50_IR_OPCODE_CASE(DSGE, SET);
818 NV50_IR_OPCODE_CASE(DSEQ, SET);
819 NV50_IR_OPCODE_CASE(DSNE, SET);
820 NV50_IR_OPCODE_CASE(DRCP, RCP);
821 NV50_IR_OPCODE_CASE(DSQRT, SQRT);
822 NV50_IR_OPCODE_CASE(DMAD, MAD);
823 NV50_IR_OPCODE_CASE(DFMA, FMA);
824 NV50_IR_OPCODE_CASE(D2I, CVT);
825 NV50_IR_OPCODE_CASE(D2U, CVT);
826 NV50_IR_OPCODE_CASE(I2D, CVT);
827 NV50_IR_OPCODE_CASE(U2D, CVT);
828 NV50_IR_OPCODE_CASE(DRSQ, RSQ);
829 NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
830 NV50_IR_OPCODE_CASE(DCEIL, CEIL);
831 NV50_IR_OPCODE_CASE(DFLR, FLOOR);
832 NV50_IR_OPCODE_CASE(DROUND, CVT);
833
834 NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
835 NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
836
837 NV50_IR_OPCODE_CASE(SAMPLE, TEX);
838 NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
839 NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
840 NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
841 NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
842 NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
843 NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
844 NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
845 NV50_IR_OPCODE_CASE(GATHER4, TXG);
846 NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
847
848 NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
849 NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
850 NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
851 NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
852 NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
853 NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
854 NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
855 NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
856 NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
857 NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
858
859 NV50_IR_OPCODE_CASE(TEX2, TEX);
860 NV50_IR_OPCODE_CASE(TXB2, TXB);
861 NV50_IR_OPCODE_CASE(TXL2, TXL);
862
863 NV50_IR_OPCODE_CASE(IBFE, EXTBF);
864 NV50_IR_OPCODE_CASE(UBFE, EXTBF);
865 NV50_IR_OPCODE_CASE(BFI, INSBF);
866 NV50_IR_OPCODE_CASE(BREV, EXTBF);
867 NV50_IR_OPCODE_CASE(POPC, POPCNT);
868 NV50_IR_OPCODE_CASE(LSB, BFIND);
869 NV50_IR_OPCODE_CASE(IMSB, BFIND);
870 NV50_IR_OPCODE_CASE(UMSB, BFIND);
871
872 NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
873 NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
874 NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
875
876 NV50_IR_OPCODE_CASE(END, EXIT);
877
878 default:
879 return nv50_ir::OP_NOP;
880 }
881 }
882
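// Select the nv50 IR sub-operation for opcodes that share a base operation:
// memory barriers, atomics, the VOTE variants and the high-multiply forms.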
883 static uint16_t opcodeToSubOp(uint opcode)
884 {
885 switch (opcode) {
886 case TGSI_OPCODE_LFENCE: return NV50_IR_SUBOP_MEMBAR(L, GL);
887 case TGSI_OPCODE_SFENCE: return NV50_IR_SUBOP_MEMBAR(S, GL);
888 case TGSI_OPCODE_MFENCE: return NV50_IR_SUBOP_MEMBAR(M, GL);
889 case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
890 case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
891 case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
892 case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
893 case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
894 case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
895 case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
896 case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
897 case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
898 case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
899 case TGSI_OPCODE_IMUL_HI:
900 case TGSI_OPCODE_UMUL_HI:
901 return NV50_IR_SUBOP_MUL_HIGH;
902 case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
903 case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
904 case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
905 default:
906 return 0;
907 }
908 }
909
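// Report whether the first destination register aliases any source register;
// indirect destinations go through memory and are never considered aliasing.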
910 bool Instruction::checkDstSrcAliasing() const
911 {
912 if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
913 return false;
914
915 for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
916 if (insn->Src[s].Register.File == TGSI_FILE_NULL)
917 break;
918 if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
919 insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
920 return true;
921 }
922 return false;
923 }
924
925 class Source
926 {
927 public:
928 Source(struct nv50_ir_prog_info *);
929 ~Source();
930
931 public:
932 bool scanSource();
933 unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
934
935 public:
936 struct tgsi_shader_info scan;
937 struct tgsi_full_instruction *insns;
938 const struct tgsi_token *tokens;
939 struct nv50_ir_prog_info *info;
940
941 nv50_ir::DynArray tempArrays;
942 nv50_ir::DynArray immdArrays;
943
944 typedef nv50_ir::BuildUtil::Location Location;
945    // these registers are per-subroutine and cannot be used for parameter passing
946 std::set<Location> locals;
947
948 std::set<int> indirectTempArrays;
949 std::map<int, int> indirectTempOffsets;
950 std::map<int, std::pair<int, int> > tempArrayInfo;
951 std::vector<int> tempArrayId;
952
953 int clipVertexOutput;
954
955 struct TextureView {
956 uint8_t target; // TGSI_TEXTURE_*
957 };
958 std::vector<TextureView> textureViews;
959
960 /*
961 struct Resource {
962 uint8_t target; // TGSI_TEXTURE_*
963 bool raw;
964 uint8_t slot; // $surface index
965 };
966 std::vector<Resource> resources;
967 */
968
969 struct Image {
970 uint8_t target; // TGSI_TEXTURE_*
971 bool raw;
972 uint8_t slot;
973 uint16_t format; // PIPE_FORMAT_*
974 };
975 std::vector<Image> images;
976
977 struct MemoryFile {
978 uint8_t mem_type; // TGSI_MEMORY_TYPE_*
979 };
980 std::vector<MemoryFile> memoryFiles;
981
982 private:
983 int inferSysValDirection(unsigned sn) const;
984 bool scanDeclaration(const struct tgsi_full_declaration *);
985 bool scanInstruction(const struct tgsi_full_instruction *);
986 void scanInstructionSrc(const Instruction& insn,
987 const Instruction::SrcRegister& src,
988 unsigned mask);
989 void scanProperty(const struct tgsi_full_property *);
990 void scanImmediate(const struct tgsi_full_immediate *);
991
992 inline bool isEdgeFlagPassthrough(const Instruction&) const;
993 };
994
995 Source::Source(struct nv50_ir_prog_info *prog) : info(prog)
996 {
997 tokens = (const struct tgsi_token *)info->bin.source;
998
999 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
1000 tgsi_dump(tokens, 0);
1001 }
1002
1003 Source::~Source()
1004 {
1005 if (insns)
1006 FREE(insns);
1007
1008 if (info->immd.data)
1009 FREE(info->immd.data);
1010 if (info->immd.type)
1011 FREE(info->immd.type);
1012 }
1013
1014 bool Source::scanSource()
1015 {
1016 unsigned insnCount = 0;
1017 struct tgsi_parse_context parse;
1018
1019 tgsi_scan_shader(tokens, &scan);
1020
1021 insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1022 sizeof(insns[0]));
1023 if (!insns)
1024 return false;
1025
1026 clipVertexOutput = -1;
1027
1028 textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1029 //resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1030 images.resize(scan.file_max[TGSI_FILE_IMAGE] + 1);
1031 tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1032 memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1033
1034 info->immd.bufSize = 0;
1035
1036 info->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1037 info->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1038 info->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1039
1040 if (info->type == PIPE_SHADER_FRAGMENT) {
1041 info->prop.fp.writesDepth = scan.writes_z;
1042 info->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1043 } else
1044 if (info->type == PIPE_SHADER_GEOMETRY) {
1045 info->prop.gp.instanceCount = 1; // default value
1046 }
1047
1048 info->io.viewportId = -1;
1049 info->prop.cp.numThreads = 1;
1050
1051 info->immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1052 info->immd.type = (ubyte *)MALLOC(scan.immediate_count * sizeof(ubyte));
1053
1054 tgsi_parse_init(&parse, tokens);
1055 while (!tgsi_parse_end_of_tokens(&parse)) {
1056 tgsi_parse_token(&parse);
1057
1058 switch (parse.FullToken.Token.Type) {
1059 case TGSI_TOKEN_TYPE_IMMEDIATE:
1060 scanImmediate(&parse.FullToken.FullImmediate);
1061 break;
1062 case TGSI_TOKEN_TYPE_DECLARATION:
1063 scanDeclaration(&parse.FullToken.FullDeclaration);
1064 break;
1065 case TGSI_TOKEN_TYPE_INSTRUCTION:
1066 insns[insnCount++] = parse.FullToken.FullInstruction;
1067 scanInstruction(&parse.FullToken.FullInstruction);
1068 break;
1069 case TGSI_TOKEN_TYPE_PROPERTY:
1070 scanProperty(&parse.FullToken.FullProperty);
1071 break;
1072 default:
1073 INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1074 break;
1075 }
1076 }
1077 tgsi_parse_free(&parse);
1078
1079 if (indirectTempArrays.size()) {
1080 int tempBase = 0;
1081 for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1082 it != indirectTempArrays.end(); ++it) {
1083 std::pair<int, int>& info = tempArrayInfo[*it];
1084 indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1085 tempBase += info.second;
1086 }
1087 info->bin.tlsSpace += tempBase * 16;
1088 }
1089
1090 if (info->io.genUserClip > 0) {
1091 info->io.clipDistances = info->io.genUserClip;
1092
1093 const unsigned int nOut = (info->io.genUserClip + 3) / 4;
1094
1095 for (unsigned int n = 0; n < nOut; ++n) {
1096 unsigned int i = info->numOutputs++;
1097 info->out[i].id = i;
1098 info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1099 info->out[i].si = n;
1100 info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
1101 }
1102 }
1103
1104 return info->assignSlots(info) == 0;
1105 }
1106
1107 void Source::scanProperty(const struct tgsi_full_property *prop)
1108 {
1109 switch (prop->Property.PropertyName) {
1110 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1111 info->prop.gp.outputPrim = prop->u[0].Data;
1112 break;
1113 case TGSI_PROPERTY_GS_INPUT_PRIM:
1114 info->prop.gp.inputPrim = prop->u[0].Data;
1115 break;
1116 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1117 info->prop.gp.maxVertices = prop->u[0].Data;
1118 break;
1119 case TGSI_PROPERTY_GS_INVOCATIONS:
1120 info->prop.gp.instanceCount = prop->u[0].Data;
1121 break;
1122 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1123 info->prop.fp.separateFragData = true;
1124 break;
1125 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1126 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1127 case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1128 // we don't care
1129 break;
1130 case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1131 info->io.genUserClip = -1;
1132 break;
1133 case TGSI_PROPERTY_TCS_VERTICES_OUT:
1134 info->prop.tp.outputPatchSize = prop->u[0].Data;
1135 break;
1136 case TGSI_PROPERTY_TES_PRIM_MODE:
1137 info->prop.tp.domain = prop->u[0].Data;
1138 break;
1139 case TGSI_PROPERTY_TES_SPACING:
1140 info->prop.tp.partitioning = prop->u[0].Data;
1141 break;
1142 case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1143 info->prop.tp.winding = prop->u[0].Data;
1144 break;
1145 case TGSI_PROPERTY_TES_POINT_MODE:
1146 if (prop->u[0].Data)
1147 info->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1148 else
1149 info->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1150 break;
1151 case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1152 case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1153 case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1154 info->prop.cp.numThreads *= prop->u[0].Data;
1155 break;
1156 case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1157 info->io.clipDistances = prop->u[0].Data;
1158 break;
1159 case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1160 info->io.cullDistances = prop->u[0].Data;
1161 break;
1162 case TGSI_PROPERTY_NEXT_SHADER:
1163 /* Do not need to know the next shader stage. */
1164 break;
1165 case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1166 info->prop.fp.earlyFragTests = prop->u[0].Data;
1167 break;
1168 default:
1169 INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1170 break;
1171 }
1172 }
1173
1174 void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1175 {
1176 const unsigned n = info->immd.count++;
1177
1178 assert(n < scan.immediate_count);
1179
1180 for (int c = 0; c < 4; ++c)
1181 info->immd.data[n * 4 + c] = imm->u[c].Uint;
1182
1183 info->immd.type[n] = imm->Immediate.DataType;
1184 }
1185
1186 int Source::inferSysValDirection(unsigned sn) const
1187 {
1188 switch (sn) {
1189 case TGSI_SEMANTIC_INSTANCEID:
1190 case TGSI_SEMANTIC_VERTEXID:
1191 return 1;
1192 case TGSI_SEMANTIC_LAYER:
1193 #if 0
1194 case TGSI_SEMANTIC_VIEWPORTINDEX:
1195 return 0;
1196 #endif
1197 case TGSI_SEMANTIC_PRIMID:
1198 return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1199 default:
1200 return 0;
1201 }
1202 }
1203
1204 bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1205 {
1206 unsigned i, c;
1207 unsigned sn = TGSI_SEMANTIC_GENERIC;
1208 unsigned si = 0;
1209 const unsigned first = decl->Range.First, last = decl->Range.Last;
1210 const int arrayId = decl->Array.ArrayID;
1211
1212 if (decl->Declaration.Semantic) {
1213 sn = decl->Semantic.Name;
1214 si = decl->Semantic.Index;
1215 }
1216
1217 if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1218 for (i = first; i <= last; ++i) {
1219 for (c = 0; c < 4; ++c) {
1220 locals.insert(
1221 Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1222 }
1223 }
1224 }
1225
1226 switch (decl->Declaration.File) {
1227 case TGSI_FILE_INPUT:
1228 if (info->type == PIPE_SHADER_VERTEX) {
1229 // all vertex attributes are equal
1230 for (i = first; i <= last; ++i) {
1231 info->in[i].sn = TGSI_SEMANTIC_GENERIC;
1232 info->in[i].si = i;
1233 }
1234 } else {
1235 for (i = first; i <= last; ++i, ++si) {
1236 info->in[i].id = i;
1237 info->in[i].sn = sn;
1238 info->in[i].si = si;
1239 if (info->type == PIPE_SHADER_FRAGMENT) {
1240 // translate interpolation mode
1241 switch (decl->Interp.Interpolate) {
1242 case TGSI_INTERPOLATE_CONSTANT:
1243 info->in[i].flat = 1;
1244 break;
1245 case TGSI_INTERPOLATE_COLOR:
1246 info->in[i].sc = 1;
1247 break;
1248 case TGSI_INTERPOLATE_LINEAR:
1249 info->in[i].linear = 1;
1250 break;
1251 default:
1252 break;
1253 }
1254 if (decl->Interp.Location)
1255 info->in[i].centroid = 1;
1256 }
1257
1258 if (sn == TGSI_SEMANTIC_PATCH)
1259 info->in[i].patch = 1;
1260 if (sn == TGSI_SEMANTIC_PATCH)
1261 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1262 }
1263 }
1264 break;
1265 case TGSI_FILE_OUTPUT:
1266 for (i = first; i <= last; ++i, ++si) {
1267 switch (sn) {
1268 case TGSI_SEMANTIC_POSITION:
1269 if (info->type == PIPE_SHADER_FRAGMENT)
1270 info->io.fragDepth = i;
1271 else
1272 if (clipVertexOutput < 0)
1273 clipVertexOutput = i;
1274 break;
1275 case TGSI_SEMANTIC_COLOR:
1276 if (info->type == PIPE_SHADER_FRAGMENT)
1277 info->prop.fp.numColourResults++;
1278 break;
1279 case TGSI_SEMANTIC_EDGEFLAG:
1280 info->io.edgeFlagOut = i;
1281 break;
1282 case TGSI_SEMANTIC_CLIPVERTEX:
1283 clipVertexOutput = i;
1284 break;
1285 case TGSI_SEMANTIC_CLIPDIST:
1286 info->io.genUserClip = -1;
1287 break;
1288 case TGSI_SEMANTIC_SAMPLEMASK:
1289 info->io.sampleMask = i;
1290 break;
1291 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1292 info->io.viewportId = i;
1293 break;
1294 case TGSI_SEMANTIC_PATCH:
1295 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1296 /* fallthrough */
1297 case TGSI_SEMANTIC_TESSOUTER:
1298 case TGSI_SEMANTIC_TESSINNER:
1299 info->out[i].patch = 1;
1300 break;
1301 default:
1302 break;
1303 }
1304 info->out[i].id = i;
1305 info->out[i].sn = sn;
1306 info->out[i].si = si;
1307 }
1308 break;
1309 case TGSI_FILE_SYSTEM_VALUE:
1310 switch (sn) {
1311 case TGSI_SEMANTIC_INSTANCEID:
1312 info->io.instanceId = first;
1313 break;
1314 case TGSI_SEMANTIC_VERTEXID:
1315 info->io.vertexId = first;
1316 break;
1317 case TGSI_SEMANTIC_BASEVERTEX:
1318 case TGSI_SEMANTIC_BASEINSTANCE:
1319 case TGSI_SEMANTIC_DRAWID:
1320 info->prop.vp.usesDrawParameters = true;
1321 break;
1322 case TGSI_SEMANTIC_SAMPLEID:
1323 case TGSI_SEMANTIC_SAMPLEPOS:
1324 info->prop.fp.persampleInvocation = true;
1325 break;
1326 case TGSI_SEMANTIC_SAMPLEMASK:
1327 info->prop.fp.usesSampleMaskIn = true;
1328 break;
1329 default:
1330 break;
1331 }
1332 for (i = first; i <= last; ++i, ++si) {
1333 info->sv[i].sn = sn;
1334 info->sv[i].si = si;
1335 info->sv[i].input = inferSysValDirection(sn);
1336
1337 switch (sn) {
1338 case TGSI_SEMANTIC_TESSOUTER:
1339 case TGSI_SEMANTIC_TESSINNER:
1340 info->sv[i].patch = 1;
1341 break;
1342 }
1343 }
1344 break;
1345 /*
1346 case TGSI_FILE_RESOURCE:
1347 for (i = first; i <= last; ++i) {
1348 resources[i].target = decl->Resource.Resource;
1349 resources[i].raw = decl->Resource.Raw;
1350 resources[i].slot = i;
1351 }
1352 break;
1353 */
1354 case TGSI_FILE_IMAGE:
1355 for (i = first; i <= last; ++i) {
1356 images[i].target = decl->Image.Resource;
1357 images[i].raw = decl->Image.Raw;
1358 images[i].format = decl->Image.Format;
1359 images[i].slot = i;
1360 }
1361 break;
1362 case TGSI_FILE_SAMPLER_VIEW:
1363 for (i = first; i <= last; ++i)
1364 textureViews[i].target = decl->SamplerView.Resource;
1365 break;
1366 case TGSI_FILE_MEMORY:
1367 for (i = first; i <= last; ++i)
1368 memoryFiles[i].mem_type = decl->Declaration.MemType;
1369 break;
1370 case TGSI_FILE_NULL:
1371 case TGSI_FILE_TEMPORARY:
1372 for (i = first; i <= last; ++i)
1373 tempArrayId[i] = arrayId;
1374 if (arrayId)
1375 tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1376 first, last - first + 1)));
1377 break;
1378 case TGSI_FILE_ADDRESS:
1379 case TGSI_FILE_CONSTANT:
1380 case TGSI_FILE_IMMEDIATE:
1381 case TGSI_FILE_PREDICATE:
1382 case TGSI_FILE_SAMPLER:
1383 case TGSI_FILE_BUFFER:
1384 break;
1385 default:
1386 ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1387 return false;
1388 }
1389 return true;
1390 }
1391
1392 inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1393 {
1394 return insn.getOpcode() == TGSI_OPCODE_MOV &&
1395 insn.getDst(0).getIndex(0) == info->io.edgeFlagOut &&
1396 insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1397 }
1398
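// Record what a single source operand touches: indirectly addressed temporary
// arrays, buffer/image/global memory accesses, outputs that are read back, and
// per-component input read masks.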
1399 void Source::scanInstructionSrc(const Instruction& insn,
1400 const Instruction::SrcRegister& src,
1401 unsigned mask)
1402 {
1403 if (src.getFile() == TGSI_FILE_TEMPORARY) {
1404 if (src.isIndirect(0))
1405 indirectTempArrays.insert(src.getArrayId());
1406 } else
1407 if (src.getFile() == TGSI_FILE_BUFFER ||
1408 src.getFile() == TGSI_FILE_IMAGE ||
1409 (src.getFile() == TGSI_FILE_MEMORY &&
1410 memoryFiles[src.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1411 info->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1412 0x1 : 0x2;
1413 } else
1414 if (src.getFile() == TGSI_FILE_OUTPUT) {
1415 if (src.isIndirect(0)) {
1416 // We don't know which one is accessed, just mark everything for
1417 // reading. This is an extremely unlikely occurrence.
1418 for (unsigned i = 0; i < info->numOutputs; ++i)
1419 info->out[i].oread = 1;
1420 } else {
1421 info->out[src.getIndex(0)].oread = 1;
1422 }
1423 }
1424 if (src.getFile() != TGSI_FILE_INPUT)
1425 return;
1426
1427 if (src.isIndirect(0)) {
1428 for (unsigned i = 0; i < info->numInputs; ++i)
1429 info->in[i].mask = 0xf;
1430 } else {
1431 const int i = src.getIndex(0);
1432 for (unsigned c = 0; c < 4; ++c) {
1433 if (!(mask & (1 << c)))
1434 continue;
1435 int k = src.getSwizzle(c);
1436 if (k <= TGSI_SWIZZLE_W)
1437 info->in[i].mask |= 1 << k;
1438 }
1439 switch (info->in[i].sn) {
1440 case TGSI_SEMANTIC_PSIZE:
1441 case TGSI_SEMANTIC_PRIMID:
1442 case TGSI_SEMANTIC_FOG:
1443 info->in[i].mask &= 0x1;
1444 break;
1445 case TGSI_SEMANTIC_PCOORD:
1446 info->in[i].mask &= 0x3;
1447 break;
1448 default:
1449 break;
1450 }
1451 }
1452 }
1453
1454 bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1455 {
1456 Instruction insn(inst);
1457
1458 if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1459 info->numBarriers = 1;
1460
1461 if (insn.dstCount()) {
1462 Instruction::DstRegister dst = insn.getDst(0);
1463
1464 if (dst.getFile() == TGSI_FILE_OUTPUT) {
1465 if (dst.isIndirect(0))
1466 for (unsigned i = 0; i < info->numOutputs; ++i)
1467 info->out[i].mask = 0xf;
1468 else
1469 info->out[dst.getIndex(0)].mask |= dst.getMask();
1470
1471 if (info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1472 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1473 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1474 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1475 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1476 info->out[dst.getIndex(0)].mask &= 1;
1477
1478 if (isEdgeFlagPassthrough(insn))
1479 info->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1480 } else
1481 if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1482 if (dst.isIndirect(0))
1483 indirectTempArrays.insert(dst.getArrayId());
1484 } else
1485 if (dst.getFile() == TGSI_FILE_BUFFER ||
1486 dst.getFile() == TGSI_FILE_IMAGE ||
1487 (dst.getFile() == TGSI_FILE_MEMORY &&
1488 memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1489 info->io.globalAccess |= 0x2;
1490 }
1491 }
1492
1493 for (unsigned s = 0; s < insn.srcCount(); ++s)
1494 scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1495
1496 for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1497 scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1498
1499 return true;
1500 }
1501
1502 nv50_ir::TexInstruction::Target
1503 Instruction::getTexture(const tgsi::Source *code, int s) const
1504 {
1505 // XXX: indirect access
1506 unsigned int r;
1507
1508 switch (getSrc(s).getFile()) {
1509 /*
1510 case TGSI_FILE_RESOURCE:
1511 r = getSrc(s).getIndex(0);
1512 return translateTexture(code->resources.at(r).target);
1513 */
1514 case TGSI_FILE_SAMPLER_VIEW:
1515 r = getSrc(s).getIndex(0);
1516 return translateTexture(code->textureViews.at(r).target);
1517 default:
1518 return translateTexture(insn->Texture.Texture);
1519 }
1520 }
1521
1522 } // namespace tgsi
1523
1524 namespace {
1525
1526 using namespace nv50_ir;
1527
1528 class Converter : public BuildUtil
1529 {
1530 public:
1531 Converter(Program *, const tgsi::Source *);
1532 ~Converter();
1533
1534 bool run();
1535
1536 private:
1537 struct Subroutine
1538 {
1539 Subroutine(Function *f) : f(f) { }
1540 Function *f;
1541 ValueMap values;
1542 };
1543
1544 Value *shiftAddress(Value *);
1545 Value *getVertexBase(int s);
1546 Value *getOutputBase(int s);
1547 DataArray *getArrayForFile(unsigned file, int idx);
1548 Value *fetchSrc(int s, int c);
1549 Value *acquireDst(int d, int c);
1550 void storeDst(int d, int c, Value *);
1551
1552 Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1553 void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1554 Value *val, Value *ptr);
1555
1556 void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1557 Value *applySrcMod(Value *, int s, int c);
1558
1559 Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1560 Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1561 Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1562
1563 bool handleInstruction(const struct tgsi_full_instruction *);
1564 void exportOutputs();
1565 inline Subroutine *getSubroutine(unsigned ip);
1566 inline Subroutine *getSubroutine(Function *);
1567 inline bool isEndOfSubroutine(uint ip);
1568
1569 void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1570
1571 // R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1572 void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1573 void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1574 void handleTXF(Value *dst0[4], int R, int L_M);
1575 void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1576 void handleLIT(Value *dst0[4]);
1577 void handleUserClipPlanes();
1578
1579 // Symbol *getResourceBase(int r);
1580 void getImageCoords(std::vector<Value *>&, int r, int s);
1581
1582 void handleLOAD(Value *dst0[4]);
1583 void handleSTORE();
1584 void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1585
1586 void handleINTERP(Value *dst0[4]);
1587
1588 uint8_t translateInterpMode(const struct nv50_ir_varying *var,
1589 operation& op);
1590 Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1591
1592 void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1593
1594 Value *buildDot(int dim);
1595
1596 class BindArgumentsPass : public Pass {
1597 public:
1598 BindArgumentsPass(Converter &conv) : conv(conv) { }
1599
1600 private:
1601 Converter &conv;
1602 Subroutine *sub;
1603
1604 inline const Location *getValueLocation(Subroutine *, Value *);
1605
1606 template<typename T> inline void
1607 updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1608 T (Function::*proto));
1609
1610 template<typename T> inline void
1611 updatePrototype(BitSet *set, void (Function::*updateSet)(),
1612 T (Function::*proto));
1613
1614 protected:
1615 bool visit(Function *);
1616 bool visit(BasicBlock *bb) { return false; }
1617 };
1618
1619 private:
1620 const tgsi::Source *code;
1621 const struct nv50_ir_prog_info *info;
1622
1623 struct {
1624 std::map<unsigned, Subroutine> map;
1625 Subroutine *cur;
1626 } sub;
1627
1628 uint ip; // instruction pointer
1629
1630 tgsi::Instruction tgsi;
1631
1632 DataType dstTy;
1633 DataType srcTy;
1634
1635 DataArray tData; // TGSI_FILE_TEMPORARY
1636 DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1637 DataArray aData; // TGSI_FILE_ADDRESS
1638 DataArray pData; // TGSI_FILE_PREDICATE
1639 DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1640
1641 Value *zero;
1642 Value *fragCoord[4];
1643 Value *clipVtx[4];
1644
1645 Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1646 uint8_t vtxBaseValid;
1647
1648 Value *outBase; // base address of vertex out patch (for TCP)
1649
1650 Stack condBBs; // fork BB, then else clause BB
1651 Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1652 Stack loopBBs; // loop headers
1653 Stack breakBBs; // end of / after loop
1654
1655 Value *viewport;
1656 };
1657
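// Register-to-symbol helpers: each TGSI register is a vec4 of 32-bit channels,
// hence the index * 16 + component * 4 byte addressing below.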
1658 Symbol *
1659 Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1660 {
1661 const int swz = src.getSwizzle(c);
1662
1663 /* TODO: Use Array ID when it's available for the index */
1664 return makeSym(src.getFile(),
1665 src.is2D() ? src.getIndex(1) : 0,
1666 src.getIndex(0), swz,
1667 src.getIndex(0) * 16 + swz * 4);
1668 }
1669
1670 Symbol *
1671 Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1672 {
1673 /* TODO: Use Array ID when it's available for the index */
1674 return makeSym(dst.getFile(),
1675 dst.is2D() ? dst.getIndex(1) : 0,
1676 dst.getIndex(0), c,
1677 dst.getIndex(0) * 16 + c * 4);
1678 }
1679
1680 Symbol *
1681 Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1682 {
1683 Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1684
1685 sym->reg.fileIndex = fileIdx;
1686
1687 if (tgsiFile == TGSI_FILE_MEMORY) {
1688 switch (code->memoryFiles[fileIdx].mem_type) {
1689 case TGSI_MEMORY_TYPE_GLOBAL:
1690       /* No-op; this is the default for TGSI_FILE_MEMORY */
1691 sym->setFile(FILE_MEMORY_GLOBAL);
1692 break;
1693 case TGSI_MEMORY_TYPE_SHARED:
1694 sym->setFile(FILE_MEMORY_SHARED);
1695 break;
1696 case TGSI_MEMORY_TYPE_INPUT:
1697 assert(prog->getType() == Program::TYPE_COMPUTE);
1698 assert(idx == -1);
1699 sym->setFile(FILE_SHADER_INPUT);
1700 address += info->prop.cp.inputOffset;
1701 break;
1702 default:
1703 assert(0); /* TODO: Add support for global and private memory */
1704 }
1705 }
1706
1707 if (idx >= 0) {
1708 if (sym->reg.file == FILE_SHADER_INPUT)
1709 sym->setOffset(info->in[idx].slot[c] * 4);
1710 else
1711 if (sym->reg.file == FILE_SHADER_OUTPUT)
1712 sym->setOffset(info->out[idx].slot[c] * 4);
1713 else
1714 if (sym->reg.file == FILE_SYSTEM_VALUE)
1715 sym->setSV(tgsi::translateSysVal(info->sv[idx].sn), c);
1716 else
1717 sym->setOffset(address);
1718 } else {
1719 sym->setOffset(address);
1720 }
1721 return sym;
1722 }
1723
1724 uint8_t
1725 Converter::translateInterpMode(const struct nv50_ir_varying *var, operation& op)
1726 {
1727 uint8_t mode = NV50_IR_INTERP_PERSPECTIVE;
1728
1729 if (var->flat)
1730 mode = NV50_IR_INTERP_FLAT;
1731 else
1732 if (var->linear)
1733 mode = NV50_IR_INTERP_LINEAR;
1734 else
1735 if (var->sc)
1736 mode = NV50_IR_INTERP_SC;
1737
1738 op = (mode == NV50_IR_INTERP_PERSPECTIVE || mode == NV50_IR_INTERP_SC)
1739 ? OP_PINTERP : OP_LINTERP;
1740
1741 if (var->centroid)
1742 mode |= NV50_IR_INTERP_CENTROID;
1743
1744 return mode;
1745 }
1746
1747 Value *
1748 Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1749 {
1750 operation op;
1751
1752 // XXX: no way to know interpolation mode if we don't know what's accessed
1753 const uint8_t mode = translateInterpMode(&info->in[ptr ? 0 :
1754 src.getIndex(0)], op);
1755
1756 Instruction *insn = new_Instruction(func, op, TYPE_F32);
1757
1758 insn->setDef(0, getScratch());
1759 insn->setSrc(0, srcToSym(src, c));
1760 if (op == OP_PINTERP)
1761 insn->setSrc(1, fragCoord[3]);
1762 if (ptr)
1763 insn->setIndirect(0, 0, ptr);
1764
1765 insn->setInterpolate(mode);
1766
1767 bb->insertTail(insn);
1768 return insn->getDef(0);
1769 }
1770
1771 Value *
1772 Converter::applySrcMod(Value *val, int s, int c)
1773 {
1774 Modifier m = tgsi.getSrc(s).getMod(c);
1775 DataType ty = tgsi.inferSrcType();
1776
1777 if (m & Modifier(NV50_IR_MOD_ABS))
1778 val = mkOp1v(OP_ABS, ty, getScratch(), val);
1779
1780 if (m & Modifier(NV50_IR_MOD_NEG))
1781 val = mkOp1v(OP_NEG, ty, getScratch(), val);
1782
1783 return val;
1784 }
1785
1786 Value *
1787 Converter::getVertexBase(int s)
1788 {
1789 assert(s < 5);
1790 if (!(vtxBaseValid & (1 << s))) {
1791 const int index = tgsi.getSrc(s).getIndex(1);
1792 Value *rel = NULL;
1793 if (tgsi.getSrc(s).isIndirect(1))
1794 rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1795 vtxBaseValid |= 1 << s;
1796 vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1797 mkImm(index), rel);
1798 }
1799 return vtxBase[s];
1800 }
1801
1802 Value *
1803 Converter::getOutputBase(int s)
1804 {
1805 assert(s < 5);
1806 if (!(vtxBaseValid & (1 << s))) {
1807 Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1808 if (tgsi.getSrc(s).isIndirect(1))
1809 offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1810 fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1811 offset);
1812 vtxBaseValid |= 1 << s;
1813 vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1814 }
1815 return vtxBase[s];
1816 }
1817
1818 Value *
1819 Converter::fetchSrc(int s, int c)
1820 {
1821 Value *res;
1822 Value *ptr = NULL, *dimRel = NULL;
1823
1824 tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1825
1826 if (src.isIndirect(0))
1827 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1828
1829 if (src.is2D()) {
1830 switch (src.getFile()) {
1831 case TGSI_FILE_OUTPUT:
1832 dimRel = getOutputBase(s);
1833 break;
1834 case TGSI_FILE_INPUT:
1835 dimRel = getVertexBase(s);
1836 break;
1837 case TGSI_FILE_CONSTANT:
1838 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1839 if (src.isIndirect(1))
1840 dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1841 break;
1842 default:
1843 break;
1844 }
1845 }
1846
1847 res = fetchSrc(src, c, ptr);
1848
1849 if (dimRel)
1850 res->getInsn()->setIndirect(0, 1, dimRel);
1851
1852 return applySrcMod(res, s, c);
1853 }
1854
1855 Converter::DataArray *
1856 Converter::getArrayForFile(unsigned file, int idx)
1857 {
1858 switch (file) {
1859 case TGSI_FILE_TEMPORARY:
1860 return idx == 0 ? &tData : &lData;
1861 case TGSI_FILE_PREDICATE:
1862 return &pData;
1863 case TGSI_FILE_ADDRESS:
1864 return &aData;
1865 case TGSI_FILE_OUTPUT:
1866 assert(prog->getType() == Program::TYPE_FRAGMENT);
1867 return &oData;
1868 default:
1869 assert(!"invalid/unhandled TGSI source file");
1870 return NULL;
1871 }
1872 }
1873
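// Indirect address values count vec4 slots; shift left by 4 to turn them into
// byte offsets.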
1874 Value *
1875 Converter::shiftAddress(Value *index)
1876 {
1877 if (!index)
1878 return NULL;
1879 return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
1880 }
1881
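// Temporaries belonging to an indirectly addressed array are rebased into the
// separate lData array (selected by idx2d = 1) using the per-array offsets
// computed in scanSource().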
1882 void
1883 Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
1884 {
1885 std::map<int, int>::const_iterator it =
1886 code->indirectTempOffsets.find(arrayId);
1887 if (it == code->indirectTempOffsets.end())
1888 return;
1889
1890 idx2d = 1;
1891 idx += it->second;
1892 }
1893
1894 Value *
1895 Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1896 {
1897 int idx2d = src.is2D() ? src.getIndex(1) : 0;
1898 int idx = src.getIndex(0);
1899 const int swz = src.getSwizzle(c);
1900 Instruction *ld;
1901
1902 switch (src.getFile()) {
1903 case TGSI_FILE_IMMEDIATE:
1904 assert(!ptr);
1905 return loadImm(NULL, info->immd.data[idx * 4 + swz]);
1906 case TGSI_FILE_CONSTANT:
1907 return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
1908 case TGSI_FILE_INPUT:
1909 if (prog->getType() == Program::TYPE_FRAGMENT) {
1910 // don't load masked inputs, won't be assigned a slot
1911 if (!ptr && !(info->in[idx].mask & (1 << swz)))
1912 return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
1913 return interpolate(src, c, shiftAddress(ptr));
1914 } else
1915 if (prog->getType() == Program::TYPE_GEOMETRY) {
1916 if (!ptr && info->in[idx].sn == TGSI_SEMANTIC_PRIMID)
1917 return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
1918 // XXX: This is going to be a problem with scalar arrays, i.e. when
1919 // we cannot assume that the address is given in units of vec4.
1920 //
1921 // nv50 and nvc0 need different things here, so let the lowering
1922 // passes decide what to do with the address
1923 if (ptr)
1924 return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
1925 }
1926 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
1927 ld->perPatch = info->in[idx].patch;
1928 return ld->getDef(0);
1929 case TGSI_FILE_OUTPUT:
1930 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
1931 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
1932 ld->perPatch = info->out[idx].patch;
1933 return ld->getDef(0);
1934 case TGSI_FILE_SYSTEM_VALUE:
1935 assert(!ptr);
1936 ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
1937 ld->perPatch = info->sv[idx].patch;
1938 return ld->getDef(0);
1939 case TGSI_FILE_TEMPORARY: {
1940 int arrayid = src.getArrayId();
1941 if (!arrayid)
1942 arrayid = code->tempArrayId[idx];
1943 adjustTempIndex(arrayid, idx, idx2d);
1944 }
1945 /* fallthrough */
1946 default:
1947 return getArrayForFile(src.getFile(), idx2d)->load(
1948 sub.cur->values, idx, swz, shiftAddress(ptr));
1949 }
1950 }
1951
1952 Value *
1953 Converter::acquireDst(int d, int c)
1954 {
1955 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
1956 const unsigned f = dst.getFile();
1957 int idx = dst.getIndex(0);
1958 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
1959
1960 if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
1961 f == TGSI_FILE_IMAGE)
1962 return NULL;
1963
1964 if (dst.isIndirect(0) ||
1965 f == TGSI_FILE_SYSTEM_VALUE ||
1966 (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
1967 return getScratch();
1968
1969 if (f == TGSI_FILE_TEMPORARY) {
1970 int arrayid = dst.getArrayId();
1971 if (!arrayid)
1972 arrayid = code->tempArrayId[idx];
1973 adjustTempIndex(arrayid, idx, idx2d);
1974 }
1975
1976 return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
1977 }
1978
1979 void
1980 Converter::storeDst(int d, int c, Value *val)
1981 {
1982 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
1983
1984 if (tgsi.getSaturate()) {
1985 mkOp1(OP_SAT, dstTy, val, val);
1986 }
1987
1988 Value *ptr = NULL;
1989 if (dst.isIndirect(0))
1990 ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
1991
1992 if (info->io.genUserClip > 0 &&
1993 dst.getFile() == TGSI_FILE_OUTPUT &&
1994 !dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
1995 mkMov(clipVtx[c], val);
1996 val = clipVtx[c];
1997 }
1998
1999 storeDst(dst, c, val, ptr);
2000 }
2001
2002 void
2003 Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2004 Value *val, Value *ptr)
2005 {
2006 const unsigned f = dst.getFile();
2007 int idx = dst.getIndex(0);
2008 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2009
2010 if (f == TGSI_FILE_SYSTEM_VALUE) {
2011 assert(!ptr);
2012 mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2013 } else
2014 if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2015
2016 if (ptr || (info->out[idx].mask & (1 << c))) {
2017 /* Save the viewport index into a scratch register so that it can be
2018 exported at EMIT time */
2019 if (info->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2020 viewport != NULL)
2021 mkOp1(OP_MOV, TYPE_U32, viewport, val);
2022 else
2023 mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2024 info->out[idx].patch;
2025 }
2026 } else
2027 if (f == TGSI_FILE_TEMPORARY ||
2028 f == TGSI_FILE_PREDICATE ||
2029 f == TGSI_FILE_ADDRESS ||
2030 f == TGSI_FILE_OUTPUT) {
2031 if (f == TGSI_FILE_TEMPORARY) {
2032 int arrayid = dst.getArrayId();
2033 if (!arrayid)
2034 arrayid = code->tempArrayId[idx];
2035 adjustTempIndex(arrayid, idx, idx2d);
2036 }
2037
2038 getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2039 } else {
2040 assert(!"invalid dst file");
2041 }
2042 }
2043
2044 #define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2045 for (chan = 0; chan < 4; ++chan) \
2046 if (!inst.getDst(d).isMasked(chan))
2047
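// Build a dim-component F32 dot product: a MUL for the first channel
// followed by a chain of MADs for the remaining channels.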
2048 Value *
2049 Converter::buildDot(int dim)
2050 {
2051 assert(dim > 0);
2052
2053 Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2054 Value *dotp = getScratch();
2055
2056 mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1);
2057
2058 for (int c = 1; c < dim; ++c) {
2059 src0 = fetchSrc(0, c);
2060 src1 = fetchSrc(1, c);
2061 mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp);
2062 }
2063 return dotp;
2064 }
2065
2066 void
2067 Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2068 {
2069 FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2070 join->fixed = 1;
2071 conv->insertHead(join);
2072
2073 assert(!fork->joinAt);
2074 fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2075 fork->insertBefore(fork->getExit(), fork->joinAt);
2076 }
2077
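// Attach the texture (R) and sampler (S) indices to a tex instruction; if
// either operand is addressed indirectly, the address value is appended as
// an extra source (at position s) and recorded in rIndirectSrc/sIndirectSrc.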
2078 void
2079 Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2080 {
2081 unsigned rIdx = 0, sIdx = 0;
2082
2083 if (R >= 0)
2084 rIdx = tgsi.getSrc(R).getIndex(0);
2085 if (S >= 0)
2086 sIdx = tgsi.getSrc(S).getIndex(0);
2087
2088 tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2089
2090 if (tgsi.getSrc(R).isIndirect(0)) {
2091 tex->tex.rIndirectSrc = s;
2092 tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2093 }
2094 if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2095 tex->tex.sIndirectSrc = s;
2096 tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2097 }
2098 }
2099
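// Emit an OP_TXQ; for TXQ_DIMS the first TGSI source provides the mip level
// to query, for other queries source 0 is simply zero.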
2100 void
2101 Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2102 {
2103 TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2104 tex->tex.query = query;
2105 unsigned int c, d;
2106
2107 for (d = 0, c = 0; c < 4; ++c) {
2108 if (!dst0[c])
2109 continue;
2110 tex->tex.mask |= 1 << c;
2111 tex->setDef(d++, dst0[c]);
2112 }
2113 if (query == TXQ_DIMS)
2114 tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2115 else
2116 tex->setSrc((c = 0), zero);
2117
2118 setTexRS(tex, ++c, R, -1);
2119
2120 bb->insertTail(tex);
2121 }
2122
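// For TXP, divide the coordinates selected by mask by the projection value
// (src0.w). Coordinates that come from OP_PINTERP are re-emitted with 1/w as
// the interpolation scale; anything else is multiplied by 1/w explicitly.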
2123 void
2124 Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2125 {
2126 Value *proj = fetchSrc(0, 3);
2127 Instruction *insn = proj->getUniqueInsn();
2128 int c;
2129
2130 if (insn->op == OP_PINTERP) {
2131 bb->insertTail(insn = cloneForward(func, insn));
2132 insn->op = OP_LINTERP;
2133 insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2134 insn->setSrc(1, NULL);
2135 proj = insn->getDef(0);
2136 }
2137 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2138
2139 for (c = 0; c < 4; ++c) {
2140 if (!(mask & (1 << c)))
2141 continue;
2142 if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2143 continue;
2144 mask &= ~(1 << c);
2145
2146 bb->insertTail(insn = cloneForward(func, insn));
2147 insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2148 insn->setSrc(1, proj);
2149 dst[c] = insn->getDef(0);
2150 }
2151 if (!mask)
2152 return;
2153
2154 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2155
2156 for (c = 0; c < 4; ++c)
2157 if (mask & (1 << c))
2158 dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2159 }
2160
2161 // order of nv50 ir sources: x y z layer lod/bias shadow
2162 // order of TGSI TEX sources: x y z layer shadow lod/bias
2163 // lowering will finally set the hw specific order (like array first on nvc0)
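// R and S are the TGSI source indices of the resource and sampler; L, C, Dx
// and Dy each encode (source index << 4) | component for the lod/bias, depth
// compare and derivative operands.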
2164 void
2165 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2166 {
2167 Value *arg[4], *src[8];
2168 Value *lod = NULL, *shd = NULL;
2169 unsigned int s, c, d;
2170 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2171
2172 TexInstruction::Target tgt = tgsi.getTexture(code, R);
2173
2174 for (s = 0; s < tgt.getArgCount(); ++s)
2175 arg[s] = src[s] = fetchSrc(0, s);
2176
2177 if (texi->op == OP_TXL || texi->op == OP_TXB)
2178 lod = fetchSrc(L >> 4, L & 3);
2179
2180 if (C == 0x0f)
2181 C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2182
2183 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 &&
2184 tgt == TEX_TARGET_CUBE_ARRAY_SHADOW)
2185 shd = fetchSrc(1, 0);
2186 else if (tgt.isShadow())
2187 shd = fetchSrc(C >> 4, C & 3);
2188
2189 if (texi->op == OP_TXD) {
2190 for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2191 texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2192 texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2193 }
2194 }
2195
2196 // cube textures don't care about the projection value; it's divided out
2197 if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2198 unsigned int n = tgt.getDim();
2199 if (shd) {
2200 arg[n] = shd;
2201 ++n;
2202 assert(tgt.getDim() == tgt.getArgCount());
2203 }
2204 loadProjTexCoords(src, arg, (1 << n) - 1);
2205 if (shd)
2206 shd = src[n - 1];
2207 }
2208
2209 for (c = 0, d = 0; c < 4; ++c) {
2210 if (dst[c]) {
2211 texi->setDef(d++, dst[c]);
2212 texi->tex.mask |= 1 << c;
2213 } else {
2214 // NOTE: maybe hook up def too, for CSE
2215 }
2216 }
2217 for (s = 0; s < tgt.getArgCount(); ++s)
2218 texi->setSrc(s, src[s]);
2219 if (lod)
2220 texi->setSrc(s++, lod);
2221 if (shd)
2222 texi->setSrc(s++, shd);
2223
2224 setTexRS(texi, s, R, S);
2225
2226 if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2227 texi->tex.levelZero = true;
2228 if (prog->getType() != Program::TYPE_FRAGMENT &&
2229 (tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2230 tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2231 tgsi.getOpcode() == TGSI_OPCODE_TXP))
2232 texi->tex.levelZero = true;
2233 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2234 texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, info);
2235
2236 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2237 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2238 for (c = 0; c < 3; ++c) {
2239 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2240 texi->offset[s][c].setInsn(texi);
2241 }
2242 }
2243
2244 bb->insertTail(texi);
2245 }
2246
2247 // 1st source: xyz = coordinates, w = lod/sample
2248 // 2nd source: offset
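// L_M encodes (source index << 4) | component of the lod or sample index.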
2249 void
2250 Converter::handleTXF(Value *dst[4], int R, int L_M)
2251 {
2252 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2253 int ms;
2254 unsigned int c, d, s;
2255
2256 texi->tex.target = tgsi.getTexture(code, R);
2257
2258 ms = texi->tex.target.isMS() ? 1 : 0;
2259 texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2260
2261 for (c = 0, d = 0; c < 4; ++c) {
2262 if (dst[c]) {
2263 texi->setDef(d++, dst[c]);
2264 texi->tex.mask |= 1 << c;
2265 }
2266 }
2267 for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2268 texi->setSrc(c, fetchSrc(0, c));
2269 texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2270
2271 setTexRS(texi, c, R, -1);
2272
2273 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2274 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2275 for (c = 0; c < 3; ++c) {
2276 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2277 texi->offset[s][c].setInsn(texi);
2278 }
2279 }
2280
2281 bb->insertTail(texi);
2282 }
2283
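// Classic LIT: dst.x = 1, dst.y = max(src.x, 0), dst.w = 1 and
// dst.z = (src.x > 0) ? max(src.y, 0)^clamp(src.w, +/-127.999999) : 0.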
2284 void
2285 Converter::handleLIT(Value *dst0[4])
2286 {
2287 Value *val0 = NULL;
2288 unsigned int mask = tgsi.getDst(0).getMask();
2289
2290 if (mask & (1 << 0))
2291 loadImm(dst0[0], 1.0f);
2292
2293 if (mask & (1 << 3))
2294 loadImm(dst0[3], 1.0f);
2295
2296 if (mask & (3 << 1)) {
2297 val0 = getScratch();
2298 mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2299 if (mask & (1 << 1))
2300 mkMov(dst0[1], val0);
2301 }
2302
2303 if (mask & (1 << 2)) {
2304 Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2305 Value *val1 = getScratch(), *val3 = getScratch();
2306
2307 Value *pos128 = loadImm(NULL, +127.999999f);
2308 Value *neg128 = loadImm(NULL, -127.999999f);
2309
2310 mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2311 mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2312 mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2313 mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2314
2315 mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2316 }
2317 }
2318
2319 /* Keep this around for now as reference when adding img support
2320 static inline bool
2321 isResourceSpecial(const int r)
2322 {
2323 return (r == TGSI_RESOURCE_GLOBAL ||
2324 r == TGSI_RESOURCE_LOCAL ||
2325 r == TGSI_RESOURCE_PRIVATE ||
2326 r == TGSI_RESOURCE_INPUT);
2327 }
2328
2329 static inline bool
2330 isResourceRaw(const tgsi::Source *code, const int r)
2331 {
2332 return isResourceSpecial(r) || code->resources[r].raw;
2333 }
2334
2335 static inline nv50_ir::TexTarget
2336 getResourceTarget(const tgsi::Source *code, int r)
2337 {
2338 if (isResourceSpecial(r))
2339 return nv50_ir::TEX_TARGET_BUFFER;
2340 return tgsi::translateTexture(code->resources.at(r).target);
2341 }
2342
2343 Symbol *
2344 Converter::getResourceBase(const int r)
2345 {
2346 Symbol *sym = NULL;
2347
2348 switch (r) {
2349 case TGSI_RESOURCE_GLOBAL:
2350 sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2351 info->io.auxCBSlot);
2352 break;
2353 case TGSI_RESOURCE_LOCAL:
2354 assert(prog->getType() == Program::TYPE_COMPUTE);
2355 sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2356 info->prop.cp.sharedOffset);
2357 break;
2358 case TGSI_RESOURCE_PRIVATE:
2359 sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2360 info->bin.tlsSpace);
2361 break;
2362 case TGSI_RESOURCE_INPUT:
2363 assert(prog->getType() == Program::TYPE_COMPUTE);
2364 sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2365 info->prop.cp.inputOffset);
2366 break;
2367 default:
2368 sym = new_Symbol(prog,
2369 nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2370 break;
2371 }
2372 return sym;
2373 }
2374
2375 void
2376 Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2377 {
2378 const int arg =
2379 TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2380
2381 for (int c = 0; c < arg; ++c)
2382 coords.push_back(fetchSrc(s, c));
2383
2384 // NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2385 if (r == TGSI_RESOURCE_LOCAL ||
2386 r == TGSI_RESOURCE_PRIVATE ||
2387 r == TGSI_RESOURCE_INPUT)
2388 coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2389 coords[0]);
2390 }
2391 */
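// Split a component mask into at most two contiguous runs: comp[] receives
// the first component of each run and size[] its length. A 3-component run
// is split further since there is no 96-bit access. Returns the run count.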
2392 static inline int
2393 partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2394 {
2395 int n = 0;
2396
2397 while (mask) {
2398 if (mask & 1) {
2399 size[n]++;
2400 } else {
2401 if (size[n])
2402 comp[n = 1] = size[0] + 1;
2403 else
2404 comp[n]++;
2405 }
2406 mask >>= 1;
2407 }
2408 if (size[0] == 3) {
2409 n = 1;
2410 size[0] = (comp[0] == 1) ? 1 : 2;
2411 size[1] = 3 - size[0];
2412 comp[1] = comp[0] + size[0];
2413 }
2414 return n + 1;
2415 }
2416
2417 static inline nv50_ir::TexTarget
2418 getImageTarget(const tgsi::Source *code, int r)
2419 {
2420 return tgsi::translateTexture(code->images.at(r).target);
2421 }
2422
2423 static inline const nv50_ir::TexInstruction::ImgFormatDesc *
2424 getImageFormat(const tgsi::Source *code, int r)
2425 {
2426 return &nv50_ir::TexInstruction::formatTable[
2427 tgsi::translateImgFormat(code->images.at(r).format)];
2428 }
2429
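// Collect the coordinate operands for image r from TGSI source s: one value
// per dimension, plus the layer for arrays/cubes and the sample index for
// multisampled surfaces.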
2430 void
2431 Converter::getImageCoords(std::vector<Value *> &coords, int r, int s)
2432 {
2433 TexInstruction::Target t =
2434 TexInstruction::Target(getImageTarget(code, r));
2435 const int arg = t.getDim() + (t.isArray() || t.isCube());
2436
2437 for (int c = 0; c < arg; ++c)
2438 coords.push_back(fetchSrc(s, c));
2439
2440 if (t.isMS())
2441 coords.push_back(fetchSrc(s, 3));
2442 }
2443
2444 // For raw loads, the granularity is 4 bytes.
2445 // Usage of the texture read mask on OP_SULDP is not allowed.
2446 void
2447 Converter::handleLOAD(Value *dst0[4])
2448 {
2449 const int r = tgsi.getSrc(0).getIndex(0);
2450 int c;
2451 std::vector<Value *> off, src, ldv, def;
2452
2453 switch (tgsi.getSrc(0).getFile()) {
2454 case TGSI_FILE_BUFFER:
2455 case TGSI_FILE_MEMORY:
2456 for (c = 0; c < 4; ++c) {
2457 if (!dst0[c])
2458 continue;
2459
2460 Value *off;
2461 Symbol *sym;
2462 uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2463
2464 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2465 off = NULL;
2466 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2467 tgsi.getSrc(1).getValueU32(0, info) +
2468 src0_component_offset);
2469 } else {
2470 // yzw are ignored for buffers
2471 off = fetchSrc(1, 0);
2472 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2473 src0_component_offset);
2474 }
2475
2476 Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2477 ld->cache = tgsi.getCacheMode();
2478 if (tgsi.getSrc(0).isIndirect(0))
2479 ld->setIndirect(0, 1, fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0));
2480 }
2481 break;
2482 case TGSI_FILE_IMAGE: {
2483 assert(!code->images[r].raw);
2484
2485 getImageCoords(off, r, 1);
2486 def.resize(4);
2487
2488 for (c = 0; c < 4; ++c) {
2489 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2490 def[c] = getScratch();
2491 else
2492 def[c] = dst0[c];
2493 }
2494
2495 TexInstruction *ld =
2496 mkTex(OP_SULDP, getImageTarget(code, r), code->images[r].slot, 0,
2497 def, off);
2498 ld->tex.mask = tgsi.getDst(0).getMask();
2499 ld->tex.format = getImageFormat(code, r);
2500 ld->cache = tgsi.getCacheMode();
2501 if (tgsi.getSrc(0).isIndirect(0))
2502 ld->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
2503
2504 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2505 if (dst0[c] != def[c])
2506 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2507 }
2508 break;
2509 default:
2510 assert(!"Unsupported srcFile for LOAD");
2511 }
2512
2513 /* Keep this around for now as reference when adding img support
2514 getResourceCoords(off, r, 1);
2515
2516 if (isResourceRaw(code, r)) {
2517 uint8_t mask = 0;
2518 uint8_t comp[2] = { 0, 0 };
2519 uint8_t size[2] = { 0, 0 };
2520
2521 Symbol *base = getResourceBase(r);
2522
2523 // determine the base and size of the at most 2 load ops
2524 for (c = 0; c < 4; ++c)
2525 if (!tgsi.getDst(0).isMasked(c))
2526 mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2527
2528 int n = partitionLoadStore(comp, size, mask);
2529
2530 src = off;
2531
2532 def.resize(4); // index by component, the ones we need will be non-NULL
2533 for (c = 0; c < 4; ++c) {
2534 if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2535 def[c] = dst0[c];
2536 else
2537 if (mask & (1 << c))
2538 def[c] = getScratch();
2539 }
2540
2541 const bool useLd = isResourceSpecial(r) ||
2542 (info->io.nv50styleSurfaces &&
2543 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2544
2545 for (int i = 0; i < n; ++i) {
2546 ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2547
2548 if (comp[i]) // adjust x component of source address if necessary
2549 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2550 off[0], mkImm(comp[i] * 4));
2551 else
2552 src[0] = off[0];
2553
2554 if (useLd) {
2555 Instruction *ld =
2556 mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2557 for (size_t c = 1; c < ldv.size(); ++c)
2558 ld->setDef(c, ldv[c]);
2559 } else {
2560 mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2561 0, ldv, src)->dType = typeOfSize(size[i] * 4);
2562 }
2563 }
2564 } else {
2565 def.resize(4);
2566 for (c = 0; c < 4; ++c) {
2567 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2568 def[c] = getScratch();
2569 else
2570 def[c] = dst0[c];
2571 }
2572
2573 mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2574 def, off);
2575 }
2576 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2577 if (dst0[c] != def[c])
2578 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2579 */
2580 }
2581
2582 // For formatted stores, the write mask on OP_SUSTP can be used.
2583 // Raw stores have to be split.
2584 void
2585 Converter::handleSTORE()
2586 {
2587 const int r = tgsi.getDst(0).getIndex(0);
2588 int c;
2589 std::vector<Value *> off, src, dummy;
2590
2591 switch (tgsi.getDst(0).getFile()) {
2592 case TGSI_FILE_BUFFER:
2593 case TGSI_FILE_MEMORY:
2594 for (c = 0; c < 4; ++c) {
2595 if (!(tgsi.getDst(0).getMask() & (1 << c)))
2596 continue;
2597
2598 Symbol *sym;
2599 Value *off;
2600 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2601 off = NULL;
2602 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2603 tgsi.getSrc(0).getValueU32(0, info) + 4 * c);
2604 } else {
2605 // yzw are ignored for buffers
2606 off = fetchSrc(0, 0);
2607 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2608 }
2609
2610 Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2611 st->cache = tgsi.getCacheMode();
2612 if (tgsi.getDst(0).isIndirect(0))
2613 st->setIndirect(0, 1, fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0));
2614 }
2615 break;
2616 case TGSI_FILE_IMAGE: {
2617 assert(!code->images[r].raw);
2618
2619 getImageCoords(off, r, 0);
2620 src = off;
2621
2622 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2623 src.push_back(fetchSrc(1, c));
2624
2625 TexInstruction *st =
2626 mkTex(OP_SUSTP, getImageTarget(code, r), code->images[r].slot,
2627 0, dummy, src);
2628 st->tex.mask = tgsi.getDst(0).getMask();
2629 st->tex.format = getImageFormat(code, r);
2630 st->cache = tgsi.getCacheMode();
2631 if (tgsi.getDst(0).isIndirect(0))
2632 st->setIndirectR(fetchSrc(tgsi.getDst(0).getIndirect(0), 0, NULL));
2633 }
2634 break;
2635 default:
2636 assert(!"Unsupported dstFile for STORE");
2637 }
2638
2639 /* Keep this around for now as reference when adding img support
2640 getResourceCoords(off, r, 0);
2641 src = off;
2642 const int s = src.size();
2643
2644 if (isResourceRaw(code, r)) {
2645 uint8_t comp[2] = { 0, 0 };
2646 uint8_t size[2] = { 0, 0 };
2647
2648 int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2649
2650 Symbol *base = getResourceBase(r);
2651
2652 const bool useSt = isResourceSpecial(r) ||
2653 (info->io.nv50styleSurfaces &&
2654 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2655
2656 for (int i = 0; i < n; ++i) {
2657 if (comp[i]) // adjust x component of source address if necessary
2658 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2659 off[0], mkImm(comp[i] * 4));
2660 else
2661 src[0] = off[0];
2662
2663 const DataType stTy = typeOfSize(size[i] * 4);
2664
2665 if (useSt) {
2666 Instruction *st =
2667 mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2668 for (c = 1; c < size[i]; ++c)
2669 st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2670 st->setIndirect(0, 0, src[0]);
2671 } else {
2672 // attach values to be stored
2673 src.resize(s + size[i]);
2674 for (c = 0; c < size[i]; ++c)
2675 src[s + c] = fetchSrc(1, comp[i] + c);
2676 mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2677 0, dummy, src)->setType(stTy);
2678 }
2679 }
2680 } else {
2681 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2682 src.push_back(fetchSrc(1, c));
2683
2684 mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2685 dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2686 }
2687 */
2688 }
2689
2690 // XXX: These only work on resources with single-component u32/s32 formats,
2691 // so the result is replicated across all enabled components. This might not
2692 // be what TGSI intends, but operating on more than one component would give
2693 // undefined results because those components do not exist.
2694 void
2695 Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2696 {
2697 const int r = tgsi.getSrc(0).getIndex(0);
2698 std::vector<Value *> srcv;
2699 std::vector<Value *> defv;
2700 LValue *dst = getScratch();
2701
2702 switch (tgsi.getSrc(0).getFile()) {
2703 case TGSI_FILE_BUFFER:
2704 case TGSI_FILE_MEMORY:
2705 for (int c = 0; c < 4; ++c) {
2706 if (!dst0[c])
2707 continue;
2708
2709 Instruction *insn;
2710 Value *off = fetchSrc(1, c), *off2 = NULL;
2711 Value *sym;
2712 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2713 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2714 tgsi.getSrc(1).getValueU32(c, info));
2715 else
2716 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2717 if (tgsi.getSrc(0).isIndirect(0))
2718 off2 = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2719 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2720 insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2721 else
2722 insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2723 if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2724 insn->setIndirect(0, 0, off);
2725 if (off2)
2726 insn->setIndirect(0, 1, off2);
2727 insn->subOp = subOp;
2728 }
2729 for (int c = 0; c < 4; ++c)
2730 if (dst0[c])
2731 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2732 break;
2733 case TGSI_FILE_IMAGE: {
2734 assert(!code->images[r].raw);
2735
2736 getImageCoords(srcv, r, 1);
2737 defv.push_back(dst);
2738 srcv.push_back(fetchSrc(2, 0));
2739
2740 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2741 srcv.push_back(fetchSrc(3, 0));
2742
2743 TexInstruction *tex = mkTex(OP_SUREDP, getImageTarget(code, r),
2744 code->images[r].slot, 0, defv, srcv);
2745 tex->subOp = subOp;
2746 tex->tex.mask = 1;
2747 tex->tex.format = getImageFormat(code, r);
2748 tex->setType(ty);
2749 if (tgsi.getSrc(0).isIndirect(0))
2750 tex->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
2751
2752 for (int c = 0; c < 4; ++c)
2753 if (dst0[c])
2754 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2755 }
2756 break;
2757 default:
2758 assert(!"Unsupported srcFile for ATOM");
2759 }
2760
2761 /* Keep this around for now as reference when adding img support
2762 getResourceCoords(srcv, r, 1);
2763
2764 if (isResourceSpecial(r)) {
2765 assert(r != TGSI_RESOURCE_INPUT);
2766 Instruction *insn;
2767 insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
2768 insn->subOp = subOp;
2769 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2770 insn->setSrc(2, fetchSrc(3, 0));
2771 insn->setIndirect(0, 0, srcv.at(0));
2772 } else {
2773 operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
2774 TexTarget targ = getResourceTarget(code, r);
2775 int idx = code->resources[r].slot;
2776 defv.push_back(dst);
2777 srcv.push_back(fetchSrc(2, 0));
2778 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2779 srcv.push_back(fetchSrc(3, 0));
2780 TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
2781 tex->subOp = subOp;
2782 tex->tex.mask = 1;
2783 tex->setType(ty);
2784 }
2785
2786 for (int c = 0; c < 4; ++c)
2787 if (dst0[c])
2788 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2789 */
2790 }
2791
2792 void
2793 Converter::handleINTERP(Value *dst[4])
2794 {
2795 // Check whether the input is linear; all other attributes are ignored.
2796 Instruction *insn;
2797 Value *offset = NULL, *ptr = NULL, *w = NULL;
2798 Symbol *sym[4] = { NULL };
2799 bool linear;
2800 operation op = OP_NOP;
2801 int c, mode = 0;
2802
2803 tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
2804
2805 // In some odd cases, largely due to varying packing, the source might not
2806 // actually be an input. This is illegal TGSI, but it's easier to account
2807 // for it here than to fix it where the TGSI is being generated. In that
2808 // case it will be a straight-up MOV (or a sequence of MOVs) from the input
2809 // in question, so we follow the MOV chain to find which input we need to
2810 // use.
2811 if (src.getFile() != TGSI_FILE_INPUT) {
2812 if (src.isIndirect(0)) {
2813 ERROR("Ignoring indirect input interpolation\n");
2814 return;
2815 }
2816 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
2817 Value *val = fetchSrc(0, c);
2818 assert(val->defs.size() == 1);
2819 insn = val->getInsn();
2820 while (insn->op == OP_MOV) {
2821 assert(insn->getSrc(0)->defs.size() == 1);
2822 insn = insn->getSrc(0)->getInsn();
2823 if (!insn) {
2824 ERROR("Miscompiling shader due to unhandled INTERP\n");
2825 return;
2826 }
2827 }
2828 if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
2829 ERROR("Trying to interpolate non-input, this is not allowed.\n");
2830 return;
2831 }
2832 sym[c] = insn->getSrc(0)->asSym();
2833 assert(sym[c]);
2834 op = insn->op;
2835 mode = insn->ipa;
2836 }
2837 } else {
2838 if (src.isIndirect(0))
2839 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
2840
2841 // We can assume that the fixed index points to an input of the same
2842 // interpolation type in the case of an indirect access.
2843 // TODO: Make use of ArrayID.
2844 linear = info->in[src.getIndex(0)].linear;
2845 if (linear) {
2846 op = OP_LINTERP;
2847 mode = NV50_IR_INTERP_LINEAR;
2848 } else {
2849 op = OP_PINTERP;
2850 mode = NV50_IR_INTERP_PERSPECTIVE;
2851 }
2852 }
2853
2854 switch (tgsi.getOpcode()) {
2855 case TGSI_OPCODE_INTERP_CENTROID:
2856 mode |= NV50_IR_INTERP_CENTROID;
2857 break;
2858 case TGSI_OPCODE_INTERP_SAMPLE:
2859 insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), fetchSrc(1, 0));
2860 insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
2861 mode |= NV50_IR_INTERP_OFFSET;
2862 break;
2863 case TGSI_OPCODE_INTERP_OFFSET: {
2864 // The input in src1.xy is float, but we need a single 32-bit value
2865 // where the upper and lower 16 bits are encoded in S0.12 format. We need
2866 // to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
2867 // and then convert to s32.
2868 Value *offs[2];
2869 for (c = 0; c < 2; c++) {
2870 offs[c] = fetchSrc(1, c);
2871 mkOp2(OP_MIN, TYPE_F32, offs[c], offs[c], loadImm(NULL, 0.4375f));
2872 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
2873 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
2874 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
2875 }
2876 offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
2877 offs[1], mkImm(0x1010), offs[0]);
2878 mode |= NV50_IR_INTERP_OFFSET;
2879 break;
2880 }
2881 }
2882
2883 if (op == OP_PINTERP) {
2884 if (offset) {
2885 w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
2886 mkOp1(OP_RCP, TYPE_F32, w, w);
2887 } else {
2888 w = fragCoord[3];
2889 }
2890 }
2891
2892
2893 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
2894 insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
2895 if (op == OP_PINTERP)
2896 insn->setSrc(1, w);
2897 if (ptr)
2898 insn->setIndirect(0, 0, ptr);
2899 if (offset)
2900 insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
2901
2902 insn->setInterpolate(mode);
2903 }
2904 }
2905
2906 Converter::Subroutine *
2907 Converter::getSubroutine(unsigned ip)
2908 {
2909 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
2910
2911 if (it == sub.map.end())
2912 it = sub.map.insert(std::make_pair(
2913 ip, Subroutine(new Function(prog, "SUB", ip)))).first;
2914
2915 return &it->second;
2916 }
2917
2918 Converter::Subroutine *
2919 Converter::getSubroutine(Function *f)
2920 {
2921 unsigned ip = f->getLabel();
2922 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
2923
2924 if (it == sub.map.end())
2925 it = sub.map.insert(std::make_pair(ip, Subroutine(f))).first;
2926
2927 return &it->second;
2928 }
2929
2930 bool
2931 Converter::isEndOfSubroutine(uint ip)
2932 {
2933 assert(ip < code->scan.num_instructions);
2934 tgsi::Instruction insn(&code->insns[ip]);
2935 return (insn.getOpcode() == TGSI_OPCODE_END ||
2936 insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
2937 // does END occur at the end of main or at the very end?
2938 insn.getOpcode() == TGSI_OPCODE_BGNSUB);
2939 }
2940
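// Translate a single TGSI instruction. Destination values are acquired up
// front (routed through scratch registers when sources alias the
// destination), the per-opcode cases below fill dst0[], and the common
// epilogue copies the results back and stores them.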
2941 bool
2942 Converter::handleInstruction(const struct tgsi_full_instruction *insn)
2943 {
2944 Instruction *geni;
2945
2946 Value *dst0[4], *rDst0[4];
2947 Value *src0, *src1, *src2, *src3;
2948 Value *val0, *val1;
2949 int c;
2950
2951 tgsi = tgsi::Instruction(insn);
2952
2953 bool useScratchDst = tgsi.checkDstSrcAliasing();
2954
2955 operation op = tgsi.getOP();
2956 dstTy = tgsi.inferDstType();
2957 srcTy = tgsi.inferSrcType();
2958
2959 unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
2960
2961 if (tgsi.dstCount()) {
2962 for (c = 0; c < 4; ++c) {
2963 rDst0[c] = acquireDst(0, c);
2964 dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
2965 }
2966 }
2967
2968 switch (tgsi.getOpcode()) {
2969 case TGSI_OPCODE_ADD:
2970 case TGSI_OPCODE_UADD:
2971 case TGSI_OPCODE_AND:
2972 case TGSI_OPCODE_DIV:
2973 case TGSI_OPCODE_IDIV:
2974 case TGSI_OPCODE_UDIV:
2975 case TGSI_OPCODE_MAX:
2976 case TGSI_OPCODE_MIN:
2977 case TGSI_OPCODE_IMAX:
2978 case TGSI_OPCODE_IMIN:
2979 case TGSI_OPCODE_UMAX:
2980 case TGSI_OPCODE_UMIN:
2981 case TGSI_OPCODE_MOD:
2982 case TGSI_OPCODE_UMOD:
2983 case TGSI_OPCODE_MUL:
2984 case TGSI_OPCODE_UMUL:
2985 case TGSI_OPCODE_IMUL_HI:
2986 case TGSI_OPCODE_UMUL_HI:
2987 case TGSI_OPCODE_OR:
2988 case TGSI_OPCODE_SHL:
2989 case TGSI_OPCODE_ISHR:
2990 case TGSI_OPCODE_USHR:
2991 case TGSI_OPCODE_SUB:
2992 case TGSI_OPCODE_XOR:
2993 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
2994 src0 = fetchSrc(0, c);
2995 src1 = fetchSrc(1, c);
2996 geni = mkOp2(op, dstTy, dst0[c], src0, src1);
2997 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
2998 }
2999 break;
3000 case TGSI_OPCODE_MAD:
3001 case TGSI_OPCODE_UMAD:
3002 case TGSI_OPCODE_SAD:
3003 case TGSI_OPCODE_FMA:
3004 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3005 src0 = fetchSrc(0, c);
3006 src1 = fetchSrc(1, c);
3007 src2 = fetchSrc(2, c);
3008 mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3009 }
3010 break;
3011 case TGSI_OPCODE_MOV:
3012 case TGSI_OPCODE_CEIL:
3013 case TGSI_OPCODE_FLR:
3014 case TGSI_OPCODE_TRUNC:
3015 case TGSI_OPCODE_RCP:
3016 case TGSI_OPCODE_SQRT:
3017 case TGSI_OPCODE_IABS:
3018 case TGSI_OPCODE_INEG:
3019 case TGSI_OPCODE_NOT:
3020 case TGSI_OPCODE_DDX:
3021 case TGSI_OPCODE_DDY:
3022 case TGSI_OPCODE_DDX_FINE:
3023 case TGSI_OPCODE_DDY_FINE:
3024 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3025 mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3026 break;
3027 case TGSI_OPCODE_RSQ:
3028 src0 = fetchSrc(0, 0);
3029 val0 = getScratch();
3030 mkOp1(OP_ABS, TYPE_F32, val0, src0);
3031 mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3032 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3033 mkMov(dst0[c], val0);
3034 break;
3035 case TGSI_OPCODE_ARL:
3036 case TGSI_OPCODE_ARR:
3037 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3038 const RoundMode rnd =
3039 tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3040 src0 = fetchSrc(0, c);
3041 mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3042 }
3043 break;
3044 case TGSI_OPCODE_UARL:
3045 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3046 mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3047 break;
3048 case TGSI_OPCODE_POW:
3049 val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3050 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3051 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3052 break;
3053 case TGSI_OPCODE_EX2:
3054 case TGSI_OPCODE_LG2:
3055 val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3056 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3057 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3058 break;
3059 case TGSI_OPCODE_COS:
3060 case TGSI_OPCODE_SIN:
3061 val0 = getScratch();
3062 if (mask & 7) {
3063 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3064 mkOp1(op, TYPE_F32, val0, val0);
3065 for (c = 0; c < 3; ++c)
3066 if (dst0[c])
3067 mkMov(dst0[c], val0);
3068 }
3069 if (dst0[3]) {
3070 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3071 mkOp1(op, TYPE_F32, dst0[3], val0);
3072 }
3073 break;
3074 case TGSI_OPCODE_SCS:
3075 if (mask & 3) {
3076 val0 = mkOp1v(OP_PRESIN, TYPE_F32, getSSA(), fetchSrc(0, 0));
3077 if (dst0[0])
3078 mkOp1(OP_COS, TYPE_F32, dst0[0], val0);
3079 if (dst0[1])
3080 mkOp1(OP_SIN, TYPE_F32, dst0[1], val0);
3081 }
3082 if (dst0[2])
3083 loadImm(dst0[2], 0.0f);
3084 if (dst0[3])
3085 loadImm(dst0[3], 1.0f);
3086 break;
3087 case TGSI_OPCODE_EXP:
3088 src0 = fetchSrc(0, 0);
3089 val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3090 if (dst0[1])
3091 mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3092 if (dst0[0])
3093 mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3094 if (dst0[2])
3095 mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3096 if (dst0[3])
3097 loadImm(dst0[3], 1.0f);
3098 break;
3099 case TGSI_OPCODE_LOG:
3100 src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3101 val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3102 if (dst0[0] || dst0[1])
3103 val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3104 if (dst0[1]) {
3105 mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3106 mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3107 mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0);
3108 }
3109 if (dst0[3])
3110 loadImm(dst0[3], 1.0f);
3111 break;
3112 case TGSI_OPCODE_DP2:
3113 val0 = buildDot(2);
3114 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3115 mkMov(dst0[c], val0);
3116 break;
3117 case TGSI_OPCODE_DP3:
3118 val0 = buildDot(3);
3119 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3120 mkMov(dst0[c], val0);
3121 break;
3122 case TGSI_OPCODE_DP4:
3123 val0 = buildDot(4);
3124 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3125 mkMov(dst0[c], val0);
3126 break;
3127 case TGSI_OPCODE_DPH:
3128 val0 = buildDot(3);
3129 src1 = fetchSrc(1, 3);
3130 mkOp2(OP_ADD, TYPE_F32, val0, val0, src1);
3131 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3132 mkMov(dst0[c], val0);
3133 break;
3134 case TGSI_OPCODE_DST:
3135 if (dst0[0])
3136 loadImm(dst0[0], 1.0f);
3137 if (dst0[1]) {
3138 src0 = fetchSrc(0, 1);
3139 src1 = fetchSrc(1, 1);
3140 mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1);
3141 }
3142 if (dst0[2])
3143 mkMov(dst0[2], fetchSrc(0, 2));
3144 if (dst0[3])
3145 mkMov(dst0[3], fetchSrc(1, 3));
3146 break;
3147 case TGSI_OPCODE_LRP:
3148 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3149 src0 = fetchSrc(0, c);
3150 src1 = fetchSrc(1, c);
3151 src2 = fetchSrc(2, c);
3152 mkOp3(OP_MAD, TYPE_F32, dst0[c],
3153 mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2);
3154 }
3155 break;
3156 case TGSI_OPCODE_LIT:
3157 handleLIT(dst0);
3158 break;
3159 case TGSI_OPCODE_XPD:
3160 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3161 if (c < 3) {
3162 val0 = getSSA();
3163 src0 = fetchSrc(1, (c + 1) % 3);
3164 src1 = fetchSrc(0, (c + 2) % 3);
3165 mkOp2(OP_MUL, TYPE_F32, val0, src0, src1);
3166 mkOp1(OP_NEG, TYPE_F32, val0, val0);
3167
3168 src0 = fetchSrc(0, (c + 1) % 3);
3169 src1 = fetchSrc(1, (c + 2) % 3);
3170 mkOp3(OP_MAD, TYPE_F32, dst0[c], src0, src1, val0);
3171 } else {
3172 loadImm(dst0[c], 1.0f);
3173 }
3174 }
3175 break;
3176 case TGSI_OPCODE_ISSG:
3177 case TGSI_OPCODE_SSG:
3178 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3179 src0 = fetchSrc(0, c);
3180 val0 = getScratch();
3181 val1 = getScratch();
3182 mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3183 mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3184 if (srcTy == TYPE_F32)
3185 mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3186 else
3187 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3188 }
3189 break;
3190 case TGSI_OPCODE_UCMP:
3191 srcTy = TYPE_U32;
3192 /* fallthrough */
3193 case TGSI_OPCODE_CMP:
3194 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3195 src0 = fetchSrc(0, c);
3196 src1 = fetchSrc(1, c);
3197 src2 = fetchSrc(2, c);
3198 if (src1 == src2)
3199 mkMov(dst0[c], src1);
3200 else
3201 mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3202 srcTy, dst0[c], srcTy, src1, src2, src0);
3203 }
3204 break;
3205 case TGSI_OPCODE_FRC:
3206 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3207 src0 = fetchSrc(0, c);
3208 val0 = getScratch();
3209 mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3210 mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3211 }
3212 break;
3213 case TGSI_OPCODE_ROUND:
3214 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3215 mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3216 ->rnd = ROUND_NI;
3217 break;
3218 case TGSI_OPCODE_CLAMP:
3219 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3220 src0 = fetchSrc(0, c);
3221 src1 = fetchSrc(1, c);
3222 src2 = fetchSrc(2, c);
3223 val0 = getScratch();
3224 mkOp2(OP_MIN, TYPE_F32, val0, src0, src1);
3225 mkOp2(OP_MAX, TYPE_F32, dst0[c], val0, src2);
3226 }
3227 break;
3228 case TGSI_OPCODE_SLT:
3229 case TGSI_OPCODE_SGE:
3230 case TGSI_OPCODE_SEQ:
3231 case TGSI_OPCODE_SGT:
3232 case TGSI_OPCODE_SLE:
3233 case TGSI_OPCODE_SNE:
3234 case TGSI_OPCODE_FSEQ:
3235 case TGSI_OPCODE_FSGE:
3236 case TGSI_OPCODE_FSLT:
3237 case TGSI_OPCODE_FSNE:
3238 case TGSI_OPCODE_ISGE:
3239 case TGSI_OPCODE_ISLT:
3240 case TGSI_OPCODE_USEQ:
3241 case TGSI_OPCODE_USGE:
3242 case TGSI_OPCODE_USLT:
3243 case TGSI_OPCODE_USNE:
3244 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3245 src0 = fetchSrc(0, c);
3246 src1 = fetchSrc(1, c);
3247 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3248 }
3249 break;
3250 case TGSI_OPCODE_VOTE_ALL:
3251 case TGSI_OPCODE_VOTE_ANY:
3252 case TGSI_OPCODE_VOTE_EQ:
3253 val0 = new_LValue(func, FILE_PREDICATE);
3254 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3255 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3256 mkOp1(op, dstTy, val0, val0)
3257 ->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3258 mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3259 }
3260 break;
3261 case TGSI_OPCODE_KILL_IF:
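// discard the pixel if any referenced component is negative; identical
// swizzle components are only tested once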
3262 val0 = new_LValue(func, FILE_PREDICATE);
3263 mask = 0;
3264 for (c = 0; c < 4; ++c) {
3265 const int s = tgsi.getSrc(0).getSwizzle(c);
3266 if (mask & (1 << s))
3267 continue;
3268 mask |= 1 << s;
3269 mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3270 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3271 }
3272 break;
3273 case TGSI_OPCODE_KILL:
3274 mkOp(OP_DISCARD, TYPE_NONE, NULL);
3275 break;
3276 case TGSI_OPCODE_TEX:
3277 case TGSI_OPCODE_TXB:
3278 case TGSI_OPCODE_TXL:
3279 case TGSI_OPCODE_TXP:
3280 case TGSI_OPCODE_LODQ:
3281 // R S L C Dx Dy
3282 handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3283 break;
3284 case TGSI_OPCODE_TXD:
3285 handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3286 break;
3287 case TGSI_OPCODE_TG4:
3288 handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3289 break;
3290 case TGSI_OPCODE_TEX2:
3291 handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3292 break;
3293 case TGSI_OPCODE_TXB2:
3294 case TGSI_OPCODE_TXL2:
3295 handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3296 break;
3297 case TGSI_OPCODE_SAMPLE:
3298 case TGSI_OPCODE_SAMPLE_B:
3299 case TGSI_OPCODE_SAMPLE_D:
3300 case TGSI_OPCODE_SAMPLE_L:
3301 case TGSI_OPCODE_SAMPLE_C:
3302 case TGSI_OPCODE_SAMPLE_C_LZ:
3303 handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3304 break;
3305 case TGSI_OPCODE_TXF:
3306 handleTXF(dst0, 1, 0x03);
3307 break;
3308 case TGSI_OPCODE_SAMPLE_I:
3309 handleTXF(dst0, 1, 0x03);
3310 break;
3311 case TGSI_OPCODE_SAMPLE_I_MS:
3312 handleTXF(dst0, 1, 0x20);
3313 break;
3314 case TGSI_OPCODE_TXQ:
3315 case TGSI_OPCODE_SVIEWINFO:
3316 handleTXQ(dst0, TXQ_DIMS, 1);
3317 break;
3318 case TGSI_OPCODE_TXQS:
3319 // The TXQ_TYPE query returns the sample count in its third result
3320 // component, but we need it in .x
3321 dst0[1] = dst0[2] = dst0[3] = NULL;
3322 std::swap(dst0[0], dst0[2]);
3323 handleTXQ(dst0, TXQ_TYPE, 0);
3324 std::swap(dst0[0], dst0[2]);
3325 break;
3326 case TGSI_OPCODE_F2I:
3327 case TGSI_OPCODE_F2U:
3328 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3329 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3330 break;
3331 case TGSI_OPCODE_I2F:
3332 case TGSI_OPCODE_U2F:
3333 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3334 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3335 break;
3336 case TGSI_OPCODE_PK2H:
3337 val0 = getScratch();
3338 val1 = getScratch();
3339 mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3340 mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3341 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3342 mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3343 break;
3344 case TGSI_OPCODE_UP2H:
3345 src0 = fetchSrc(0, 0);
3346 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3347 geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3348 geni->subOp = c & 1;
3349 }
3350 break;
3351 case TGSI_OPCODE_EMIT:
3352 /* export the saved viewport index */
3353 if (viewport != NULL) {
3354 Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3355 info->out[info->io.viewportId].slot[0] * 4);
3356 mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3357 }
3358 /* fallthrough */
3359 case TGSI_OPCODE_ENDPRIM:
3360 {
3361 // get vertex stream (must be immediate)
3362 unsigned int stream = tgsi.getSrc(0).getValueU32(0, info);
3363 if (stream && op == OP_RESTART)
3364 break;
3365 if (info->prop.gp.maxVertices == 0)
3366 break;
3367 src0 = mkImm(stream);
3368 mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3369 break;
3370 }
3371 case TGSI_OPCODE_IF:
3372 case TGSI_OPCODE_UIF:
3373 {
3374 BasicBlock *ifBB = new BasicBlock(func);
3375
3376 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3377 condBBs.push(bb);
3378 joinBBs.push(bb);
3379
3380 mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3381
3382 setPosition(ifBB, true);
3383 }
3384 break;
3385 case TGSI_OPCODE_ELSE:
3386 {
3387 BasicBlock *elseBB = new BasicBlock(func);
3388 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3389
3390 forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3391 condBBs.push(bb);
3392
3393 forkBB->getExit()->asFlow()->target.bb = elseBB;
3394 if (!bb->isTerminated())
3395 mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3396
3397 setPosition(elseBB, true);
3398 }
3399 break;
3400 case TGSI_OPCODE_ENDIF:
3401 {
3402 BasicBlock *convBB = new BasicBlock(func);
3403 BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3404 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3405
3406 if (!bb->isTerminated()) {
3407 // we only want a join if none of the clauses ended with CONT/BREAK/RET
3408 if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3409 insertConvergenceOps(convBB, forkBB);
3410 mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3411 bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3412 }
3413
3414 if (prevBB->getExit()->op == OP_BRA) {
3415 prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3416 prevBB->getExit()->asFlow()->target.bb = convBB;
3417 }
3418 setPosition(convBB, true);
3419 }
3420 break;
3421 case TGSI_OPCODE_BGNLOOP:
3422 {
3423 BasicBlock *lbgnBB = new BasicBlock(func);
3424 BasicBlock *lbrkBB = new BasicBlock(func);
3425
3426 loopBBs.push(lbgnBB);
3427 breakBBs.push(lbrkBB);
3428 if (loopBBs.getSize() > func->loopNestingBound)
3429 func->loopNestingBound++;
3430
3431 mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3432
3433 bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3434 setPosition(lbgnBB, true);
3435 mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3436 }
3437 break;
3438 case TGSI_OPCODE_ENDLOOP:
3439 {
3440 BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3441
3442 if (!bb->isTerminated()) {
3443 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3444 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3445 }
3446 setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3447
3448 // If the loop never breaks (e.g. it only has RETs inside), then there
3449 // will be no way to get to the break bb. However, BGNLOOP will have
3450 // already made a PREBREAK to it, so it must be in the CFG.
3451 if (getBB()->cfg.incidentCount() == 0)
3452 loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3453 }
3454 break;
3455 case TGSI_OPCODE_BRK:
3456 {
3457 if (bb->isTerminated())
3458 break;
3459 BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3460 mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3461 bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3462 }
3463 break;
3464 case TGSI_OPCODE_CONT:
3465 {
3466 if (bb->isTerminated())
3467 break;
3468 BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3469 mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3470 contBB->explicitCont = true;
3471 bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3472 }
3473 break;
3474 case TGSI_OPCODE_BGNSUB:
3475 {
3476 Subroutine *s = getSubroutine(ip);
3477 BasicBlock *entry = new BasicBlock(s->f);
3478 BasicBlock *leave = new BasicBlock(s->f);
3479
3480 // multiple entry points are possible; keep the call graph connected
3481 if (prog->getType() == Program::TYPE_COMPUTE)
3482 prog->main->call.attach(&s->f->call, Graph::Edge::TREE);
3483
3484 sub.cur = s;
3485 s->f->setEntry(entry);
3486 s->f->setExit(leave);
3487 setPosition(entry, true);
3488 return true;
3489 }
3490 case TGSI_OPCODE_ENDSUB:
3491 {
3492 sub.cur = getSubroutine(prog->main);
3493 setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
3494 return true;
3495 }
3496 case TGSI_OPCODE_CAL:
3497 {
3498 Subroutine *s = getSubroutine(tgsi.getLabel());
3499 mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
3500 func->call.attach(&s->f->call, Graph::Edge::TREE);
3501 return true;
3502 }
3503 case TGSI_OPCODE_RET:
3504 {
3505 if (bb->isTerminated())
3506 return true;
3507 BasicBlock *leave = BasicBlock::get(func->cfgExit);
3508
3509 if (!isEndOfSubroutine(ip + 1)) {
3510 // insert a PRERET at the entry if this is an early return
3511 // (only needed for sharing code in the epilogue)
3512 BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
3513 if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
3514 BasicBlock *pos = getBB();
3515 setPosition(root, false);
3516 mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
3517 setPosition(pos, true);
3518 }
3519 }
3520 mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
3521 bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
3522 }
3523 break;
3524 case TGSI_OPCODE_END:
3525 {
3526 // attach and generate epilogue code
3527 BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
3528 bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
3529 setPosition(epilogue, true);
3530 if (prog->getType() == Program::TYPE_FRAGMENT)
3531 exportOutputs();
3532 if (info->io.genUserClip > 0)
3533 handleUserClipPlanes();
3534 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
3535 }
3536 break;
3537 case TGSI_OPCODE_SWITCH:
3538 case TGSI_OPCODE_CASE:
3539 ERROR("switch/case opcode encountered, should have been lowered\n");
3540 abort();
3541 break;
3542 case TGSI_OPCODE_LOAD:
3543 handleLOAD(dst0);
3544 break;
3545 case TGSI_OPCODE_STORE:
3546 handleSTORE();
3547 break;
3548 case TGSI_OPCODE_BARRIER:
3549 geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
3550 geni->fixed = 1;
3551 geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
3552 break;
3553 case TGSI_OPCODE_MFENCE:
3554 case TGSI_OPCODE_LFENCE:
3555 case TGSI_OPCODE_SFENCE:
3556 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3557 geni->fixed = 1;
3558 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3559 break;
3560 case TGSI_OPCODE_MEMBAR:
3561 {
3562 uint32_t level = tgsi.getSrc(0).getValueU32(0, info);
3563 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3564 geni->fixed = 1;
3565 if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
3566 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
3567 else
3568 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
3569 }
3570 break;
3571 case TGSI_OPCODE_ATOMUADD:
3572 case TGSI_OPCODE_ATOMXCHG:
3573 case TGSI_OPCODE_ATOMCAS:
3574 case TGSI_OPCODE_ATOMAND:
3575 case TGSI_OPCODE_ATOMOR:
3576 case TGSI_OPCODE_ATOMXOR:
3577 case TGSI_OPCODE_ATOMUMIN:
3578 case TGSI_OPCODE_ATOMIMIN:
3579 case TGSI_OPCODE_ATOMUMAX:
3580 case TGSI_OPCODE_ATOMIMAX:
3581 handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
3582 break;
3583 case TGSI_OPCODE_RESQ:
3584 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
3585 geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
3586 makeSym(tgsi.getSrc(0).getFile(),
3587 tgsi.getSrc(0).getIndex(0), -1, 0, 0));
3588 if (tgsi.getSrc(0).isIndirect(0))
3589 geni->setIndirect(0, 1,
3590 fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0));
3591 } else {
3592 assert(tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE);
3593
3594 TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
3595 for (int c = 0, d = 0; c < 4; ++c) {
3596 if (dst0[c]) {
3597 texi->setDef(d++, dst0[c]);
3598 texi->tex.mask |= 1 << c;
3599 }
3600 }
3601 texi->tex.r = tgsi.getSrc(0).getIndex(0);
3602 texi->tex.target = getImageTarget(code, texi->tex.r);
3603 bb->insertTail(texi);
3604
3605 if (tgsi.getSrc(0).isIndirect(0))
3606 texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
3607 }
3608 break;
3609 case TGSI_OPCODE_IBFE:
3610 case TGSI_OPCODE_UBFE:
3611 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3612 src0 = fetchSrc(0, c);
3613 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
3614 tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
3615 src1 = loadImm(NULL, tgsi.getSrc(2).getValueU32(c, info) << 8 |
3616 tgsi.getSrc(1).getValueU32(c, info));
3617 } else {
3618 src1 = fetchSrc(1, c);
3619 src2 = fetchSrc(2, c);
3620 mkOp3(OP_INSBF, TYPE_U32, src1, src2, mkImm(0x808), src1);
3621 }
3622 mkOp2(OP_EXTBF, dstTy, dst0[c], src0, src1);
3623 }
3624 break;
3625 case TGSI_OPCODE_BFI:
3626 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3627 src0 = fetchSrc(0, c);
3628 src1 = fetchSrc(1, c);
3629 src2 = fetchSrc(2, c);
3630 src3 = fetchSrc(3, c);
3631 mkOp3(OP_INSBF, TYPE_U32, src2, src3, mkImm(0x808), src2);
3632 mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, src2, src0);
3633 }
3634 break;
3635 case TGSI_OPCODE_LSB:
3636 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3637 src0 = fetchSrc(0, c);
3638 geni = mkOp2(OP_EXTBF, TYPE_U32, src0, src0, mkImm(0x2000));
3639 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3640 geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], src0);
3641 geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3642 }
3643 break;
3644 case TGSI_OPCODE_IMSB:
3645 case TGSI_OPCODE_UMSB:
3646 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3647 src0 = fetchSrc(0, c);
3648 mkOp1(OP_BFIND, srcTy, dst0[c], src0);
3649 }
3650 break;
3651 case TGSI_OPCODE_BREV:
3652 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3653 src0 = fetchSrc(0, c);
3654 geni = mkOp2(OP_EXTBF, TYPE_U32, dst0[c], src0, mkImm(0x2000));
3655 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3656 }
3657 break;
3658 case TGSI_OPCODE_POPC:
3659 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3660 src0 = fetchSrc(0, c);
3661 mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
3662 }
3663 break;
3664 case TGSI_OPCODE_INTERP_CENTROID:
3665 case TGSI_OPCODE_INTERP_SAMPLE:
3666 case TGSI_OPCODE_INTERP_OFFSET:
3667 handleINTERP(dst0);
3668 break;
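// 64-bit operands are represented as pairs of 32-bit channels: sources are
// reassembled with OP_MERGE and 64-bit results are taken apart again with
// mkSplit().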
3669 case TGSI_OPCODE_D2I:
3670 case TGSI_OPCODE_D2U:
3671 case TGSI_OPCODE_D2F: {
3672 int pos = 0;
3673 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3674 Value *dreg = getSSA(8);
3675 src0 = fetchSrc(0, pos);
3676 src1 = fetchSrc(0, pos + 1);
3677 mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
3678 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
3679 if (!isFloatType(dstTy))
3680 cvt->rnd = ROUND_Z;
3681 pos += 2;
3682 }
3683 break;
3684 }
3685 case TGSI_OPCODE_I2D:
3686 case TGSI_OPCODE_U2D:
3687 case TGSI_OPCODE_F2D:
3688 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3689 Value *dreg = getSSA(8);
3690 mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
3691 mkSplit(&dst0[c], 4, dreg);
3692 c++;
3693 }
3694 break;
3695 case TGSI_OPCODE_DABS:
3696 case TGSI_OPCODE_DNEG:
3697 case TGSI_OPCODE_DRCP:
3698 case TGSI_OPCODE_DSQRT:
3699 case TGSI_OPCODE_DRSQ:
3700 case TGSI_OPCODE_DTRUNC:
3701 case TGSI_OPCODE_DCEIL:
3702 case TGSI_OPCODE_DFLR:
3703 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3704 src0 = getSSA(8);
3705 Value *dst = getSSA(8), *tmp[2];
3706 tmp[0] = fetchSrc(0, c);
3707 tmp[1] = fetchSrc(0, c + 1);
3708 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3709 mkOp1(op, dstTy, dst, src0);
3710 mkSplit(&dst0[c], 4, dst);
3711 c++;
3712 }
3713 break;
3714 case TGSI_OPCODE_DFRAC:
3715 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3716 src0 = getSSA(8);
3717 Value *dst = getSSA(8), *tmp[2];
3718 tmp[0] = fetchSrc(0, c);
3719 tmp[1] = fetchSrc(0, c + 1);
3720 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3721 mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
3722 mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
3723 mkSplit(&dst0[c], 4, dst);
3724 c++;
3725 }
3726 break;
3727 case TGSI_OPCODE_DSLT:
3728 case TGSI_OPCODE_DSGE:
3729 case TGSI_OPCODE_DSEQ:
3730 case TGSI_OPCODE_DSNE: {
3731 int pos = 0;
3732 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3733 Value *tmp[2];
3734
3735 src0 = getSSA(8);
3736 src1 = getSSA(8);
3737 tmp[0] = fetchSrc(0, pos);
3738 tmp[1] = fetchSrc(0, pos + 1);
3739 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3740 tmp[0] = fetchSrc(1, pos);
3741 tmp[1] = fetchSrc(1, pos + 1);
3742 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
3743 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3744 pos += 2;
3745 }
3746 break;
3747 }
3748 case TGSI_OPCODE_DADD:
3749 case TGSI_OPCODE_DMUL:
3750 case TGSI_OPCODE_DMAX:
3751 case TGSI_OPCODE_DMIN:
3752 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3753 src0 = getSSA(8);
3754 src1 = getSSA(8);
3755 Value *dst = getSSA(8), *tmp[2];
3756 tmp[0] = fetchSrc(0, c);
3757 tmp[1] = fetchSrc(0, c + 1);
3758 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3759 tmp[0] = fetchSrc(1, c);
3760 tmp[1] = fetchSrc(1, c + 1);
3761 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
3762 mkOp2(op, dstTy, dst, src0, src1);
3763 mkSplit(&dst0[c], 4, dst);
3764 c++;
3765 }
3766 break;
3767 case TGSI_OPCODE_DMAD:
3768 case TGSI_OPCODE_DFMA:
3769 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3770 src0 = getSSA(8);
3771 src1 = getSSA(8);
3772 src2 = getSSA(8);
3773 Value *dst = getSSA(8), *tmp[2];
3774 tmp[0] = fetchSrc(0, c);
3775 tmp[1] = fetchSrc(0, c + 1);
3776 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3777 tmp[0] = fetchSrc(1, c);
3778 tmp[1] = fetchSrc(1, c + 1);
3779 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
3780 tmp[0] = fetchSrc(2, c);
3781 tmp[1] = fetchSrc(2, c + 1);
3782 mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
3783 mkOp3(op, dstTy, dst, src0, src1, src2);
3784 mkSplit(&dst0[c], 4, dst);
3785 c++;
3786 }
3787 break;
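   // DROUND: an F64-to-F64 convert with round-to-nearest-integer (ROUND_NI).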
3788 case TGSI_OPCODE_DROUND:
3789 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3790 src0 = getSSA(8);
3791 Value *dst = getSSA(8), *tmp[2];
3792 tmp[0] = fetchSrc(0, c);
3793 tmp[1] = fetchSrc(0, c + 1);
3794 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3795 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
3796 ->rnd = ROUND_NI;
3797 mkSplit(&dst0[c], 4, dst);
3798 c++;
3799 }
3800 break;
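   // DSSG: sign(x) computed as (x > 0) - (x < 0); the compares and the
   // subtraction run in F32, and the result is converted back to F64.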
3801 case TGSI_OPCODE_DSSG:
3802 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3803 src0 = getSSA(8);
3804 Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
3805 tmp[0] = fetchSrc(0, c);
3806 tmp[1] = fetchSrc(0, c + 1);
3807 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3808
3809 val0 = getScratch();
3810 val1 = getScratch();
3811 // The zero is wrong here since it's only 32-bit, but it works out in
3812 // the end since it gets replaced with $r63.
3813 mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
3814 mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
3815 mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
3816 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
3817 mkSplit(&dst0[c], 4, dst);
3818 c++;
3819 }
3820 break;
3821 default:
3822 ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
3823 assert(0);
3824 break;
3825 }
3826
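   // Write-back: where a temporary destination was used, move the result into
   // the real destination value, then commit it via storeDst().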
3827 if (tgsi.dstCount()) {
3828 for (c = 0; c < 4; ++c) {
3829 if (!dst0[c])
3830 continue;
3831 if (dst0[c] != rDst0[c])
3832 mkMov(rDst0[c], dst0[c]);
3833 storeDst(0, c, rDst0[c]);
3834 }
3835 }
3836 vtxBaseValid = 0;
3837
3838 return true;
3839 }
3840
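// Lower user clip planes: for each enabled plane, accumulate the dot product
// of the clip vertex with the plane equation loaded from the driver's
// auxiliary constant buffer, and export it to the clip-distance outputs
// appended at the end of the output map.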
3841 void
3842 Converter::handleUserClipPlanes()
3843 {
3844 Value *res[8];
3845 int n, i, c;
3846
3847 for (c = 0; c < 4; ++c) {
3848 for (i = 0; i < info->io.genUserClip; ++i) {
3849 Symbol *sym = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
3850 TYPE_F32, info->io.ucpBase + i * 16 + c * 4);
3851 Value *ucp = mkLoadv(TYPE_F32, sym, NULL);
3852 if (c == 0)
3853 res[i] = mkOp2v(OP_MUL, TYPE_F32, getScratch(), clipVtx[c], ucp);
3854 else
3855 mkOp3(OP_MAD, TYPE_F32, res[i], clipVtx[c], ucp, res[i]);
3856 }
3857 }
3858
3859 const int first = info->numOutputs - (info->io.genUserClip + 3) / 4;
3860
3861 for (i = 0; i < info->io.genUserClip; ++i) {
3862 n = i / 4 + first;
3863 c = i % 4;
3864 Symbol *sym =
3865 mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32, info->out[n].slot[c] * 4);
3866 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, res[i]);
3867 }
3868 }
3869
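// Final export pass: if the driver lowered the alpha test into the shader
// (alphaRefBase set), compare COLOR0.w against the reference value in the aux
// constant buffer and discard failing fragments (the CC_TR condition is
// presumably a placeholder patched with the real alpha function); then store
// every live output value to its assigned output slot.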
3870 void
3871 Converter::exportOutputs()
3872 {
3873 if (info->io.alphaRefBase) {
3874 for (unsigned int i = 0; i < info->numOutputs; ++i) {
3875 if (info->out[i].sn != TGSI_SEMANTIC_COLOR ||
3876 info->out[i].si != 0)
3877 continue;
3878 const unsigned int c = 3;
3879 if (!oData.exists(sub.cur->values, i, c))
3880 continue;
3881 Value *val = oData.load(sub.cur->values, i, c, NULL);
3882 if (!val)
3883 continue;
3884
3885 Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
3886 TYPE_U32, info->io.alphaRefBase);
3887 Value *pred = new_LValue(func, FILE_PREDICATE);
3888 mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
3889 mkLoadv(TYPE_U32, ref, NULL))
3890 ->subOp = 1;
3891 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
3892 }
3893 }
3894
3895 for (unsigned int i = 0; i < info->numOutputs; ++i) {
3896 for (unsigned int c = 0; c < 4; ++c) {
3897 if (!oData.exists(sub.cur->values, i, c))
3898 continue;
3899 Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
3900 info->out[i].slot[c] * 4);
3901 Value *val = oData.load(sub.cur->values, i, c, NULL);
3902 if (val) {
3903 if (info->out[i].sn == TGSI_SEMANTIC_POSITION)
3904 mkOp1(OP_SAT, TYPE_F32, val, val);
3905 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
3906 }
3907 }
3908 }
3909 }
3910
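// Constructor: bind each TGSI register file to nv50 IR storage. Temporaries
// live in GPRs, with a parallel FILE_MEMORY_LOCAL array (lData) presumably
// backing temporaries that need memory (e.g. indirectly addressed arrays);
// predicates, address registers and outputs get their own arrays.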
3911 Converter::Converter(Program *ir, const tgsi::Source *code) : BuildUtil(ir),
3912 code(code),
3913 tgsi(NULL),
3914 tData(this), lData(this), aData(this), pData(this), oData(this)
3915 {
3916 info = code->info;
3917
3918 const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
3919 const unsigned pSize = code->fileSize(TGSI_FILE_PREDICATE);
3920 const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
3921 const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);
3922
3923 tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
3924 lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
3925 pData.setup(TGSI_FILE_PREDICATE, 0, 0, pSize, 4, 4, FILE_PREDICATE, 0);
3926 aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
3927 oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);
3928
3929 zero = mkImm((uint32_t)0);
3930
3931 vtxBaseValid = 0;
3932 }
3933
3934 Converter::~Converter()
3935 {
3936 }
3937
3938 inline const Converter::Location *
3939 Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
3940 {
3941 ValueMap::l_iterator it = s->values.l.find(v);
3942 return it == s->values.l.end() ? NULL : &it->second;
3943 }
3944
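// Rewrite each call site so its sources/defs reference the caller's copies of
// the values listed in the callee's ins/outs prototype.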
3945 template<typename T> inline void
3946 Converter::BindArgumentsPass::updateCallArgs(
3947 Instruction *i, void (Instruction::*setArg)(int, Value *),
3948 T (Function::*proto))
3949 {
3950 Function *g = i->asFlow()->target.fn;
3951 Subroutine *subg = conv.getSubroutine(g);
3952
3953 for (unsigned a = 0; a < (g->*proto).size(); ++a) {
3954 Value *v = (g->*proto)[a].get();
3955 const Converter::Location &l = *getValueLocation(subg, v);
3956 Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);
3957
3958 (i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
3959 }
3960 }
3961
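// Build a function's ins/outs prototype from its live-in (entry) or defined
// (exit) value set, skipping values that correspond to TGSI-declared locals.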
3962 template<typename T> inline void
3963 Converter::BindArgumentsPass::updatePrototype(
3964 BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
3965 {
3966 (func->*updateSet)();
3967
3968 for (unsigned i = 0; i < set->getSize(); ++i) {
3969 Value *v = func->getLValue(i);
3970 const Converter::Location *l = getValueLocation(sub, v);
3971
3972 // only pass values that are live here, map back to a TGSI register, and are not TGSI-declared locals
3973 if (set->test(i) && l && !conv.code->locals.count(*l))
3974 (func->*proto).push_back(v);
3975 }
3976 }
3977
3978 bool
3979 Converter::BindArgumentsPass::visit(Function *f)
3980 {
3981 sub = conv.getSubroutine(f);
3982
3983 for (ArrayList::Iterator bi = f->allBBlocks.iterator();
3984 !bi.end(); bi.next()) {
3985 for (Instruction *i = BasicBlock::get(bi)->getFirst();
3986 i; i = i->next) {
3987 if (i->op == OP_CALL && !i->asFlow()->builtin) {
3988 updateCallArgs(i, &Instruction::setSrc, &Function::ins);
3989 updateCallArgs(i, &Instruction::setDef, &Function::outs);
3990 }
3991 }
3992 }
3993
3994 if (func == prog->main && prog->getType() != Program::TYPE_COMPUTE)
3995 return true;
3996 updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
3997 &Function::buildLiveSets, &Function::ins);
3998 updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
3999 &Function::buildDefSets, &Function::outs);
4000
4001 return true;
4002 }
4003
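// Top-level conversion: create entry/exit blocks, emit the shader-type
// specific prologue (an output base for tessellation control shaders,
// 1/gl_FragCoord.w for fragment shaders), translate each TGSI instruction in
// order, then resolve subroutine call arguments with BindArgumentsPass.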
4004 bool
4005 Converter::run()
4006 {
4007 BasicBlock *entry = new BasicBlock(prog->main);
4008 BasicBlock *leave = new BasicBlock(prog->main);
4009
4010 prog->main->setEntry(entry);
4011 prog->main->setExit(leave);
4012
4013 setPosition(entry, true);
4014 sub.cur = getSubroutine(prog->main);
4015
4016 if (info->io.genUserClip > 0) {
4017 for (int c = 0; c < 4; ++c)
4018 clipVtx[c] = getScratch();
4019 }
4020
4021 switch (prog->getType()) {
4022 case Program::TYPE_TESSELLATION_CONTROL:
4023 outBase = mkOp2v(
4024 OP_SUB, TYPE_U32, getSSA(),
4025 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
4026 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
4027 break;
4028 case Program::TYPE_FRAGMENT: {
4029 Symbol *sv = mkSysVal(SV_POSITION, 3);
4030 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
4031 mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
4032 break;
4033 }
4034 default:
4035 break;
4036 }
4037
4038 if (info->io.viewportId >= 0)
4039 viewport = getScratch();
4040 else
4041 viewport = NULL;
4042
4043 for (ip = 0; ip < code->scan.num_instructions; ++ip) {
4044 if (!handleInstruction(&code->insns[ip]))
4045 return false;
4046 }
4047
4048 if (!BindArgumentsPass(*this).run(prog))
4049 return false;
4050
4051 return true;
4052 }
4053
4054 } // unnamed namespace
4055
4056 namespace nv50_ir {
4057
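// External entry point: scan the TGSI source, record the required TLS size,
// then run the converter to populate this Program.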
4058 bool
4059 Program::makeFromTGSI(struct nv50_ir_prog_info *info)
4060 {
4061 tgsi::Source src(info);
4062 if (!src.scanSource())
4063 return false;
4064 tlsSize = info->bin.tlsSpace;
4065
4066 Converter builder(this, &src);
4067 return builder.run();
4068 }
4069
4070 } // namespace nv50_ir