gallium: add PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_from_tgsi.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "tgsi/tgsi_dump.h"
24 #include "tgsi/tgsi_scan.h"
25 #include "tgsi/tgsi_util.h"
26
27 #include <set>
28
29 #include "codegen/nv50_ir.h"
30 #include "codegen/nv50_ir_util.h"
31 #include "codegen/nv50_ir_build_util.h"
32
33 namespace tgsi {
34
35 class Source;
36
37 static nv50_ir::operation translateOpcode(uint opcode);
38 static nv50_ir::DataFile translateFile(uint file);
39 static nv50_ir::TexTarget translateTexture(uint texTarg);
40 static nv50_ir::SVSemantic translateSysVal(uint sysval);
41 static nv50_ir::CacheMode translateCacheMode(uint qualifier);
42 static nv50_ir::ImgFormat translateImgFormat(uint format);
43
44 class Instruction
45 {
46 public:
47 Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49 class SrcRegister
50 {
51 public:
52 SrcRegister(const struct tgsi_full_src_register *src)
53 : reg(src->Register),
54 fsr(src)
55 { }
56
57 SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59 SrcRegister(const struct tgsi_ind_register& ind)
60 : reg(tgsi_util_get_src_from_ind(&ind)),
61 fsr(NULL)
62 { }
63
64 struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65 {
66 struct tgsi_src_register reg;
67 memset(&reg, 0, sizeof(reg));
68 reg.Index = off.Index;
69 reg.File = off.File;
70 reg.SwizzleX = off.SwizzleX;
71 reg.SwizzleY = off.SwizzleY;
72 reg.SwizzleZ = off.SwizzleZ;
73 return reg;
74 }
75
76 SrcRegister(const struct tgsi_texture_offset& off) :
77 reg(offsetToSrc(off)),
78 fsr(NULL)
79 { }
80
81 uint getFile() const { return reg.File; }
82
83 bool is2D() const { return reg.Dimension; }
84
85 bool isIndirect(int dim) const
86 {
87 return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88 }
89
90 int getIndex(int dim) const
91 {
92 return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93 }
94
95 int getSwizzle(int chan) const
96 {
97 return tgsi_util_get_src_register_swizzle(&reg, chan);
98 }
99
100 int getArrayId() const
101 {
102 if (isIndirect(0))
103 return fsr->Indirect.ArrayID;
104 return 0;
105 }
106
107 nv50_ir::Modifier getMod(int chan) const;
108
109 SrcRegister getIndirect(int dim) const
110 {
111 assert(fsr && isIndirect(dim));
112 if (dim)
113 return SrcRegister(fsr->DimIndirect);
114 return SrcRegister(fsr->Indirect);
115 }
116
117 uint32_t getValueU32(int c, const struct nv50_ir_prog_info *info) const
118 {
119 assert(reg.File == TGSI_FILE_IMMEDIATE);
120 assert(!reg.Absolute);
121 assert(!reg.Negate);
122 return info->immd.data[reg.Index * 4 + getSwizzle(c)];
123 }
124
125 private:
126 const struct tgsi_src_register reg;
127 const struct tgsi_full_src_register *fsr;
128 };
129
130 class DstRegister
131 {
132 public:
133 DstRegister(const struct tgsi_full_dst_register *dst)
134 : reg(dst->Register),
135 fdr(dst)
136 { }
137
138 DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140 uint getFile() const { return reg.File; }
141
142 bool is2D() const { return reg.Dimension; }
143
144 bool isIndirect(int dim) const
145 {
146 return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147 }
148
149 int getIndex(int dim) const
150 {
151          return (dim && fdr) ? fdr->Dimension.Index : reg.Index;
152 }
153
154 unsigned int getMask() const { return reg.WriteMask; }
155
156 bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158 SrcRegister getIndirect(int dim) const
159 {
160 assert(fdr && isIndirect(dim));
161 if (dim)
162 return SrcRegister(fdr->DimIndirect);
163 return SrcRegister(fdr->Indirect);
164 }
165
166 int getArrayId() const
167 {
168 if (isIndirect(0))
169 return fdr->Indirect.ArrayID;
170 return 0;
171 }
172
173 private:
174 const struct tgsi_dst_register reg;
175 const struct tgsi_full_dst_register *fdr;
176 };
177
178 inline uint getOpcode() const { return insn->Instruction.Opcode; }
179
180 unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
181 unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
182
183 // mask of used components of source s
184 unsigned int srcMask(unsigned int s) const;
185 unsigned int texOffsetMask() const;
186
187 SrcRegister getSrc(unsigned int s) const
188 {
189 assert(s < srcCount());
190 return SrcRegister(&insn->Src[s]);
191 }
192
193 DstRegister getDst(unsigned int d) const
194 {
195 assert(d < dstCount());
196 return DstRegister(&insn->Dst[d]);
197 }
198
199 SrcRegister getTexOffset(unsigned int i) const
200 {
201 assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
202 return SrcRegister(insn->TexOffsets[i]);
203 }
204
205 unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
206
207 bool checkDstSrcAliasing() const;
208
209 inline nv50_ir::operation getOP() const {
210 return translateOpcode(getOpcode()); }
211
212 nv50_ir::DataType inferSrcType() const;
213 nv50_ir::DataType inferDstType() const;
214
215 nv50_ir::CondCode getSetCond() const;
216
217 nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
218
219 nv50_ir::CacheMode getCacheMode() const {
220 if (!insn->Instruction.Memory)
221 return nv50_ir::CACHE_CA;
222 return translateCacheMode(insn->Memory.Qualifier);
223 }
224
225 inline uint getLabel() { return insn->Label.Label; }
226
227 unsigned getSaturate() const { return insn->Instruction.Saturate; }
228
229 void print() const
230 {
231 tgsi_dump_instruction(insn, 1);
232 }
233
234 private:
235 const struct tgsi_full_instruction *insn;
236 };
237
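/* texOffsetMask() reports which components of a texel-offset operand are
 * meaningful for the instruction's texture target: .x for 1D and buffer
 * targets, .xy for 2D/rect/MSAA targets, .xyz for 3D.
 */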
238 unsigned int Instruction::texOffsetMask() const
239 {
240 const struct tgsi_instruction_texture *tex = &insn->Texture;
241 assert(insn->Instruction.Texture);
242
243 switch (tex->Texture) {
244 case TGSI_TEXTURE_BUFFER:
245 case TGSI_TEXTURE_1D:
246 case TGSI_TEXTURE_SHADOW1D:
247 case TGSI_TEXTURE_1D_ARRAY:
248 case TGSI_TEXTURE_SHADOW1D_ARRAY:
249 return 0x1;
250 case TGSI_TEXTURE_2D:
251 case TGSI_TEXTURE_SHADOW2D:
252 case TGSI_TEXTURE_2D_ARRAY:
253 case TGSI_TEXTURE_SHADOW2D_ARRAY:
254 case TGSI_TEXTURE_RECT:
255 case TGSI_TEXTURE_SHADOWRECT:
256 case TGSI_TEXTURE_2D_MSAA:
257 case TGSI_TEXTURE_2D_ARRAY_MSAA:
258 return 0x3;
259 case TGSI_TEXTURE_3D:
260 return 0x7;
261 default:
262 assert(!"Unexpected texture target");
263 return 0xf;
264 }
265 }
266
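/* srcMask() computes which components of source operand s are actually read.
 * It starts from the destination write mask and adjusts per opcode, e.g. DP3
 * always reads .xyz of both sources, and a TEX from a 2D texture reads only
 * .xy (plus .w for the bias/lod/proj variants) regardless of which result
 * channels are written. Example: "DP3 TEMP[0].x, IN[0], IN[1]" gives
 * srcMask(0) == srcMask(1) == 0x7.
 */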
267 unsigned int Instruction::srcMask(unsigned int s) const
268 {
269 unsigned int mask = insn->Dst[0].Register.WriteMask;
270
271 switch (insn->Instruction.Opcode) {
272 case TGSI_OPCODE_COS:
273 case TGSI_OPCODE_SIN:
274 return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
275 case TGSI_OPCODE_DP2:
276 return 0x3;
277 case TGSI_OPCODE_DP3:
278 return 0x7;
279 case TGSI_OPCODE_DP4:
280 case TGSI_OPCODE_DPH:
281 case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
282 return 0xf;
283 case TGSI_OPCODE_DST:
284 return mask & (s ? 0xa : 0x6);
285 case TGSI_OPCODE_EX2:
286 case TGSI_OPCODE_EXP:
287 case TGSI_OPCODE_LG2:
288 case TGSI_OPCODE_LOG:
289 case TGSI_OPCODE_POW:
290 case TGSI_OPCODE_RCP:
291 case TGSI_OPCODE_RSQ:
292 case TGSI_OPCODE_SCS:
293 return 0x1;
294 case TGSI_OPCODE_IF:
295 case TGSI_OPCODE_UIF:
296 return 0x1;
297 case TGSI_OPCODE_LIT:
298 return 0xb;
299 case TGSI_OPCODE_TEX2:
300 case TGSI_OPCODE_TXB2:
301 case TGSI_OPCODE_TXL2:
302 return (s == 0) ? 0xf : 0x3;
303 case TGSI_OPCODE_TEX:
304 case TGSI_OPCODE_TXB:
305 case TGSI_OPCODE_TXD:
306 case TGSI_OPCODE_TXL:
307 case TGSI_OPCODE_TXP:
308 case TGSI_OPCODE_TEX_LZ:
309 case TGSI_OPCODE_TXF_LZ:
310 case TGSI_OPCODE_LODQ:
311 {
312 const struct tgsi_instruction_texture *tex = &insn->Texture;
313
314 assert(insn->Instruction.Texture);
315
316 mask = 0x7;
317 if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
318 insn->Instruction.Opcode != TGSI_OPCODE_TEX_LZ &&
319 insn->Instruction.Opcode != TGSI_OPCODE_TXF_LZ &&
320 insn->Instruction.Opcode != TGSI_OPCODE_TXD)
321 mask |= 0x8; /* bias, lod or proj */
322
323 switch (tex->Texture) {
324 case TGSI_TEXTURE_1D:
325 mask &= 0x9;
326 break;
327 case TGSI_TEXTURE_SHADOW1D:
328 mask &= 0xd;
329 break;
330 case TGSI_TEXTURE_1D_ARRAY:
331 case TGSI_TEXTURE_2D:
332 case TGSI_TEXTURE_RECT:
333 mask &= 0xb;
334 break;
335 case TGSI_TEXTURE_CUBE_ARRAY:
336 case TGSI_TEXTURE_SHADOW2D_ARRAY:
337 case TGSI_TEXTURE_SHADOWCUBE:
338 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
339 mask |= 0x8;
340 break;
341 default:
342 break;
343 }
344 }
345 return mask;
346 case TGSI_OPCODE_XPD:
347 {
348 unsigned int x = 0;
349 if (mask & 1) x |= 0x6;
350 if (mask & 2) x |= 0x5;
351 if (mask & 4) x |= 0x3;
352 return x;
353 }
354 case TGSI_OPCODE_D2I:
355 case TGSI_OPCODE_D2U:
356 case TGSI_OPCODE_D2F:
357 case TGSI_OPCODE_DSLT:
358 case TGSI_OPCODE_DSGE:
359 case TGSI_OPCODE_DSEQ:
360 case TGSI_OPCODE_DSNE:
361 case TGSI_OPCODE_U64SEQ:
362 case TGSI_OPCODE_U64SNE:
363 case TGSI_OPCODE_I64SLT:
364 case TGSI_OPCODE_U64SLT:
365 case TGSI_OPCODE_I64SGE:
366 case TGSI_OPCODE_U64SGE:
367 case TGSI_OPCODE_I642F:
368 case TGSI_OPCODE_U642F:
369 switch (util_bitcount(mask)) {
370 case 1: return 0x3;
371 case 2: return 0xf;
372 default:
373 assert(!"unexpected mask");
374 return 0xf;
375 }
376 case TGSI_OPCODE_I2D:
377 case TGSI_OPCODE_U2D:
378 case TGSI_OPCODE_F2D: {
379 unsigned int x = 0;
380 if ((mask & 0x3) == 0x3)
381 x |= 1;
382 if ((mask & 0xc) == 0xc)
383 x |= 2;
384 return x;
385 }
386 case TGSI_OPCODE_PK2H:
387 return 0x3;
388 case TGSI_OPCODE_UP2H:
389 return 0x1;
390 default:
391 break;
392 }
393
394 return mask;
395 }
396
397 nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
398 {
399 nv50_ir::Modifier m(0);
400
401 if (reg.Absolute)
402 m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
403 if (reg.Negate)
404 m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
405 return m;
406 }
407
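/* Map TGSI register files onto nv50 IR files. Note that both images and
 * TGSI_FILE_MEMORY land in FILE_MEMORY_GLOBAL here; for memory the actual
 * location (global/shared/input) is refined later from the declared memory
 * type (see Converter::makeSym).
 */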
408 static nv50_ir::DataFile translateFile(uint file)
409 {
410 switch (file) {
411 case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
412 case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
413 case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
414 case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
415 case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
416 case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
417 case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
418 case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
419 case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
420 case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
421 case TGSI_FILE_SAMPLER:
422 case TGSI_FILE_NULL:
423 default:
424 return nv50_ir::FILE_NULL;
425 }
426 }
427
428 static nv50_ir::SVSemantic translateSysVal(uint sysval)
429 {
430 switch (sysval) {
431 case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
432 case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
433 case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
434 case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
435 case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
436 case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
437 case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
438 case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
439 case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
440 case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
441 case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
442 case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
443 case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
444 case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
445 case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
446 case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
447 case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
448 case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
449 case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
450 case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
451 case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
452 case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
453 case TGSI_SEMANTIC_SUBGROUP_INVOCATION: return nv50_ir::SV_LANEID;
454 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK: return nv50_ir::SV_LANEMASK_EQ;
455 case TGSI_SEMANTIC_SUBGROUP_LT_MASK: return nv50_ir::SV_LANEMASK_LT;
456 case TGSI_SEMANTIC_SUBGROUP_LE_MASK: return nv50_ir::SV_LANEMASK_LE;
457 case TGSI_SEMANTIC_SUBGROUP_GT_MASK: return nv50_ir::SV_LANEMASK_GT;
458 case TGSI_SEMANTIC_SUBGROUP_GE_MASK: return nv50_ir::SV_LANEMASK_GE;
459 default:
460 assert(0);
461 return nv50_ir::SV_CLOCK;
462 }
463 }
464
465 #define NV50_IR_TEX_TARG_CASE(a, b) \
466 case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
467
468 static nv50_ir::TexTarget translateTexture(uint tex)
469 {
470 switch (tex) {
471 NV50_IR_TEX_TARG_CASE(1D, 1D);
472 NV50_IR_TEX_TARG_CASE(2D, 2D);
473 NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
474 NV50_IR_TEX_TARG_CASE(3D, 3D);
475 NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
476 NV50_IR_TEX_TARG_CASE(RECT, RECT);
477 NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
478 NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
479 NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
480 NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
481 NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
482 NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
483 NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
484 NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
485 NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
486 NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
487 NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
488 NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
489
490 case TGSI_TEXTURE_UNKNOWN:
491 default:
492 assert(!"invalid texture target");
493 return nv50_ir::TEX_TARGET_2D;
494 }
495 }
496
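/* Map TGSI memory qualifiers to nv50 cache modes: CACHE_CA is the default,
 * coherent accesses use CACHE_CG and volatile accesses CACHE_CV, with
 * volatile taking priority when both qualifiers are set.
 */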
497 static nv50_ir::CacheMode translateCacheMode(uint qualifier)
498 {
499 if (qualifier & TGSI_MEMORY_VOLATILE)
500 return nv50_ir::CACHE_CV;
501 if (qualifier & TGSI_MEMORY_COHERENT)
502 return nv50_ir::CACHE_CG;
503 return nv50_ir::CACHE_CA;
504 }
505
506 static nv50_ir::ImgFormat translateImgFormat(uint format)
507 {
508
509 #define FMT_CASE(a, b) \
510 case PIPE_FORMAT_ ## a: return nv50_ir::FMT_ ## b
511
512 switch (format) {
513 FMT_CASE(NONE, NONE);
514
515 FMT_CASE(R32G32B32A32_FLOAT, RGBA32F);
516 FMT_CASE(R16G16B16A16_FLOAT, RGBA16F);
517 FMT_CASE(R32G32_FLOAT, RG32F);
518 FMT_CASE(R16G16_FLOAT, RG16F);
519 FMT_CASE(R11G11B10_FLOAT, R11G11B10F);
520 FMT_CASE(R32_FLOAT, R32F);
521 FMT_CASE(R16_FLOAT, R16F);
522
523 FMT_CASE(R32G32B32A32_UINT, RGBA32UI);
524 FMT_CASE(R16G16B16A16_UINT, RGBA16UI);
525 FMT_CASE(R10G10B10A2_UINT, RGB10A2UI);
526 FMT_CASE(R8G8B8A8_UINT, RGBA8UI);
527 FMT_CASE(R32G32_UINT, RG32UI);
528 FMT_CASE(R16G16_UINT, RG16UI);
529 FMT_CASE(R8G8_UINT, RG8UI);
530 FMT_CASE(R32_UINT, R32UI);
531 FMT_CASE(R16_UINT, R16UI);
532 FMT_CASE(R8_UINT, R8UI);
533
534 FMT_CASE(R32G32B32A32_SINT, RGBA32I);
535 FMT_CASE(R16G16B16A16_SINT, RGBA16I);
536 FMT_CASE(R8G8B8A8_SINT, RGBA8I);
537 FMT_CASE(R32G32_SINT, RG32I);
538 FMT_CASE(R16G16_SINT, RG16I);
539 FMT_CASE(R8G8_SINT, RG8I);
540 FMT_CASE(R32_SINT, R32I);
541 FMT_CASE(R16_SINT, R16I);
542 FMT_CASE(R8_SINT, R8I);
543
544 FMT_CASE(R16G16B16A16_UNORM, RGBA16);
545 FMT_CASE(R10G10B10A2_UNORM, RGB10A2);
546 FMT_CASE(R8G8B8A8_UNORM, RGBA8);
547 FMT_CASE(R16G16_UNORM, RG16);
548 FMT_CASE(R8G8_UNORM, RG8);
549 FMT_CASE(R16_UNORM, R16);
550 FMT_CASE(R8_UNORM, R8);
551
552 FMT_CASE(R16G16B16A16_SNORM, RGBA16_SNORM);
553 FMT_CASE(R8G8B8A8_SNORM, RGBA8_SNORM);
554 FMT_CASE(R16G16_SNORM, RG16_SNORM);
555 FMT_CASE(R8G8_SNORM, RG8_SNORM);
556 FMT_CASE(R16_SNORM, R16_SNORM);
557 FMT_CASE(R8_SNORM, R8_SNORM);
558
559 FMT_CASE(B8G8R8A8_UNORM, BGRA8);
560 }
561
562 assert(!"Unexpected format");
563 return nv50_ir::FMT_NONE;
564 }
565
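/* TGSI opcodes do not carry explicit operand types, so the source type is
 * inferred from the opcode itself: unsigned/signed 32-bit integer ops,
 * 64-bit float ops and 64-bit integer ops each form their own group, and
 * everything else defaults to TYPE_F32.
 */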
566 nv50_ir::DataType Instruction::inferSrcType() const
567 {
568 switch (getOpcode()) {
569 case TGSI_OPCODE_UIF:
570 case TGSI_OPCODE_AND:
571 case TGSI_OPCODE_OR:
572 case TGSI_OPCODE_XOR:
573 case TGSI_OPCODE_NOT:
574 case TGSI_OPCODE_SHL:
575 case TGSI_OPCODE_U2F:
576 case TGSI_OPCODE_U2D:
577 case TGSI_OPCODE_U2I64:
578 case TGSI_OPCODE_UADD:
579 case TGSI_OPCODE_UDIV:
580 case TGSI_OPCODE_UMOD:
581 case TGSI_OPCODE_UMAD:
582 case TGSI_OPCODE_UMUL:
583 case TGSI_OPCODE_UMUL_HI:
584 case TGSI_OPCODE_UMAX:
585 case TGSI_OPCODE_UMIN:
586 case TGSI_OPCODE_USEQ:
587 case TGSI_OPCODE_USGE:
588 case TGSI_OPCODE_USLT:
589 case TGSI_OPCODE_USNE:
590 case TGSI_OPCODE_USHR:
591 case TGSI_OPCODE_ATOMUADD:
592 case TGSI_OPCODE_ATOMXCHG:
593 case TGSI_OPCODE_ATOMCAS:
594 case TGSI_OPCODE_ATOMAND:
595 case TGSI_OPCODE_ATOMOR:
596 case TGSI_OPCODE_ATOMXOR:
597 case TGSI_OPCODE_ATOMUMIN:
598 case TGSI_OPCODE_ATOMUMAX:
599 case TGSI_OPCODE_UBFE:
600 case TGSI_OPCODE_UMSB:
601 case TGSI_OPCODE_UP2H:
602 case TGSI_OPCODE_VOTE_ALL:
603 case TGSI_OPCODE_VOTE_ANY:
604 case TGSI_OPCODE_VOTE_EQ:
605 return nv50_ir::TYPE_U32;
606 case TGSI_OPCODE_I2F:
607 case TGSI_OPCODE_I2D:
608 case TGSI_OPCODE_I2I64:
609 case TGSI_OPCODE_IDIV:
610 case TGSI_OPCODE_IMUL_HI:
611 case TGSI_OPCODE_IMAX:
612 case TGSI_OPCODE_IMIN:
613 case TGSI_OPCODE_IABS:
614 case TGSI_OPCODE_INEG:
615 case TGSI_OPCODE_ISGE:
616 case TGSI_OPCODE_ISHR:
617 case TGSI_OPCODE_ISLT:
618 case TGSI_OPCODE_ISSG:
619 case TGSI_OPCODE_SAD: // not sure about SAD, but no one has a float version
620 case TGSI_OPCODE_MOD:
621 case TGSI_OPCODE_UARL:
622 case TGSI_OPCODE_ATOMIMIN:
623 case TGSI_OPCODE_ATOMIMAX:
624 case TGSI_OPCODE_IBFE:
625 case TGSI_OPCODE_IMSB:
626 return nv50_ir::TYPE_S32;
627 case TGSI_OPCODE_D2F:
628 case TGSI_OPCODE_D2I:
629 case TGSI_OPCODE_D2U:
630 case TGSI_OPCODE_D2I64:
631 case TGSI_OPCODE_D2U64:
632 case TGSI_OPCODE_DABS:
633 case TGSI_OPCODE_DNEG:
634 case TGSI_OPCODE_DADD:
635 case TGSI_OPCODE_DMUL:
636 case TGSI_OPCODE_DDIV:
637 case TGSI_OPCODE_DMAX:
638 case TGSI_OPCODE_DMIN:
639 case TGSI_OPCODE_DSLT:
640 case TGSI_OPCODE_DSGE:
641 case TGSI_OPCODE_DSEQ:
642 case TGSI_OPCODE_DSNE:
643 case TGSI_OPCODE_DRCP:
644 case TGSI_OPCODE_DSQRT:
645 case TGSI_OPCODE_DMAD:
646 case TGSI_OPCODE_DFMA:
647 case TGSI_OPCODE_DFRAC:
648 case TGSI_OPCODE_DRSQ:
649 case TGSI_OPCODE_DTRUNC:
650 case TGSI_OPCODE_DCEIL:
651 case TGSI_OPCODE_DFLR:
652 case TGSI_OPCODE_DROUND:
653 return nv50_ir::TYPE_F64;
654 case TGSI_OPCODE_U64SEQ:
655 case TGSI_OPCODE_U64SNE:
656 case TGSI_OPCODE_U64SLT:
657 case TGSI_OPCODE_U64SGE:
658 case TGSI_OPCODE_U64MIN:
659 case TGSI_OPCODE_U64MAX:
660 case TGSI_OPCODE_U64ADD:
661 case TGSI_OPCODE_U64MUL:
662 case TGSI_OPCODE_U64SHL:
663 case TGSI_OPCODE_U64SHR:
664 case TGSI_OPCODE_U64DIV:
665 case TGSI_OPCODE_U64MOD:
666 case TGSI_OPCODE_U642F:
667 case TGSI_OPCODE_U642D:
668 return nv50_ir::TYPE_U64;
669 case TGSI_OPCODE_I64ABS:
670 case TGSI_OPCODE_I64SSG:
671 case TGSI_OPCODE_I64NEG:
672 case TGSI_OPCODE_I64SLT:
673 case TGSI_OPCODE_I64SGE:
674 case TGSI_OPCODE_I64MIN:
675 case TGSI_OPCODE_I64MAX:
676 case TGSI_OPCODE_I64SHR:
677 case TGSI_OPCODE_I64DIV:
678 case TGSI_OPCODE_I64MOD:
679 case TGSI_OPCODE_I642F:
680 case TGSI_OPCODE_I642D:
681 return nv50_ir::TYPE_S64;
682 default:
683 return nv50_ir::TYPE_F32;
684 }
685 }
686
687 nv50_ir::DataType Instruction::inferDstType() const
688 {
689 switch (getOpcode()) {
690 case TGSI_OPCODE_D2U:
691 case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
692 case TGSI_OPCODE_D2I:
693 case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
694 case TGSI_OPCODE_FSEQ:
695 case TGSI_OPCODE_FSGE:
696 case TGSI_OPCODE_FSLT:
697 case TGSI_OPCODE_FSNE:
698 case TGSI_OPCODE_DSEQ:
699 case TGSI_OPCODE_DSGE:
700 case TGSI_OPCODE_DSLT:
701 case TGSI_OPCODE_DSNE:
702 case TGSI_OPCODE_I64SLT:
703 case TGSI_OPCODE_I64SGE:
704 case TGSI_OPCODE_U64SEQ:
705 case TGSI_OPCODE_U64SNE:
706 case TGSI_OPCODE_U64SLT:
707 case TGSI_OPCODE_U64SGE:
708 case TGSI_OPCODE_PK2H:
709 return nv50_ir::TYPE_U32;
710 case TGSI_OPCODE_I2F:
711 case TGSI_OPCODE_U2F:
712 case TGSI_OPCODE_D2F:
713 case TGSI_OPCODE_I642F:
714 case TGSI_OPCODE_U642F:
715 case TGSI_OPCODE_UP2H:
716 return nv50_ir::TYPE_F32;
717 case TGSI_OPCODE_I2D:
718 case TGSI_OPCODE_U2D:
719 case TGSI_OPCODE_F2D:
720 case TGSI_OPCODE_I642D:
721 case TGSI_OPCODE_U642D:
722 return nv50_ir::TYPE_F64;
723 case TGSI_OPCODE_I2I64:
724 case TGSI_OPCODE_U2I64:
725 case TGSI_OPCODE_F2I64:
726 case TGSI_OPCODE_D2I64:
727 return nv50_ir::TYPE_S64;
728 case TGSI_OPCODE_F2U64:
729 case TGSI_OPCODE_D2U64:
730 return nv50_ir::TYPE_U64;
731 default:
732 return inferSrcType();
733 }
734 }
735
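/* Pick the condition code for SET-style comparisons. Note the "not equal"
 * cases: SNE, FSNE, DSNE and U64SNE map to CC_NEU (unordered not-equal,
 * which is also true when an operand is NaN), while the integer USNE uses
 * the ordered CC_NE.
 */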
736 nv50_ir::CondCode Instruction::getSetCond() const
737 {
738 using namespace nv50_ir;
739
740 switch (getOpcode()) {
741 case TGSI_OPCODE_SLT:
742 case TGSI_OPCODE_ISLT:
743 case TGSI_OPCODE_USLT:
744 case TGSI_OPCODE_FSLT:
745 case TGSI_OPCODE_DSLT:
746 case TGSI_OPCODE_I64SLT:
747 case TGSI_OPCODE_U64SLT:
748 return CC_LT;
749 case TGSI_OPCODE_SLE:
750 return CC_LE;
751 case TGSI_OPCODE_SGE:
752 case TGSI_OPCODE_ISGE:
753 case TGSI_OPCODE_USGE:
754 case TGSI_OPCODE_FSGE:
755 case TGSI_OPCODE_DSGE:
756 case TGSI_OPCODE_I64SGE:
757 case TGSI_OPCODE_U64SGE:
758 return CC_GE;
759 case TGSI_OPCODE_SGT:
760 return CC_GT;
761 case TGSI_OPCODE_SEQ:
762 case TGSI_OPCODE_USEQ:
763 case TGSI_OPCODE_FSEQ:
764 case TGSI_OPCODE_DSEQ:
765 case TGSI_OPCODE_U64SEQ:
766 return CC_EQ;
767 case TGSI_OPCODE_SNE:
768 case TGSI_OPCODE_FSNE:
769 case TGSI_OPCODE_DSNE:
770 case TGSI_OPCODE_U64SNE:
771 return CC_NEU;
772 case TGSI_OPCODE_USNE:
773 return CC_NE;
774 default:
775 return CC_ALWAYS;
776 }
777 }
778
779 #define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
780
781 static nv50_ir::operation translateOpcode(uint opcode)
782 {
783 switch (opcode) {
784 NV50_IR_OPCODE_CASE(ARL, SHL);
785 NV50_IR_OPCODE_CASE(MOV, MOV);
786
787 NV50_IR_OPCODE_CASE(RCP, RCP);
788 NV50_IR_OPCODE_CASE(RSQ, RSQ);
789 NV50_IR_OPCODE_CASE(SQRT, SQRT);
790
791 NV50_IR_OPCODE_CASE(MUL, MUL);
792 NV50_IR_OPCODE_CASE(ADD, ADD);
793
794 NV50_IR_OPCODE_CASE(MIN, MIN);
795 NV50_IR_OPCODE_CASE(MAX, MAX);
796 NV50_IR_OPCODE_CASE(SLT, SET);
797 NV50_IR_OPCODE_CASE(SGE, SET);
798 NV50_IR_OPCODE_CASE(MAD, MAD);
799 NV50_IR_OPCODE_CASE(FMA, FMA);
800
801 NV50_IR_OPCODE_CASE(FLR, FLOOR);
802 NV50_IR_OPCODE_CASE(ROUND, CVT);
803 NV50_IR_OPCODE_CASE(EX2, EX2);
804 NV50_IR_OPCODE_CASE(LG2, LG2);
805 NV50_IR_OPCODE_CASE(POW, POW);
806
807 NV50_IR_OPCODE_CASE(COS, COS);
808 NV50_IR_OPCODE_CASE(DDX, DFDX);
809 NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
810 NV50_IR_OPCODE_CASE(DDY, DFDY);
811 NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
812 NV50_IR_OPCODE_CASE(KILL, DISCARD);
813
814 NV50_IR_OPCODE_CASE(SEQ, SET);
815 NV50_IR_OPCODE_CASE(SGT, SET);
816 NV50_IR_OPCODE_CASE(SIN, SIN);
817 NV50_IR_OPCODE_CASE(SLE, SET);
818 NV50_IR_OPCODE_CASE(SNE, SET);
819 NV50_IR_OPCODE_CASE(TEX, TEX);
820 NV50_IR_OPCODE_CASE(TXD, TXD);
821 NV50_IR_OPCODE_CASE(TXP, TEX);
822
823 NV50_IR_OPCODE_CASE(CAL, CALL);
824 NV50_IR_OPCODE_CASE(RET, RET);
825 NV50_IR_OPCODE_CASE(CMP, SLCT);
826
827 NV50_IR_OPCODE_CASE(TXB, TXB);
828
829 NV50_IR_OPCODE_CASE(DIV, DIV);
830
831 NV50_IR_OPCODE_CASE(TXL, TXL);
832 NV50_IR_OPCODE_CASE(TEX_LZ, TXL);
833
834 NV50_IR_OPCODE_CASE(CEIL, CEIL);
835 NV50_IR_OPCODE_CASE(I2F, CVT);
836 NV50_IR_OPCODE_CASE(NOT, NOT);
837 NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
838 NV50_IR_OPCODE_CASE(SHL, SHL);
839
840 NV50_IR_OPCODE_CASE(AND, AND);
841 NV50_IR_OPCODE_CASE(OR, OR);
842 NV50_IR_OPCODE_CASE(MOD, MOD);
843 NV50_IR_OPCODE_CASE(XOR, XOR);
844 NV50_IR_OPCODE_CASE(SAD, SAD);
845 NV50_IR_OPCODE_CASE(TXF, TXF);
846 NV50_IR_OPCODE_CASE(TXF_LZ, TXF);
847 NV50_IR_OPCODE_CASE(TXQ, TXQ);
848 NV50_IR_OPCODE_CASE(TXQS, TXQ);
849 NV50_IR_OPCODE_CASE(TG4, TXG);
850 NV50_IR_OPCODE_CASE(LODQ, TXLQ);
851
852 NV50_IR_OPCODE_CASE(EMIT, EMIT);
853 NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
854
855 NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
856
857 NV50_IR_OPCODE_CASE(F2I, CVT);
858 NV50_IR_OPCODE_CASE(FSEQ, SET);
859 NV50_IR_OPCODE_CASE(FSGE, SET);
860 NV50_IR_OPCODE_CASE(FSLT, SET);
861 NV50_IR_OPCODE_CASE(FSNE, SET);
862 NV50_IR_OPCODE_CASE(IDIV, DIV);
863 NV50_IR_OPCODE_CASE(IMAX, MAX);
864 NV50_IR_OPCODE_CASE(IMIN, MIN);
865 NV50_IR_OPCODE_CASE(IABS, ABS);
866 NV50_IR_OPCODE_CASE(INEG, NEG);
867 NV50_IR_OPCODE_CASE(ISGE, SET);
868 NV50_IR_OPCODE_CASE(ISHR, SHR);
869 NV50_IR_OPCODE_CASE(ISLT, SET);
870 NV50_IR_OPCODE_CASE(F2U, CVT);
871 NV50_IR_OPCODE_CASE(U2F, CVT);
872 NV50_IR_OPCODE_CASE(UADD, ADD);
873 NV50_IR_OPCODE_CASE(UDIV, DIV);
874 NV50_IR_OPCODE_CASE(UMAD, MAD);
875 NV50_IR_OPCODE_CASE(UMAX, MAX);
876 NV50_IR_OPCODE_CASE(UMIN, MIN);
877 NV50_IR_OPCODE_CASE(UMOD, MOD);
878 NV50_IR_OPCODE_CASE(UMUL, MUL);
879 NV50_IR_OPCODE_CASE(USEQ, SET);
880 NV50_IR_OPCODE_CASE(USGE, SET);
881 NV50_IR_OPCODE_CASE(USHR, SHR);
882 NV50_IR_OPCODE_CASE(USLT, SET);
883 NV50_IR_OPCODE_CASE(USNE, SET);
884
885 NV50_IR_OPCODE_CASE(DABS, ABS);
886 NV50_IR_OPCODE_CASE(DNEG, NEG);
887 NV50_IR_OPCODE_CASE(DADD, ADD);
888 NV50_IR_OPCODE_CASE(DMUL, MUL);
889 NV50_IR_OPCODE_CASE(DDIV, DIV);
890 NV50_IR_OPCODE_CASE(DMAX, MAX);
891 NV50_IR_OPCODE_CASE(DMIN, MIN);
892 NV50_IR_OPCODE_CASE(DSLT, SET);
893 NV50_IR_OPCODE_CASE(DSGE, SET);
894 NV50_IR_OPCODE_CASE(DSEQ, SET);
895 NV50_IR_OPCODE_CASE(DSNE, SET);
896 NV50_IR_OPCODE_CASE(DRCP, RCP);
897 NV50_IR_OPCODE_CASE(DSQRT, SQRT);
898 NV50_IR_OPCODE_CASE(DMAD, MAD);
899 NV50_IR_OPCODE_CASE(DFMA, FMA);
900 NV50_IR_OPCODE_CASE(D2I, CVT);
901 NV50_IR_OPCODE_CASE(D2U, CVT);
902 NV50_IR_OPCODE_CASE(I2D, CVT);
903 NV50_IR_OPCODE_CASE(U2D, CVT);
904 NV50_IR_OPCODE_CASE(DRSQ, RSQ);
905 NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
906 NV50_IR_OPCODE_CASE(DCEIL, CEIL);
907 NV50_IR_OPCODE_CASE(DFLR, FLOOR);
908 NV50_IR_OPCODE_CASE(DROUND, CVT);
909
910 NV50_IR_OPCODE_CASE(U64SEQ, SET);
911 NV50_IR_OPCODE_CASE(U64SNE, SET);
912 NV50_IR_OPCODE_CASE(U64SLT, SET);
913 NV50_IR_OPCODE_CASE(U64SGE, SET);
914 NV50_IR_OPCODE_CASE(I64SLT, SET);
915 NV50_IR_OPCODE_CASE(I64SGE, SET);
916 NV50_IR_OPCODE_CASE(I2I64, CVT);
917 NV50_IR_OPCODE_CASE(U2I64, CVT);
918 NV50_IR_OPCODE_CASE(F2I64, CVT);
919 NV50_IR_OPCODE_CASE(F2U64, CVT);
920 NV50_IR_OPCODE_CASE(D2I64, CVT);
921 NV50_IR_OPCODE_CASE(D2U64, CVT);
922 NV50_IR_OPCODE_CASE(I642F, CVT);
923 NV50_IR_OPCODE_CASE(U642F, CVT);
924 NV50_IR_OPCODE_CASE(I642D, CVT);
925 NV50_IR_OPCODE_CASE(U642D, CVT);
926
927 NV50_IR_OPCODE_CASE(I64MIN, MIN);
928 NV50_IR_OPCODE_CASE(U64MIN, MIN);
929 NV50_IR_OPCODE_CASE(I64MAX, MAX);
930 NV50_IR_OPCODE_CASE(U64MAX, MAX);
931 NV50_IR_OPCODE_CASE(I64ABS, ABS);
932 NV50_IR_OPCODE_CASE(I64NEG, NEG);
933 NV50_IR_OPCODE_CASE(U64ADD, ADD);
934 NV50_IR_OPCODE_CASE(U64MUL, MUL);
935 NV50_IR_OPCODE_CASE(U64SHL, SHL);
936 NV50_IR_OPCODE_CASE(I64SHR, SHR);
937 NV50_IR_OPCODE_CASE(U64SHR, SHR);
938
939 NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
940 NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
941
942 NV50_IR_OPCODE_CASE(SAMPLE, TEX);
943 NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
944 NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
945 NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
946 NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
947 NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
948 NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
949 NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
950 NV50_IR_OPCODE_CASE(GATHER4, TXG);
951 NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
952
953 NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
954 NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
955 NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
956 NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
957 NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
958 NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
959 NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
960 NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
961 NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
962 NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
963
964 NV50_IR_OPCODE_CASE(TEX2, TEX);
965 NV50_IR_OPCODE_CASE(TXB2, TXB);
966 NV50_IR_OPCODE_CASE(TXL2, TXL);
967
968 NV50_IR_OPCODE_CASE(IBFE, EXTBF);
969 NV50_IR_OPCODE_CASE(UBFE, EXTBF);
970 NV50_IR_OPCODE_CASE(BFI, INSBF);
971 NV50_IR_OPCODE_CASE(BREV, EXTBF);
972 NV50_IR_OPCODE_CASE(POPC, POPCNT);
973 NV50_IR_OPCODE_CASE(LSB, BFIND);
974 NV50_IR_OPCODE_CASE(IMSB, BFIND);
975 NV50_IR_OPCODE_CASE(UMSB, BFIND);
976
977 NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
978 NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
979 NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
980
981 NV50_IR_OPCODE_CASE(BALLOT, VOTE);
982 NV50_IR_OPCODE_CASE(READ_INVOC, SHFL);
983 NV50_IR_OPCODE_CASE(READ_FIRST, SHFL);
984
985 NV50_IR_OPCODE_CASE(END, EXIT);
986
987 default:
988 return nv50_ir::OP_NOP;
989 }
990 }
991
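/* Several TGSI opcodes share one nv50 IR operation and are distinguished by
 * a sub-op instead, e.g. all ATOM* opcodes become OP_ATOM with an
 * NV50_IR_SUBOP_ATOM_* selector, and IMUL_HI/UMUL_HI become OP_MUL with
 * NV50_IR_SUBOP_MUL_HIGH.
 */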
992 static uint16_t opcodeToSubOp(uint opcode)
993 {
994 switch (opcode) {
995 case TGSI_OPCODE_LFENCE: return NV50_IR_SUBOP_MEMBAR(L, GL);
996 case TGSI_OPCODE_SFENCE: return NV50_IR_SUBOP_MEMBAR(S, GL);
997 case TGSI_OPCODE_MFENCE: return NV50_IR_SUBOP_MEMBAR(M, GL);
998 case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
999 case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
1000 case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
1001 case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
1002 case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
1003 case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
1004 case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
1005 case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
1006 case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
1007 case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
1008 case TGSI_OPCODE_IMUL_HI:
1009 case TGSI_OPCODE_UMUL_HI:
1010 return NV50_IR_SUBOP_MUL_HIGH;
1011 case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
1012 case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
1013 case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
1014 default:
1015 return 0;
1016 }
1017 }
1018
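/* Detect instructions whose destination register is also read as a source.
 * TGSI semantics require all sources to be read before the destination is
 * written, so such instructions cannot store their result in place as they
 * go; indirect destinations go through memory and are not a problem.
 */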
1019 bool Instruction::checkDstSrcAliasing() const
1020 {
1021 if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
1022 return false;
1023
1024 for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
1025 if (insn->Src[s].Register.File == TGSI_FILE_NULL)
1026 break;
1027 if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
1028 insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
1029 return true;
1030 }
1031 return false;
1032 }
1033
1034 class Source
1035 {
1036 public:
1037 Source(struct nv50_ir_prog_info *);
1038 ~Source();
1039
1040 public:
1041 bool scanSource();
1042 unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
1043
1044 public:
1045 struct tgsi_shader_info scan;
1046 struct tgsi_full_instruction *insns;
1047 const struct tgsi_token *tokens;
1048 struct nv50_ir_prog_info *info;
1049
1050 nv50_ir::DynArray tempArrays;
1051 nv50_ir::DynArray immdArrays;
1052
1053 typedef nv50_ir::BuildUtil::Location Location;
1054 // these registers are per-subroutine, cannot be used for parameter passing
1055 std::set<Location> locals;
1056
1057 std::set<int> indirectTempArrays;
1058 std::map<int, int> indirectTempOffsets;
1059 std::map<int, std::pair<int, int> > tempArrayInfo;
1060 std::vector<int> tempArrayId;
1061
1062 int clipVertexOutput;
1063
1064 struct TextureView {
1065 uint8_t target; // TGSI_TEXTURE_*
1066 };
1067 std::vector<TextureView> textureViews;
1068
1069 /*
1070 struct Resource {
1071 uint8_t target; // TGSI_TEXTURE_*
1072 bool raw;
1073 uint8_t slot; // $surface index
1074 };
1075 std::vector<Resource> resources;
1076 */
1077
1078 struct Image {
1079 uint8_t target; // TGSI_TEXTURE_*
1080 bool raw;
1081 uint8_t slot;
1082 uint16_t format; // PIPE_FORMAT_*
1083 };
1084 std::vector<Image> images;
1085
1086 struct MemoryFile {
1087 uint8_t mem_type; // TGSI_MEMORY_TYPE_*
1088 };
1089 std::vector<MemoryFile> memoryFiles;
1090
1091 private:
1092 int inferSysValDirection(unsigned sn) const;
1093 bool scanDeclaration(const struct tgsi_full_declaration *);
1094 bool scanInstruction(const struct tgsi_full_instruction *);
1095 void scanInstructionSrc(const Instruction& insn,
1096 const Instruction::SrcRegister& src,
1097 unsigned mask);
1098 void scanProperty(const struct tgsi_full_property *);
1099 void scanImmediate(const struct tgsi_full_immediate *);
1100
1101 inline bool isEdgeFlagPassthrough(const Instruction&) const;
1102 };
1103
1104 Source::Source(struct nv50_ir_prog_info *prog) : info(prog)
1105 {
1106 tokens = (const struct tgsi_token *)info->bin.source;
1107
1108 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
1109 tgsi_dump(tokens, 0);
1110 }
1111
1112 Source::~Source()
1113 {
1114 if (insns)
1115 FREE(insns);
1116
1117 if (info->immd.data)
1118 FREE(info->immd.data);
1119 if (info->immd.type)
1120 FREE(info->immd.type);
1121 }
1122
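/* First pass over the TGSI token stream: record immediates, declarations and
 * properties, cache every instruction in insns[] for the later conversion
 * pass, and derive the per-input/output usage information the driver needs
 * before code generation (masks, system values, clip distances, slots).
 */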
1123 bool Source::scanSource()
1124 {
1125 unsigned insnCount = 0;
1126 struct tgsi_parse_context parse;
1127
1128 tgsi_scan_shader(tokens, &scan);
1129
1130 insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1131 sizeof(insns[0]));
1132 if (!insns)
1133 return false;
1134
1135 clipVertexOutput = -1;
1136
1137 textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1138 //resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1139 images.resize(scan.file_max[TGSI_FILE_IMAGE] + 1);
1140 tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1141 memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1142
1143 info->immd.bufSize = 0;
1144
1145 info->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1146 info->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1147 info->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1148
1149 if (info->type == PIPE_SHADER_FRAGMENT) {
1150 info->prop.fp.writesDepth = scan.writes_z;
1151 info->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1152 } else
1153 if (info->type == PIPE_SHADER_GEOMETRY) {
1154 info->prop.gp.instanceCount = 1; // default value
1155 }
1156
1157 info->io.viewportId = -1;
1158
1159 info->immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1160 info->immd.type = (ubyte *)MALLOC(scan.immediate_count * sizeof(ubyte));
1161
1162 tgsi_parse_init(&parse, tokens);
1163 while (!tgsi_parse_end_of_tokens(&parse)) {
1164 tgsi_parse_token(&parse);
1165
1166 switch (parse.FullToken.Token.Type) {
1167 case TGSI_TOKEN_TYPE_IMMEDIATE:
1168 scanImmediate(&parse.FullToken.FullImmediate);
1169 break;
1170 case TGSI_TOKEN_TYPE_DECLARATION:
1171 scanDeclaration(&parse.FullToken.FullDeclaration);
1172 break;
1173 case TGSI_TOKEN_TYPE_INSTRUCTION:
1174 insns[insnCount++] = parse.FullToken.FullInstruction;
1175 scanInstruction(&parse.FullToken.FullInstruction);
1176 break;
1177 case TGSI_TOKEN_TYPE_PROPERTY:
1178 scanProperty(&parse.FullToken.FullProperty);
1179 break;
1180 default:
1181 INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1182 break;
1183 }
1184 }
1185 tgsi_parse_free(&parse);
1186
1187 if (indirectTempArrays.size()) {
1188 int tempBase = 0;
1189 for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1190 it != indirectTempArrays.end(); ++it) {
1191 std::pair<int, int>& info = tempArrayInfo[*it];
1192 indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1193 tempBase += info.second;
1194 }
1195 info->bin.tlsSpace += tempBase * 16;
1196 }
1197
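   /* If the driver requested user clip plane lowering (genUserClip > 0),
    * append the extra CLIPDIST outputs here. Each output holds up to four
    * distances; e.g. genUserClip == 6 gives nOut == 2 with write masks
    * 0x3f >> 0 and 0x3f >> 4 == 0x3, i.e. .xyzw and .xy (assuming only the
    * low four bits of the mask are used).
    */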
1198 if (info->io.genUserClip > 0) {
1199 info->io.clipDistances = info->io.genUserClip;
1200
1201 const unsigned int nOut = (info->io.genUserClip + 3) / 4;
1202
1203 for (unsigned int n = 0; n < nOut; ++n) {
1204 unsigned int i = info->numOutputs++;
1205 info->out[i].id = i;
1206 info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1207 info->out[i].si = n;
1208 info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
1209 }
1210 }
1211
1212 return info->assignSlots(info) == 0;
1213 }
1214
1215 void Source::scanProperty(const struct tgsi_full_property *prop)
1216 {
1217 switch (prop->Property.PropertyName) {
1218 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1219 info->prop.gp.outputPrim = prop->u[0].Data;
1220 break;
1221 case TGSI_PROPERTY_GS_INPUT_PRIM:
1222 info->prop.gp.inputPrim = prop->u[0].Data;
1223 break;
1224 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1225 info->prop.gp.maxVertices = prop->u[0].Data;
1226 break;
1227 case TGSI_PROPERTY_GS_INVOCATIONS:
1228 info->prop.gp.instanceCount = prop->u[0].Data;
1229 break;
1230 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1231 info->prop.fp.separateFragData = true;
1232 break;
1233 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1234 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1235 case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1236 // we don't care
1237 break;
1238 case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1239 info->io.genUserClip = -1;
1240 break;
1241 case TGSI_PROPERTY_TCS_VERTICES_OUT:
1242 info->prop.tp.outputPatchSize = prop->u[0].Data;
1243 break;
1244 case TGSI_PROPERTY_TES_PRIM_MODE:
1245 info->prop.tp.domain = prop->u[0].Data;
1246 break;
1247 case TGSI_PROPERTY_TES_SPACING:
1248 info->prop.tp.partitioning = prop->u[0].Data;
1249 break;
1250 case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1251 info->prop.tp.winding = prop->u[0].Data;
1252 break;
1253 case TGSI_PROPERTY_TES_POINT_MODE:
1254 if (prop->u[0].Data)
1255 info->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1256 else
1257 info->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1258 break;
1259 case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1260 info->prop.cp.numThreads[0] = prop->u[0].Data;
1261 break;
1262 case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1263 info->prop.cp.numThreads[1] = prop->u[0].Data;
1264 break;
1265 case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1266 info->prop.cp.numThreads[2] = prop->u[0].Data;
1267 break;
1268 case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1269 info->io.clipDistances = prop->u[0].Data;
1270 break;
1271 case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1272 info->io.cullDistances = prop->u[0].Data;
1273 break;
1274 case TGSI_PROPERTY_NEXT_SHADER:
1275 /* Do not need to know the next shader stage. */
1276 break;
1277 case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1278 info->prop.fp.earlyFragTests = prop->u[0].Data;
1279 break;
1280 case TGSI_PROPERTY_MUL_ZERO_WINS:
1281 info->io.mul_zero_wins = prop->u[0].Data;
1282 break;
1283 default:
1284 INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1285 break;
1286 }
1287 }
1288
1289 void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1290 {
1291 const unsigned n = info->immd.count++;
1292
1293 assert(n < scan.immediate_count);
1294
1295 for (int c = 0; c < 4; ++c)
1296 info->immd.data[n * 4 + c] = imm->u[c].Uint;
1297
1298 info->immd.type[n] = imm->Immediate.DataType;
1299 }
1300
1301 int Source::inferSysValDirection(unsigned sn) const
1302 {
1303 switch (sn) {
1304 case TGSI_SEMANTIC_INSTANCEID:
1305 case TGSI_SEMANTIC_VERTEXID:
1306 return 1;
1307 case TGSI_SEMANTIC_LAYER:
1308 #if 0
1309 case TGSI_SEMANTIC_VIEWPORTINDEX:
1310 return 0;
1311 #endif
1312 case TGSI_SEMANTIC_PRIMID:
1313 return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1314 default:
1315 return 0;
1316 }
1317 }
1318
1319 bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1320 {
1321 unsigned i, c;
1322 unsigned sn = TGSI_SEMANTIC_GENERIC;
1323 unsigned si = 0;
1324 const unsigned first = decl->Range.First, last = decl->Range.Last;
1325 const int arrayId = decl->Array.ArrayID;
1326
1327 if (decl->Declaration.Semantic) {
1328 sn = decl->Semantic.Name;
1329 si = decl->Semantic.Index;
1330 }
1331
1332 if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1333 for (i = first; i <= last; ++i) {
1334 for (c = 0; c < 4; ++c) {
1335 locals.insert(
1336 Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1337 }
1338 }
1339 }
1340
1341 switch (decl->Declaration.File) {
1342 case TGSI_FILE_INPUT:
1343 if (info->type == PIPE_SHADER_VERTEX) {
1344 // all vertex attributes are equal
1345 for (i = first; i <= last; ++i) {
1346 info->in[i].sn = TGSI_SEMANTIC_GENERIC;
1347 info->in[i].si = i;
1348 }
1349 } else {
1350 for (i = first; i <= last; ++i, ++si) {
1351 info->in[i].id = i;
1352 info->in[i].sn = sn;
1353 info->in[i].si = si;
1354 if (info->type == PIPE_SHADER_FRAGMENT) {
1355 // translate interpolation mode
1356 switch (decl->Interp.Interpolate) {
1357 case TGSI_INTERPOLATE_CONSTANT:
1358 info->in[i].flat = 1;
1359 break;
1360 case TGSI_INTERPOLATE_COLOR:
1361 info->in[i].sc = 1;
1362 break;
1363 case TGSI_INTERPOLATE_LINEAR:
1364 info->in[i].linear = 1;
1365 break;
1366 default:
1367 break;
1368 }
1369 if (decl->Interp.Location)
1370 info->in[i].centroid = 1;
1371 }
1372
1373 if (sn == TGSI_SEMANTIC_PATCH)
1374 info->in[i].patch = 1;
1375 if (sn == TGSI_SEMANTIC_PATCH)
1376 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1377 }
1378 }
1379 break;
1380 case TGSI_FILE_OUTPUT:
1381 for (i = first; i <= last; ++i, ++si) {
1382 switch (sn) {
1383 case TGSI_SEMANTIC_POSITION:
1384 if (info->type == PIPE_SHADER_FRAGMENT)
1385 info->io.fragDepth = i;
1386 else
1387 if (clipVertexOutput < 0)
1388 clipVertexOutput = i;
1389 break;
1390 case TGSI_SEMANTIC_COLOR:
1391 if (info->type == PIPE_SHADER_FRAGMENT)
1392 info->prop.fp.numColourResults++;
1393 break;
1394 case TGSI_SEMANTIC_EDGEFLAG:
1395 info->io.edgeFlagOut = i;
1396 break;
1397 case TGSI_SEMANTIC_CLIPVERTEX:
1398 clipVertexOutput = i;
1399 break;
1400 case TGSI_SEMANTIC_CLIPDIST:
1401 info->io.genUserClip = -1;
1402 break;
1403 case TGSI_SEMANTIC_SAMPLEMASK:
1404 info->io.sampleMask = i;
1405 break;
1406 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1407 info->io.viewportId = i;
1408 break;
1409 case TGSI_SEMANTIC_PATCH:
1410 info->numPatchConstants = MAX2(info->numPatchConstants, si + 1);
1411 /* fallthrough */
1412 case TGSI_SEMANTIC_TESSOUTER:
1413 case TGSI_SEMANTIC_TESSINNER:
1414 info->out[i].patch = 1;
1415 break;
1416 default:
1417 break;
1418 }
1419 info->out[i].id = i;
1420 info->out[i].sn = sn;
1421 info->out[i].si = si;
1422 }
1423 break;
1424 case TGSI_FILE_SYSTEM_VALUE:
1425 switch (sn) {
1426 case TGSI_SEMANTIC_INSTANCEID:
1427 info->io.instanceId = first;
1428 break;
1429 case TGSI_SEMANTIC_VERTEXID:
1430 info->io.vertexId = first;
1431 break;
1432 case TGSI_SEMANTIC_BASEVERTEX:
1433 case TGSI_SEMANTIC_BASEINSTANCE:
1434 case TGSI_SEMANTIC_DRAWID:
1435 info->prop.vp.usesDrawParameters = true;
1436 break;
1437 case TGSI_SEMANTIC_SAMPLEID:
1438 case TGSI_SEMANTIC_SAMPLEPOS:
1439 info->prop.fp.persampleInvocation = true;
1440 break;
1441 case TGSI_SEMANTIC_SAMPLEMASK:
1442 info->prop.fp.usesSampleMaskIn = true;
1443 break;
1444 default:
1445 break;
1446 }
1447 for (i = first; i <= last; ++i, ++si) {
1448 info->sv[i].sn = sn;
1449 info->sv[i].si = si;
1450 info->sv[i].input = inferSysValDirection(sn);
1451
1452 switch (sn) {
1453 case TGSI_SEMANTIC_TESSOUTER:
1454 case TGSI_SEMANTIC_TESSINNER:
1455 info->sv[i].patch = 1;
1456 break;
1457 }
1458 }
1459 break;
1460 /*
1461 case TGSI_FILE_RESOURCE:
1462 for (i = first; i <= last; ++i) {
1463 resources[i].target = decl->Resource.Resource;
1464 resources[i].raw = decl->Resource.Raw;
1465 resources[i].slot = i;
1466 }
1467 break;
1468 */
1469 case TGSI_FILE_IMAGE:
1470 for (i = first; i <= last; ++i) {
1471 images[i].target = decl->Image.Resource;
1472 images[i].raw = decl->Image.Raw;
1473 images[i].format = decl->Image.Format;
1474 images[i].slot = i;
1475 }
1476 break;
1477 case TGSI_FILE_SAMPLER_VIEW:
1478 for (i = first; i <= last; ++i)
1479 textureViews[i].target = decl->SamplerView.Resource;
1480 break;
1481 case TGSI_FILE_MEMORY:
1482 for (i = first; i <= last; ++i)
1483 memoryFiles[i].mem_type = decl->Declaration.MemType;
1484 break;
1485 case TGSI_FILE_NULL:
1486 case TGSI_FILE_TEMPORARY:
1487 for (i = first; i <= last; ++i)
1488 tempArrayId[i] = arrayId;
1489 if (arrayId)
1490 tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1491 first, last - first + 1)));
1492 break;
1493 case TGSI_FILE_ADDRESS:
1494 case TGSI_FILE_CONSTANT:
1495 case TGSI_FILE_IMMEDIATE:
1496 case TGSI_FILE_SAMPLER:
1497 case TGSI_FILE_BUFFER:
1498 break;
1499 default:
1500 ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1501 return false;
1502 }
1503 return true;
1504 }
1505
1506 inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1507 {
1508 return insn.getOpcode() == TGSI_OPCODE_MOV &&
1509 insn.getDst(0).getIndex(0) == info->io.edgeFlagOut &&
1510 insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1511 }
1512
1513 void Source::scanInstructionSrc(const Instruction& insn,
1514 const Instruction::SrcRegister& src,
1515 unsigned mask)
1516 {
1517 if (src.getFile() == TGSI_FILE_TEMPORARY) {
1518 if (src.isIndirect(0))
1519 indirectTempArrays.insert(src.getArrayId());
1520 } else
1521 if (src.getFile() == TGSI_FILE_BUFFER ||
1522 src.getFile() == TGSI_FILE_IMAGE ||
1523 (src.getFile() == TGSI_FILE_MEMORY &&
1524 memoryFiles[src.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1525 info->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1526 0x1 : 0x2;
1527 } else
1528 if (src.getFile() == TGSI_FILE_OUTPUT) {
1529 if (src.isIndirect(0)) {
1530          // We don't know which one is accessed, so just mark everything
1531          // for reading. This is an extremely unlikely occurrence.
1532 for (unsigned i = 0; i < info->numOutputs; ++i)
1533 info->out[i].oread = 1;
1534 } else {
1535 info->out[src.getIndex(0)].oread = 1;
1536 }
1537 }
1538 if (src.getFile() != TGSI_FILE_INPUT)
1539 return;
1540
1541 if (src.isIndirect(0)) {
1542 for (unsigned i = 0; i < info->numInputs; ++i)
1543 info->in[i].mask = 0xf;
1544 } else {
1545 const int i = src.getIndex(0);
1546 for (unsigned c = 0; c < 4; ++c) {
1547 if (!(mask & (1 << c)))
1548 continue;
1549 int k = src.getSwizzle(c);
1550 if (k <= TGSI_SWIZZLE_W)
1551 info->in[i].mask |= 1 << k;
1552 }
1553 switch (info->in[i].sn) {
1554 case TGSI_SEMANTIC_PSIZE:
1555 case TGSI_SEMANTIC_PRIMID:
1556 case TGSI_SEMANTIC_FOG:
1557 info->in[i].mask &= 0x1;
1558 break;
1559 case TGSI_SEMANTIC_PCOORD:
1560 info->in[i].mask &= 0x3;
1561 break;
1562 default:
1563 break;
1564 }
1565 }
1566 }
1567
1568 bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1569 {
1570 Instruction insn(inst);
1571
1572 if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1573 info->numBarriers = 1;
1574
1575 if (insn.getOpcode() == TGSI_OPCODE_FBFETCH)
1576 info->prop.fp.readsFramebuffer = true;
1577
1578 if (insn.dstCount()) {
1579 Instruction::DstRegister dst = insn.getDst(0);
1580
1581 if (dst.getFile() == TGSI_FILE_OUTPUT) {
1582 if (dst.isIndirect(0))
1583 for (unsigned i = 0; i < info->numOutputs; ++i)
1584 info->out[i].mask = 0xf;
1585 else
1586 info->out[dst.getIndex(0)].mask |= dst.getMask();
1587
1588 if (info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1589 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1590 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1591 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1592 info->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1593 info->out[dst.getIndex(0)].mask &= 1;
1594
1595 if (isEdgeFlagPassthrough(insn))
1596 info->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1597 } else
1598 if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1599 if (dst.isIndirect(0))
1600 indirectTempArrays.insert(dst.getArrayId());
1601 } else
1602 if (dst.getFile() == TGSI_FILE_BUFFER ||
1603 dst.getFile() == TGSI_FILE_IMAGE ||
1604 (dst.getFile() == TGSI_FILE_MEMORY &&
1605 memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1606 info->io.globalAccess |= 0x2;
1607 }
1608 }
1609
1610 for (unsigned s = 0; s < insn.srcCount(); ++s)
1611 scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1612
1613 for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1614 scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1615
1616 return true;
1617 }
1618
1619 nv50_ir::TexInstruction::Target
1620 Instruction::getTexture(const tgsi::Source *code, int s) const
1621 {
1622 // XXX: indirect access
1623 unsigned int r;
1624
1625 switch (getSrc(s).getFile()) {
1626 /*
1627 case TGSI_FILE_RESOURCE:
1628 r = getSrc(s).getIndex(0);
1629 return translateTexture(code->resources.at(r).target);
1630 */
1631 case TGSI_FILE_SAMPLER_VIEW:
1632 r = getSrc(s).getIndex(0);
1633 return translateTexture(code->textureViews.at(r).target);
1634 default:
1635 return translateTexture(insn->Texture.Texture);
1636 }
1637 }
1638
1639 } // namespace tgsi
1640
1641 namespace {
1642
1643 using namespace nv50_ir;
1644
1645 class Converter : public BuildUtil
1646 {
1647 public:
1648 Converter(Program *, const tgsi::Source *);
1649 ~Converter();
1650
1651 bool run();
1652
1653 private:
1654 struct Subroutine
1655 {
1656 Subroutine(Function *f) : f(f) { }
1657 Function *f;
1658 ValueMap values;
1659 };
1660
1661 Value *shiftAddress(Value *);
1662 Value *getVertexBase(int s);
1663 Value *getOutputBase(int s);
1664 DataArray *getArrayForFile(unsigned file, int idx);
1665 Value *fetchSrc(int s, int c);
1666 Value *acquireDst(int d, int c);
1667 void storeDst(int d, int c, Value *);
1668
1669 Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1670 void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1671 Value *val, Value *ptr);
1672
1673 void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1674 Value *applySrcMod(Value *, int s, int c);
1675
1676 Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1677 Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1678 Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1679
1680 bool isSubGroupMask(uint8_t semantic);
1681
1682 bool handleInstruction(const struct tgsi_full_instruction *);
1683 void exportOutputs();
1684 inline Subroutine *getSubroutine(unsigned ip);
1685 inline Subroutine *getSubroutine(Function *);
1686 inline bool isEndOfSubroutine(uint ip);
1687
1688 void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1689
1690 // R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1691 void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1692 void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1693 void handleTXF(Value *dst0[4], int R, int L_M);
1694 void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1695 void handleFBFETCH(Value *dst0[4]);
1696 void handleLIT(Value *dst0[4]);
1697 void handleUserClipPlanes();
1698
1699 // Symbol *getResourceBase(int r);
1700 void getImageCoords(std::vector<Value *>&, int r, int s);
1701
1702 void handleLOAD(Value *dst0[4]);
1703 void handleSTORE();
1704 void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1705
1706 void handleINTERP(Value *dst0[4]);
1707
1708 uint8_t translateInterpMode(const struct nv50_ir_varying *var,
1709 operation& op);
1710 Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1711
1712 void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1713
1714 Value *buildDot(int dim);
1715
1716 class BindArgumentsPass : public Pass {
1717 public:
1718 BindArgumentsPass(Converter &conv) : conv(conv) { }
1719
1720 private:
1721 Converter &conv;
1722 Subroutine *sub;
1723
1724 inline const Location *getValueLocation(Subroutine *, Value *);
1725
1726 template<typename T> inline void
1727 updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1728 T (Function::*proto));
1729
1730 template<typename T> inline void
1731 updatePrototype(BitSet *set, void (Function::*updateSet)(),
1732 T (Function::*proto));
1733
1734 protected:
1735 bool visit(Function *);
1736 bool visit(BasicBlock *bb) { return false; }
1737 };
1738
1739 private:
1740 const tgsi::Source *code;
1741 const struct nv50_ir_prog_info *info;
1742
1743 struct {
1744 std::map<unsigned, Subroutine> map;
1745 Subroutine *cur;
1746 } sub;
1747
1748 uint ip; // instruction pointer
1749
1750 tgsi::Instruction tgsi;
1751
1752 DataType dstTy;
1753 DataType srcTy;
1754
1755 DataArray tData; // TGSI_FILE_TEMPORARY
1756 DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1757 DataArray aData; // TGSI_FILE_ADDRESS
1758 DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1759
1760 Value *zero;
1761 Value *fragCoord[4];
1762 Value *clipVtx[4];
1763
1764 Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1765 uint8_t vtxBaseValid;
1766
1767 Value *outBase; // base address of vertex out patch (for TCP)
1768
1769 Stack condBBs; // fork BB, then else clause BB
1770 Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1771 Stack loopBBs; // loop headers
1772 Stack breakBBs; // end of / after loop
1773
1774 Value *viewport;
1775 };
1776
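/* Build Symbols for TGSI register references. A TGSI register is a vec4 of
 * 32-bit channels, so the flat byte address used below is index * 16 +
 * component * 4; makeSym() then remaps the file and offset for inputs,
 * outputs, system values and the different TGSI_FILE_MEMORY types.
 */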
1777 Symbol *
1778 Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1779 {
1780 const int swz = src.getSwizzle(c);
1781
1782 /* TODO: Use Array ID when it's available for the index */
1783 return makeSym(src.getFile(),
1784 src.is2D() ? src.getIndex(1) : 0,
1785 src.getIndex(0), swz,
1786 src.getIndex(0) * 16 + swz * 4);
1787 }
1788
1789 Symbol *
1790 Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1791 {
1792 /* TODO: Use Array ID when it's available for the index */
1793 return makeSym(dst.getFile(),
1794 dst.is2D() ? dst.getIndex(1) : 0,
1795 dst.getIndex(0), c,
1796 dst.getIndex(0) * 16 + c * 4);
1797 }
1798
1799 Symbol *
1800 Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1801 {
1802 Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1803
1804 sym->reg.fileIndex = fileIdx;
1805
1806 if (tgsiFile == TGSI_FILE_MEMORY) {
1807 switch (code->memoryFiles[fileIdx].mem_type) {
1808 case TGSI_MEMORY_TYPE_GLOBAL:
1809          /* No-op, this is the default for TGSI_FILE_MEMORY */
1810 sym->setFile(FILE_MEMORY_GLOBAL);
1811 break;
1812 case TGSI_MEMORY_TYPE_SHARED:
1813 sym->setFile(FILE_MEMORY_SHARED);
1814 break;
1815 case TGSI_MEMORY_TYPE_INPUT:
1816 assert(prog->getType() == Program::TYPE_COMPUTE);
1817 assert(idx == -1);
1818 sym->setFile(FILE_SHADER_INPUT);
1819 address += info->prop.cp.inputOffset;
1820 break;
1821 default:
1822 assert(0); /* TODO: Add support for global and private memory */
1823 }
1824 }
1825
1826 if (idx >= 0) {
1827 if (sym->reg.file == FILE_SHADER_INPUT)
1828 sym->setOffset(info->in[idx].slot[c] * 4);
1829 else
1830 if (sym->reg.file == FILE_SHADER_OUTPUT)
1831 sym->setOffset(info->out[idx].slot[c] * 4);
1832 else
1833 if (sym->reg.file == FILE_SYSTEM_VALUE)
1834 sym->setSV(tgsi::translateSysVal(info->sv[idx].sn), c);
1835 else
1836 sym->setOffset(address);
1837 } else {
1838 sym->setOffset(address);
1839 }
1840 return sym;
1841 }
1842
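/* Translate a varying's interpolation qualifiers into an NV50_IR_INTERP_*
 * mode and the matching op: flat and linear inputs use OP_LINTERP, while
 * perspective and TGSI_INTERPOLATE_COLOR inputs use OP_PINTERP, which takes
 * fragCoord[3] as an extra source (see interpolate() below).
 */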
1843 uint8_t
1844 Converter::translateInterpMode(const struct nv50_ir_varying *var, operation& op)
1845 {
1846 uint8_t mode = NV50_IR_INTERP_PERSPECTIVE;
1847
1848 if (var->flat)
1849 mode = NV50_IR_INTERP_FLAT;
1850 else
1851 if (var->linear)
1852 mode = NV50_IR_INTERP_LINEAR;
1853 else
1854 if (var->sc)
1855 mode = NV50_IR_INTERP_SC;
1856
1857 op = (mode == NV50_IR_INTERP_PERSPECTIVE || mode == NV50_IR_INTERP_SC)
1858 ? OP_PINTERP : OP_LINTERP;
1859
1860 if (var->centroid)
1861 mode |= NV50_IR_INTERP_CENTROID;
1862
1863 return mode;
1864 }
1865
1866 Value *
1867 Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1868 {
1869 operation op;
1870
1871 // XXX: no way to know interpolation mode if we don't know what's accessed
1872 const uint8_t mode = translateInterpMode(&info->in[ptr ? 0 :
1873 src.getIndex(0)], op);
1874
1875 Instruction *insn = new_Instruction(func, op, TYPE_F32);
1876
1877 insn->setDef(0, getScratch());
1878 insn->setSrc(0, srcToSym(src, c));
1879 if (op == OP_PINTERP)
1880 insn->setSrc(1, fragCoord[3]);
1881 if (ptr)
1882 insn->setIndirect(0, 0, ptr);
1883
1884 insn->setInterpolate(mode);
1885
1886 bb->insertTail(insn);
1887 return insn->getDef(0);
1888 }
1889
1890 Value *
1891 Converter::applySrcMod(Value *val, int s, int c)
1892 {
1893 Modifier m = tgsi.getSrc(s).getMod(c);
1894 DataType ty = tgsi.inferSrcType();
1895
1896 if (m & Modifier(NV50_IR_MOD_ABS))
1897 val = mkOp1v(OP_ABS, ty, getScratch(), val);
1898
1899 if (m & Modifier(NV50_IR_MOD_NEG))
1900 val = mkOp1v(OP_NEG, ty, getScratch(), val);
1901
1902 return val;
1903 }
1904
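// Compute (and cache in vtxBase[]) the base address of per-vertex inputs for
// source slot 's', using OP_PFETCH on the (possibly indirect) vertex index.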
1905 Value *
1906 Converter::getVertexBase(int s)
1907 {
1908 assert(s < 5);
1909 if (!(vtxBaseValid & (1 << s))) {
1910 const int index = tgsi.getSrc(s).getIndex(1);
1911 Value *rel = NULL;
1912 if (tgsi.getSrc(s).isIndirect(1))
1913 rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1914 vtxBaseValid |= 1 << s;
1915 vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1916 mkImm(index), rel);
1917 }
1918 return vtxBase[s];
1919 }
1920
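// Compute (and cache in vtxBase[]) the per-vertex output base for source slot
// 's': outBase plus the (possibly indirect) vertex index.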
1921 Value *
1922 Converter::getOutputBase(int s)
1923 {
1924 assert(s < 5);
1925 if (!(vtxBaseValid & (1 << s))) {
1926 Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1927 if (tgsi.getSrc(s).isIndirect(1))
1928 offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1929 fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1930 offset);
1931 vtxBaseValid |= 1 << s;
1932 vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1933 }
1934 return vtxBase[s];
1935 }
1936
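// Fetch source operand 's', component 'c', resolving indirect and 2D
// addressing (per-vertex inputs/outputs, constant buffer index) and applying
// any source modifiers.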
1937 Value *
1938 Converter::fetchSrc(int s, int c)
1939 {
1940 Value *res;
1941 Value *ptr = NULL, *dimRel = NULL;
1942
1943 tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1944
1945 if (src.isIndirect(0))
1946 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1947
1948 if (src.is2D()) {
1949 switch (src.getFile()) {
1950 case TGSI_FILE_OUTPUT:
1951 dimRel = getOutputBase(s);
1952 break;
1953 case TGSI_FILE_INPUT:
1954 dimRel = getVertexBase(s);
1955 break;
1956 case TGSI_FILE_CONSTANT:
1957 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1958 if (src.isIndirect(1))
1959 dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1960 break;
1961 default:
1962 break;
1963 }
1964 }
1965
1966 res = fetchSrc(src, c, ptr);
1967
1968 if (dimRel)
1969 res->getInsn()->setIndirect(0, 1, dimRel);
1970
1971 return applySrcMod(res, s, c);
1972 }
1973
1974 Converter::DataArray *
1975 Converter::getArrayForFile(unsigned file, int idx)
1976 {
1977 switch (file) {
1978 case TGSI_FILE_TEMPORARY:
1979 return idx == 0 ? &tData : &lData;
1980 case TGSI_FILE_ADDRESS:
1981 return &aData;
1982 case TGSI_FILE_OUTPUT:
1983 assert(prog->getType() == Program::TYPE_FRAGMENT);
1984 return &oData;
1985 default:
1986 assert(!"invalid/unhandled TGSI source file");
1987 return NULL;
1988 }
1989 }
1990
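// Scale a register index by 16 (the byte size of a vec4 of 32-bit components)
// to form a byte offset usable for indirect addressing.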
1991 Value *
1992 Converter::shiftAddress(Value *index)
1993 {
1994 if (!index)
1995 return NULL;
1996 return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
1997 }
1998
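// If arrayId names an indirectly addressed temporary array, switch the access
// to the lData array (idx2d = 1) and add the array's base offset to the index.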
1999 void
2000 Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
2001 {
2002 std::map<int, int>::const_iterator it =
2003 code->indirectTempOffsets.find(arrayId);
2004 if (it == code->indirectTempOffsets.end())
2005 return;
2006
2007 idx2d = 1;
2008 idx += it->second;
2009 }
2010
2011 bool
2012 Converter::isSubGroupMask(uint8_t semantic)
2013 {
2014 switch (semantic) {
2015 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
2016 case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
2017 case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
2018 case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
2019 case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
2020 return true;
2021 default:
2022 return false;
2023 }
2024 }
2025
2026 Value *
2027 Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
2028 {
2029 int idx2d = src.is2D() ? src.getIndex(1) : 0;
2030 int idx = src.getIndex(0);
2031 const int swz = src.getSwizzle(c);
2032 Instruction *ld;
2033
2034 switch (src.getFile()) {
2035 case TGSI_FILE_IMMEDIATE:
2036 assert(!ptr);
2037 return loadImm(NULL, info->immd.data[idx * 4 + swz]);
2038 case TGSI_FILE_CONSTANT:
2039 return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
2040 case TGSI_FILE_INPUT:
2041 if (prog->getType() == Program::TYPE_FRAGMENT) {
2042          // don't load masked inputs; they won't be assigned a slot
2043 if (!ptr && !(info->in[idx].mask & (1 << swz)))
2044 return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
2045 return interpolate(src, c, shiftAddress(ptr));
2046 } else
2047 if (prog->getType() == Program::TYPE_GEOMETRY) {
2048 if (!ptr && info->in[idx].sn == TGSI_SEMANTIC_PRIMID)
2049 return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
2050 // XXX: This is going to be a problem with scalar arrays, i.e. when
2051 // we cannot assume that the address is given in units of vec4.
2052 //
2053 // nv50 and nvc0 need different things here, so let the lowering
2054 // passes decide what to do with the address
2055 if (ptr)
2056 return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
2057 }
2058 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2059 ld->perPatch = info->in[idx].patch;
2060 return ld->getDef(0);
2061 case TGSI_FILE_OUTPUT:
2062 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2063 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2064 ld->perPatch = info->out[idx].patch;
2065 return ld->getDef(0);
2066 case TGSI_FILE_SYSTEM_VALUE:
2067 assert(!ptr);
2068 if (info->sv[idx].sn == TGSI_SEMANTIC_THREAD_ID &&
2069 info->prop.cp.numThreads[swz] == 1)
2070 return loadImm(NULL, 0u);
2071 if (isSubGroupMask(info->sv[idx].sn) && swz > 0)
2072 return loadImm(NULL, 0u);
2073 if (info->sv[idx].sn == TGSI_SEMANTIC_SUBGROUP_SIZE)
2074 return loadImm(NULL, 32u);
2075 ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
2076 ld->perPatch = info->sv[idx].patch;
2077 return ld->getDef(0);
2078 case TGSI_FILE_TEMPORARY: {
2079 int arrayid = src.getArrayId();
2080 if (!arrayid)
2081 arrayid = code->tempArrayId[idx];
2082 adjustTempIndex(arrayid, idx, idx2d);
2083 }
2084 /* fallthrough */
2085 default:
2086 return getArrayForFile(src.getFile(), idx2d)->load(
2087 sub.cur->values, idx, swz, shiftAddress(ptr));
2088 }
2089 }
2090
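// Return the value to write for destination 'd', component 'c': NULL if the
// channel is masked or the destination is a buffer/memory/image, a scratch
// register if the result must be stored separately afterwards, or the
// register array entry itself.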
2091 Value *
2092 Converter::acquireDst(int d, int c)
2093 {
2094 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2095 const unsigned f = dst.getFile();
2096 int idx = dst.getIndex(0);
2097 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2098
2099 if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
2100 f == TGSI_FILE_IMAGE)
2101 return NULL;
2102
2103 if (dst.isIndirect(0) ||
2104 f == TGSI_FILE_SYSTEM_VALUE ||
2105 (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
2106 return getScratch();
2107
2108 if (f == TGSI_FILE_TEMPORARY) {
2109 int arrayid = dst.getArrayId();
2110 if (!arrayid)
2111 arrayid = code->tempArrayId[idx];
2112 adjustTempIndex(arrayid, idx, idx2d);
2113 }
2114
2115    return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
2116 }
2117
2118 void
2119 Converter::storeDst(int d, int c, Value *val)
2120 {
2121 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2122
2123 if (tgsi.getSaturate()) {
2124 mkOp1(OP_SAT, dstTy, val, val);
2125 }
2126
2127 Value *ptr = NULL;
2128 if (dst.isIndirect(0))
2129 ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
2130
2131 if (info->io.genUserClip > 0 &&
2132 dst.getFile() == TGSI_FILE_OUTPUT &&
2133 !dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
2134 mkMov(clipVtx[c], val);
2135 val = clipVtx[c];
2136 }
2137
2138 storeDst(dst, c, val, ptr);
2139 }
2140
2141 void
2142 Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2143 Value *val, Value *ptr)
2144 {
2145 const unsigned f = dst.getFile();
2146 int idx = dst.getIndex(0);
2147 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2148
2149 if (f == TGSI_FILE_SYSTEM_VALUE) {
2150 assert(!ptr);
2151 mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2152 } else
2153 if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2154
2155 if (ptr || (info->out[idx].mask & (1 << c))) {
2156 /* Save the viewport index into a scratch register so that it can be
2157 exported at EMIT time */
2158 if (info->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2159 prog->getType() == Program::TYPE_GEOMETRY &&
2160 viewport != NULL)
2161 mkOp1(OP_MOV, TYPE_U32, viewport, val);
2162 else
2163 mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2164 info->out[idx].patch;
2165 }
2166 } else
2167 if (f == TGSI_FILE_TEMPORARY ||
2168 f == TGSI_FILE_ADDRESS ||
2169 f == TGSI_FILE_OUTPUT) {
2170 if (f == TGSI_FILE_TEMPORARY) {
2171 int arrayid = dst.getArrayId();
2172 if (!arrayid)
2173 arrayid = code->tempArrayId[idx];
2174 adjustTempIndex(arrayid, idx, idx2d);
2175 }
2176
2177 getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2178 } else {
2179 assert(!"invalid dst file");
2180 }
2181 }
2182
2183 #define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2184 for (chan = 0; chan < 4; ++chan) \
2185 if (!inst.getDst(d).isMasked(chan))
2186
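// Build a 'dim'-component dot product of sources 0 and 1: a MUL followed by
// chained MADs accumulating into dotp.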
2187 Value *
2188 Converter::buildDot(int dim)
2189 {
2190 assert(dim > 0);
2191
2192 Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2193 Value *dotp = getScratch();
2194
2195 mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)
2196 ->dnz = info->io.mul_zero_wins;
2197
2198 for (int c = 1; c < dim; ++c) {
2199 src0 = fetchSrc(0, c);
2200 src1 = fetchSrc(1, c);
2201 mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)
2202 ->dnz = info->io.mul_zero_wins;
2203 }
2204 return dotp;
2205 }
2206
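// Mark re-convergence of a divergent branch: OP_JOINAT at the end of the fork
// block and OP_JOIN at the head of the convergence block.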
2207 void
2208 Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2209 {
2210 FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2211 join->fixed = 1;
2212 conv->insertHead(join);
2213
2214 assert(!fork->joinAt);
2215 fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2216 fork->insertBefore(fork->getExit(), fork->joinAt);
2217 }
2218
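// Set the texture (R) and sampler (S) indices on 'tex', adding indirect
// address sources starting at slot 's' (and advancing it) where the TGSI
// operands are indirect.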
2219 void
2220 Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2221 {
2222 unsigned rIdx = 0, sIdx = 0;
2223
2224 if (R >= 0)
2225 rIdx = tgsi.getSrc(R).getIndex(0);
2226 if (S >= 0)
2227 sIdx = tgsi.getSrc(S).getIndex(0);
2228
2229 tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2230
2231 if (tgsi.getSrc(R).isIndirect(0)) {
2232 tex->tex.rIndirectSrc = s;
2233 tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2234 }
2235 if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2236 tex->tex.sIndirectSrc = s;
2237 tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2238 }
2239 }
2240
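// Emit an OP_TXQ; for TXQ_DIMS the first source is the mip level, otherwise
// it is zero.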
2241 void
2242 Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2243 {
2244 TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2245 tex->tex.query = query;
2246 unsigned int c, d;
2247
2248 for (d = 0, c = 0; c < 4; ++c) {
2249 if (!dst0[c])
2250 continue;
2251 tex->tex.mask |= 1 << c;
2252 tex->setDef(d++, dst0[c]);
2253 }
2254 if (query == TXQ_DIMS)
2255 tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2256 else
2257 tex->setSrc((c = 0), zero);
2258
2259 setTexRS(tex, ++c, R, -1);
2260
2261 bb->insertTail(tex);
2262 }
2263
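// Apply the projective divide for TXP: where a coordinate comes from
// OP_PINTERP it is re-interpolated with 1/q as the perspective source,
// otherwise it is multiplied by 1/q explicitly.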
2264 void
2265 Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2266 {
2267 Value *proj = fetchSrc(0, 3);
2268 Instruction *insn = proj->getUniqueInsn();
2269 int c;
2270
2271 if (insn->op == OP_PINTERP) {
2272 bb->insertTail(insn = cloneForward(func, insn));
2273 insn->op = OP_LINTERP;
2274 insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2275 insn->setSrc(1, NULL);
2276 proj = insn->getDef(0);
2277 }
2278 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2279
2280 for (c = 0; c < 4; ++c) {
2281 if (!(mask & (1 << c)))
2282 continue;
2283 if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2284 continue;
2285 mask &= ~(1 << c);
2286
2287 bb->insertTail(insn = cloneForward(func, insn));
2288 insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2289 insn->setSrc(1, proj);
2290 dst[c] = insn->getDef(0);
2291 }
2292 if (!mask)
2293 return;
2294
2295 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2296
2297 for (c = 0; c < 4; ++c)
2298 if (mask & (1 << c))
2299 dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2300 }
2301
2302 // order of nv50 ir sources: x y z layer lod/bias shadow
2303 // order of TGSI TEX sources: x y z layer shadow lod/bias
2304 // lowering will finally set the hw specific order (like array first on nvc0)
2305 void
2306 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2307 {
2308 Value *arg[4], *src[8];
2309 Value *lod = NULL, *shd = NULL;
2310 unsigned int s, c, d;
2311 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2312
2313 TexInstruction::Target tgt = tgsi.getTexture(code, R);
2314
2315 for (s = 0; s < tgt.getArgCount(); ++s)
2316 arg[s] = src[s] = fetchSrc(0, s);
2317
2318 if (tgsi.getOpcode() == TGSI_OPCODE_TEX_LZ)
2319 lod = loadImm(NULL, 0);
2320 else if (texi->op == OP_TXL || texi->op == OP_TXB)
2321 lod = fetchSrc(L >> 4, L & 3);
2322
2323 if (C == 0x0f)
2324 C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2325
2326 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 &&
2327 tgt == TEX_TARGET_CUBE_ARRAY_SHADOW)
2328 shd = fetchSrc(1, 0);
2329 else if (tgt.isShadow())
2330 shd = fetchSrc(C >> 4, C & 3);
2331
2332 if (texi->op == OP_TXD) {
2333 for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2334 texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2335 texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2336 }
2337 }
2338
2339    // cube textures don't care about the projection value; it's divided out
2340 if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2341 unsigned int n = tgt.getDim();
2342 if (shd) {
2343 arg[n] = shd;
2344 ++n;
2345 assert(tgt.getDim() == tgt.getArgCount());
2346 }
2347 loadProjTexCoords(src, arg, (1 << n) - 1);
2348 if (shd)
2349 shd = src[n - 1];
2350 }
2351
2352 for (c = 0, d = 0; c < 4; ++c) {
2353 if (dst[c]) {
2354 texi->setDef(d++, dst[c]);
2355 texi->tex.mask |= 1 << c;
2356 } else {
2357 // NOTE: maybe hook up def too, for CSE
2358 }
2359 }
2360 for (s = 0; s < tgt.getArgCount(); ++s)
2361 texi->setSrc(s, src[s]);
2362 if (lod)
2363 texi->setSrc(s++, lod);
2364 if (shd)
2365 texi->setSrc(s++, shd);
2366
2367 setTexRS(texi, s, R, S);
2368
2369 if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2370 texi->tex.levelZero = true;
2371 if (prog->getType() != Program::TYPE_FRAGMENT &&
2372 (tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2373 tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2374 tgsi.getOpcode() == TGSI_OPCODE_TXP))
2375 texi->tex.levelZero = true;
2376 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2377 texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, info);
2378
2379 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2380 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2381 for (c = 0; c < 3; ++c) {
2382 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2383 texi->offset[s][c].setInsn(texi);
2384 }
2385 }
2386
2387 bb->insertTail(texi);
2388 }
2389
2390 // 1st source: xyz = coordinates, w = lod/sample
2391 // 2nd source: offset
2392 void
2393 Converter::handleTXF(Value *dst[4], int R, int L_M)
2394 {
2395 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2396 int ms;
2397 unsigned int c, d, s;
2398
2399 texi->tex.target = tgsi.getTexture(code, R);
2400
2401 ms = texi->tex.target.isMS() ? 1 : 0;
2402 texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2403
2404 for (c = 0, d = 0; c < 4; ++c) {
2405 if (dst[c]) {
2406 texi->setDef(d++, dst[c]);
2407 texi->tex.mask |= 1 << c;
2408 }
2409 }
2410 for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2411 texi->setSrc(c, fetchSrc(0, c));
2412 if (!ms && tgsi.getOpcode() == TGSI_OPCODE_TXF_LZ)
2413 texi->setSrc(c++, loadImm(NULL, 0));
2414 else
2415 texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2416
2417 setTexRS(texi, c, R, -1);
2418
2419 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2420 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2421 for (c = 0; c < 3; ++c) {
2422 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2423 texi->offset[s][c].setInsn(texi);
2424 }
2425 }
2426
2427 bb->insertTail(texi);
2428 }
2429
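// Framebuffer fetch: read back the current render target texel with a TXF on
// a 2D MS array target, addressed by the fragment position, layer and sample
// index.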
2430 void
2431 Converter::handleFBFETCH(Value *dst[4])
2432 {
2433 TexInstruction *texi = new_TexInstruction(func, OP_TXF);
2434 unsigned int c, d;
2435
2436 texi->tex.target = TEX_TARGET_2D_MS_ARRAY;
2437 texi->tex.levelZero = 1;
2438 texi->tex.useOffsets = 0;
2439
2440 for (c = 0, d = 0; c < 4; ++c) {
2441 if (dst[c]) {
2442 texi->setDef(d++, dst[c]);
2443 texi->tex.mask |= 1 << c;
2444 }
2445 }
2446
2447 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));
2448 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));
2449 Value *z = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_LAYER, 0));
2450 Value *ms = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_SAMPLE_INDEX, 0));
2451
2452 mkCvt(OP_CVT, TYPE_U32, x, TYPE_F32, x)->rnd = ROUND_Z;
2453 mkCvt(OP_CVT, TYPE_U32, y, TYPE_F32, y)->rnd = ROUND_Z;
2454 texi->setSrc(0, x);
2455 texi->setSrc(1, y);
2456 texi->setSrc(2, z);
2457 texi->setSrc(3, ms);
2458
2459 texi->tex.r = texi->tex.s = -1;
2460
2461 bb->insertTail(texi);
2462 }
2463
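// LIT: dst = (1, max(src.x, 0), src.x > 0 ? max(src.y, 0)^clamp(src.w) : 0, 1),
// with src.w clamped to +/-127.999999.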
2464 void
2465 Converter::handleLIT(Value *dst0[4])
2466 {
2467 Value *val0 = NULL;
2468 unsigned int mask = tgsi.getDst(0).getMask();
2469
2470 if (mask & (1 << 0))
2471 loadImm(dst0[0], 1.0f);
2472
2473 if (mask & (1 << 3))
2474 loadImm(dst0[3], 1.0f);
2475
2476 if (mask & (3 << 1)) {
2477 val0 = getScratch();
2478 mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2479 if (mask & (1 << 1))
2480 mkMov(dst0[1], val0);
2481 }
2482
2483 if (mask & (1 << 2)) {
2484 Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2485 Value *val1 = getScratch(), *val3 = getScratch();
2486
2487 Value *pos128 = loadImm(NULL, +127.999999f);
2488 Value *neg128 = loadImm(NULL, -127.999999f);
2489
2490 mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2491 mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2492 mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2493 mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2494
2495 mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2496 }
2497 }
2498
2499 /* Keep this around for now as reference when adding img support
2500 static inline bool
2501 isResourceSpecial(const int r)
2502 {
2503 return (r == TGSI_RESOURCE_GLOBAL ||
2504 r == TGSI_RESOURCE_LOCAL ||
2505 r == TGSI_RESOURCE_PRIVATE ||
2506 r == TGSI_RESOURCE_INPUT);
2507 }
2508
2509 static inline bool
2510 isResourceRaw(const tgsi::Source *code, const int r)
2511 {
2512 return isResourceSpecial(r) || code->resources[r].raw;
2513 }
2514
2515 static inline nv50_ir::TexTarget
2516 getResourceTarget(const tgsi::Source *code, int r)
2517 {
2518 if (isResourceSpecial(r))
2519 return nv50_ir::TEX_TARGET_BUFFER;
2520 return tgsi::translateTexture(code->resources.at(r).target);
2521 }
2522
2523 Symbol *
2524 Converter::getResourceBase(const int r)
2525 {
2526 Symbol *sym = NULL;
2527
2528 switch (r) {
2529 case TGSI_RESOURCE_GLOBAL:
2530 sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2531 info->io.auxCBSlot);
2532 break;
2533 case TGSI_RESOURCE_LOCAL:
2534 assert(prog->getType() == Program::TYPE_COMPUTE);
2535 sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2536 info->prop.cp.sharedOffset);
2537 break;
2538 case TGSI_RESOURCE_PRIVATE:
2539 sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2540 info->bin.tlsSpace);
2541 break;
2542 case TGSI_RESOURCE_INPUT:
2543 assert(prog->getType() == Program::TYPE_COMPUTE);
2544 sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2545 info->prop.cp.inputOffset);
2546 break;
2547 default:
2548 sym = new_Symbol(prog,
2549 nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2550 break;
2551 }
2552 return sym;
2553 }
2554
2555 void
2556 Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2557 {
2558 const int arg =
2559 TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2560
2561 for (int c = 0; c < arg; ++c)
2562 coords.push_back(fetchSrc(s, c));
2563
2564 // NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2565 if (r == TGSI_RESOURCE_LOCAL ||
2566 r == TGSI_RESOURCE_PRIVATE ||
2567 r == TGSI_RESOURCE_INPUT)
2568 coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2569 coords[0]);
2570 }
2571 */
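// Split a component write mask into at most two contiguous ranges; comp[]
// receives each range's first component and size[] its length. Returns the
// number of ranges.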
2572 static inline int
2573 partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2574 {
2575 int n = 0;
2576
2577 while (mask) {
2578 if (mask & 1) {
2579 size[n]++;
2580 } else {
2581 if (size[n])
2582 comp[n = 1] = size[0] + 1;
2583 else
2584 comp[n]++;
2585 }
2586 mask >>= 1;
2587 }
2588 if (size[0] == 3) {
2589 n = 1;
2590 size[0] = (comp[0] == 1) ? 1 : 2;
2591 size[1] = 3 - size[0];
2592 comp[1] = comp[0] + size[0];
2593 }
2594 return n + 1;
2595 }
2596
2597 static inline nv50_ir::TexTarget
2598 getImageTarget(const tgsi::Source *code, int r)
2599 {
2600 return tgsi::translateTexture(code->images.at(r).target);
2601 }
2602
2603 static inline const nv50_ir::TexInstruction::ImgFormatDesc *
2604 getImageFormat(const tgsi::Source *code, int r)
2605 {
2606 return &nv50_ir::TexInstruction::formatTable[
2607 tgsi::translateImgFormat(code->images.at(r).format)];
2608 }
2609
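// Collect the coordinate sources for an image access: dim (+ array/cube layer)
// components, plus the sample index for multisampled targets.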
2610 void
2611 Converter::getImageCoords(std::vector<Value *> &coords, int r, int s)
2612 {
2613 TexInstruction::Target t =
2614 TexInstruction::Target(getImageTarget(code, r));
2615 const int arg = t.getDim() + (t.isArray() || t.isCube());
2616
2617 for (int c = 0; c < arg; ++c)
2618 coords.push_back(fetchSrc(s, c));
2619
2620 if (t.isMS())
2621 coords.push_back(fetchSrc(s, 3));
2622 }
2623
2624 // For raw loads, the granularity is 4 bytes.
2625 // Usage of the texture read mask on OP_SULDP is not allowed.
2626 void
2627 Converter::handleLOAD(Value *dst0[4])
2628 {
2629 const int r = tgsi.getSrc(0).getIndex(0);
2630 int c;
2631 std::vector<Value *> off, src, ldv, def;
2632
2633 switch (tgsi.getSrc(0).getFile()) {
2634 case TGSI_FILE_BUFFER:
2635 case TGSI_FILE_MEMORY:
2636 for (c = 0; c < 4; ++c) {
2637 if (!dst0[c])
2638 continue;
2639
2640 Value *off;
2641 Symbol *sym;
2642 uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2643
2644 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2645 off = NULL;
2646 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2647 tgsi.getSrc(1).getValueU32(0, info) +
2648 src0_component_offset);
2649 } else {
2650 // yzw are ignored for buffers
2651 off = fetchSrc(1, 0);
2652 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2653 src0_component_offset);
2654 }
2655
2656 Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2657 ld->cache = tgsi.getCacheMode();
2658 if (tgsi.getSrc(0).isIndirect(0))
2659 ld->setIndirect(0, 1, fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0));
2660 }
2661 break;
2662 case TGSI_FILE_IMAGE: {
2663 assert(!code->images[r].raw);
2664
2665 getImageCoords(off, r, 1);
2666 def.resize(4);
2667
2668 for (c = 0; c < 4; ++c) {
2669 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2670 def[c] = getScratch();
2671 else
2672 def[c] = dst0[c];
2673 }
2674
2675 TexInstruction *ld =
2676 mkTex(OP_SULDP, getImageTarget(code, r), code->images[r].slot, 0,
2677 def, off);
2678 ld->tex.mask = tgsi.getDst(0).getMask();
2679 ld->tex.format = getImageFormat(code, r);
2680 ld->cache = tgsi.getCacheMode();
2681 if (tgsi.getSrc(0).isIndirect(0))
2682 ld->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
2683
2684 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2685 if (dst0[c] != def[c])
2686 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2687 }
2688 break;
2689 default:
2690 assert(!"Unsupported srcFile for LOAD");
2691 }
2692
2693 /* Keep this around for now as reference when adding img support
2694 getResourceCoords(off, r, 1);
2695
2696 if (isResourceRaw(code, r)) {
2697 uint8_t mask = 0;
2698 uint8_t comp[2] = { 0, 0 };
2699 uint8_t size[2] = { 0, 0 };
2700
2701 Symbol *base = getResourceBase(r);
2702
2703 // determine the base and size of the at most 2 load ops
2704 for (c = 0; c < 4; ++c)
2705 if (!tgsi.getDst(0).isMasked(c))
2706 mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2707
2708 int n = partitionLoadStore(comp, size, mask);
2709
2710 src = off;
2711
2712 def.resize(4); // index by component, the ones we need will be non-NULL
2713 for (c = 0; c < 4; ++c) {
2714 if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2715 def[c] = dst0[c];
2716 else
2717 if (mask & (1 << c))
2718 def[c] = getScratch();
2719 }
2720
2721 const bool useLd = isResourceSpecial(r) ||
2722 (info->io.nv50styleSurfaces &&
2723 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2724
2725 for (int i = 0; i < n; ++i) {
2726 ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2727
2728 if (comp[i]) // adjust x component of source address if necessary
2729 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2730 off[0], mkImm(comp[i] * 4));
2731 else
2732 src[0] = off[0];
2733
2734 if (useLd) {
2735 Instruction *ld =
2736 mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2737 for (size_t c = 1; c < ldv.size(); ++c)
2738 ld->setDef(c, ldv[c]);
2739 } else {
2740 mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2741 0, ldv, src)->dType = typeOfSize(size[i] * 4);
2742 }
2743 }
2744 } else {
2745 def.resize(4);
2746 for (c = 0; c < 4; ++c) {
2747 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2748 def[c] = getScratch();
2749 else
2750 def[c] = dst0[c];
2751 }
2752
2753 mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2754 def, off);
2755 }
2756 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2757 if (dst0[c] != def[c])
2758 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2759 */
2760 }
2761
2762 // For formatted stores, the write mask on OP_SUSTP can be used.
2763 // Raw stores have to be split.
2764 void
2765 Converter::handleSTORE()
2766 {
2767 const int r = tgsi.getDst(0).getIndex(0);
2768 int c;
2769 std::vector<Value *> off, src, dummy;
2770
2771 switch (tgsi.getDst(0).getFile()) {
2772 case TGSI_FILE_BUFFER:
2773 case TGSI_FILE_MEMORY:
2774 for (c = 0; c < 4; ++c) {
2775 if (!(tgsi.getDst(0).getMask() & (1 << c)))
2776 continue;
2777
2778 Symbol *sym;
2779 Value *off;
2780 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2781 off = NULL;
2782 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2783 tgsi.getSrc(0).getValueU32(0, info) + 4 * c);
2784 } else {
2785 // yzw are ignored for buffers
2786 off = fetchSrc(0, 0);
2787 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2788 }
2789
2790 Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2791 st->cache = tgsi.getCacheMode();
2792 if (tgsi.getDst(0).isIndirect(0))
2793 st->setIndirect(0, 1, fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0));
2794 }
2795 break;
2796 case TGSI_FILE_IMAGE: {
2797 assert(!code->images[r].raw);
2798
2799 getImageCoords(off, r, 0);
2800 src = off;
2801
2802 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2803 src.push_back(fetchSrc(1, c));
2804
2805 TexInstruction *st =
2806 mkTex(OP_SUSTP, getImageTarget(code, r), code->images[r].slot,
2807 0, dummy, src);
2808 st->tex.mask = tgsi.getDst(0).getMask();
2809 st->tex.format = getImageFormat(code, r);
2810 st->cache = tgsi.getCacheMode();
2811 if (tgsi.getDst(0).isIndirect(0))
2812 st->setIndirectR(fetchSrc(tgsi.getDst(0).getIndirect(0), 0, NULL));
2813 }
2814 break;
2815 default:
2816 assert(!"Unsupported dstFile for STORE");
2817 }
2818
2819 /* Keep this around for now as reference when adding img support
2820 getResourceCoords(off, r, 0);
2821 src = off;
2822 const int s = src.size();
2823
2824 if (isResourceRaw(code, r)) {
2825 uint8_t comp[2] = { 0, 0 };
2826 uint8_t size[2] = { 0, 0 };
2827
2828 int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2829
2830 Symbol *base = getResourceBase(r);
2831
2832 const bool useSt = isResourceSpecial(r) ||
2833 (info->io.nv50styleSurfaces &&
2834 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2835
2836 for (int i = 0; i < n; ++i) {
2837 if (comp[i]) // adjust x component of source address if necessary
2838 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2839 off[0], mkImm(comp[i] * 4));
2840 else
2841 src[0] = off[0];
2842
2843 const DataType stTy = typeOfSize(size[i] * 4);
2844
2845 if (useSt) {
2846 Instruction *st =
2847 mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2848 for (c = 1; c < size[i]; ++c)
2849 st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2850 st->setIndirect(0, 0, src[0]);
2851 } else {
2852 // attach values to be stored
2853 src.resize(s + size[i]);
2854 for (c = 0; c < size[i]; ++c)
2855 src[s + c] = fetchSrc(1, comp[i] + c);
2856 mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2857 0, dummy, src)->setType(stTy);
2858 }
2859 }
2860 } else {
2861 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2862 src.push_back(fetchSrc(1, c));
2863
2864 mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2865 dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2866 }
2867 */
2868 }
2869
2870 // XXX: These only work on resources with the single-component u32/s32 formats.
2871 // Therefore the result is replicated. This might not be intended by TGSI, but
2872 // operating on more than one component would produce undefined results because
2873 // those components do not exist.
2874 void
2875 Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2876 {
2877 const int r = tgsi.getSrc(0).getIndex(0);
2878 std::vector<Value *> srcv;
2879 std::vector<Value *> defv;
2880 LValue *dst = getScratch();
2881
2882 switch (tgsi.getSrc(0).getFile()) {
2883 case TGSI_FILE_BUFFER:
2884 case TGSI_FILE_MEMORY:
2885 for (int c = 0; c < 4; ++c) {
2886 if (!dst0[c])
2887 continue;
2888
2889 Instruction *insn;
2890 Value *off = fetchSrc(1, c), *off2 = NULL;
2891 Value *sym;
2892 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2893 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2894 tgsi.getSrc(1).getValueU32(c, info));
2895 else
2896 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2897 if (tgsi.getSrc(0).isIndirect(0))
2898 off2 = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2899 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2900 insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2901 else
2902 insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2903 if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2904 insn->setIndirect(0, 0, off);
2905 if (off2)
2906 insn->setIndirect(0, 1, off2);
2907 insn->subOp = subOp;
2908 }
2909 for (int c = 0; c < 4; ++c)
2910 if (dst0[c])
2911 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2912 break;
2913 case TGSI_FILE_IMAGE: {
2914 assert(!code->images[r].raw);
2915
2916 getImageCoords(srcv, r, 1);
2917 defv.push_back(dst);
2918 srcv.push_back(fetchSrc(2, 0));
2919
2920 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2921 srcv.push_back(fetchSrc(3, 0));
2922
2923 TexInstruction *tex = mkTex(OP_SUREDP, getImageTarget(code, r),
2924 code->images[r].slot, 0, defv, srcv);
2925 tex->subOp = subOp;
2926 tex->tex.mask = 1;
2927 tex->tex.format = getImageFormat(code, r);
2928 tex->setType(ty);
2929 if (tgsi.getSrc(0).isIndirect(0))
2930 tex->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
2931
2932 for (int c = 0; c < 4; ++c)
2933 if (dst0[c])
2934 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2935 }
2936 break;
2937 default:
2938 assert(!"Unsupported srcFile for ATOM");
2939 }
2940
2941 /* Keep this around for now as reference when adding img support
2942 getResourceCoords(srcv, r, 1);
2943
2944 if (isResourceSpecial(r)) {
2945 assert(r != TGSI_RESOURCE_INPUT);
2946 Instruction *insn;
2947 insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
2948 insn->subOp = subOp;
2949 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2950 insn->setSrc(2, fetchSrc(3, 0));
2951 insn->setIndirect(0, 0, srcv.at(0));
2952 } else {
2953 operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
2954 TexTarget targ = getResourceTarget(code, r);
2955 int idx = code->resources[r].slot;
2956 defv.push_back(dst);
2957 srcv.push_back(fetchSrc(2, 0));
2958 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2959 srcv.push_back(fetchSrc(3, 0));
2960 TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
2961 tex->subOp = subOp;
2962 tex->tex.mask = 1;
2963 tex->setType(ty);
2964 }
2965
2966 for (int c = 0; c < 4; ++c)
2967 if (dst0[c])
2968 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2969 */
2970 }
2971
2972 void
2973 Converter::handleINTERP(Value *dst[4])
2974 {
2975    // Check whether the input is linear; all other attributes are ignored.
2976 Instruction *insn;
2977 Value *offset = NULL, *ptr = NULL, *w = NULL;
2978 Symbol *sym[4] = { NULL };
2979 bool linear;
2980 operation op = OP_NOP;
2981 int c, mode = 0;
2982
2983 tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
2984
2985 // In some odd cases, in large part due to varying packing, the source
2986 // might not actually be an input. This is illegal TGSI, but it's easier to
2987 // account for it here than it is to fix it where the TGSI is being
2988 // generated. In that case, it's going to be a straight up mov (or sequence
2989 // of mov's) from the input in question. We follow the mov chain to see
2990 // which input we need to use.
2991 if (src.getFile() != TGSI_FILE_INPUT) {
2992 if (src.isIndirect(0)) {
2993 ERROR("Ignoring indirect input interpolation\n");
2994 return;
2995 }
2996 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
2997 Value *val = fetchSrc(0, c);
2998 assert(val->defs.size() == 1);
2999 insn = val->getInsn();
3000 while (insn->op == OP_MOV) {
3001 assert(insn->getSrc(0)->defs.size() == 1);
3002 insn = insn->getSrc(0)->getInsn();
3003 if (!insn) {
3004 ERROR("Miscompiling shader due to unhandled INTERP\n");
3005 return;
3006 }
3007 }
3008 if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
3009 ERROR("Trying to interpolate non-input, this is not allowed.\n");
3010 return;
3011 }
3012 sym[c] = insn->getSrc(0)->asSym();
3013 assert(sym[c]);
3014 op = insn->op;
3015 mode = insn->ipa;
3016 }
3017 } else {
3018 if (src.isIndirect(0))
3019 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
3020
3021 // We can assume that the fixed index will point to an input of the same
3022 // interpolation type in case of an indirect.
3023 // TODO: Make use of ArrayID.
3024 linear = info->in[src.getIndex(0)].linear;
3025 if (linear) {
3026 op = OP_LINTERP;
3027 mode = NV50_IR_INTERP_LINEAR;
3028 } else {
3029 op = OP_PINTERP;
3030 mode = NV50_IR_INTERP_PERSPECTIVE;
3031 }
3032 }
3033
3034 switch (tgsi.getOpcode()) {
3035 case TGSI_OPCODE_INTERP_CENTROID:
3036 mode |= NV50_IR_INTERP_CENTROID;
3037 break;
3038 case TGSI_OPCODE_INTERP_SAMPLE:
3039 insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), fetchSrc(1, 0));
3040 insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
3041 mode |= NV50_IR_INTERP_OFFSET;
3042 break;
3043 case TGSI_OPCODE_INTERP_OFFSET: {
3044 // The input in src1.xy is float, but we need a single 32-bit value
3045 // where the upper and lower 16 bits are encoded in S0.12 format. We need
3046 // to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
3047 // and then convert to s32.
3048 Value *offs[2];
3049 for (c = 0; c < 2; c++) {
3050 offs[c] = getScratch();
3051 mkOp2(OP_MIN, TYPE_F32, offs[c], fetchSrc(1, c), loadImm(NULL, 0.4375f));
3052 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
3053 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
3054 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
3055 }
3056 offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
3057 offs[1], mkImm(0x1010), offs[0]);
3058 mode |= NV50_IR_INTERP_OFFSET;
3059 break;
3060 }
3061 }
3062
3063 if (op == OP_PINTERP) {
3064 if (offset) {
3065 w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
3066 mkOp1(OP_RCP, TYPE_F32, w, w);
3067 } else {
3068 w = fragCoord[3];
3069 }
3070 }
3071
3072
3073 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3074 insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
3075 if (op == OP_PINTERP)
3076 insn->setSrc(1, w);
3077 if (ptr)
3078 insn->setIndirect(0, 0, ptr);
3079 if (offset)
3080 insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
3081
3082 insn->setInterpolate(mode);
3083 }
3084 }
3085
3086 Converter::Subroutine *
3087 Converter::getSubroutine(unsigned ip)
3088 {
3089 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
3090
3091 if (it == sub.map.end())
3092 it = sub.map.insert(std::make_pair(
3093 ip, Subroutine(new Function(prog, "SUB", ip)))).first;
3094
3095 return &it->second;
3096 }
3097
3098 Converter::Subroutine *
3099 Converter::getSubroutine(Function *f)
3100 {
3101 unsigned ip = f->getLabel();
3102 std::map<unsigned, Subroutine>::iterator it = sub.map.find(ip);
3103
3104 if (it == sub.map.end())
3105 it = sub.map.insert(std::make_pair(ip, Subroutine(f))).first;
3106
3107 return &it->second;
3108 }
3109
3110 bool
3111 Converter::isEndOfSubroutine(uint ip)
3112 {
3113 assert(ip < code->scan.num_instructions);
3114 tgsi::Instruction insn(&code->insns[ip]);
3115 return (insn.getOpcode() == TGSI_OPCODE_END ||
3116 insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
3117           // does END occur at the end of main or at the very end?
3118 insn.getOpcode() == TGSI_OPCODE_BGNSUB);
3119 }
3120
3121 bool
3122 Converter::handleInstruction(const struct tgsi_full_instruction *insn)
3123 {
3124 Instruction *geni;
3125
3126 Value *dst0[4], *rDst0[4];
3127 Value *src0, *src1, *src2, *src3;
3128 Value *val0, *val1;
3129 int c;
3130
3131 tgsi = tgsi::Instruction(insn);
3132
3133 bool useScratchDst = tgsi.checkDstSrcAliasing();
3134
3135 operation op = tgsi.getOP();
3136 dstTy = tgsi.inferDstType();
3137 srcTy = tgsi.inferSrcType();
3138
3139 unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
3140
3141 if (tgsi.dstCount()) {
3142 for (c = 0; c < 4; ++c) {
3143 rDst0[c] = acquireDst(0, c);
3144 dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
3145 }
3146 }
3147
3148 switch (tgsi.getOpcode()) {
3149 case TGSI_OPCODE_ADD:
3150 case TGSI_OPCODE_UADD:
3151 case TGSI_OPCODE_AND:
3152 case TGSI_OPCODE_DIV:
3153 case TGSI_OPCODE_IDIV:
3154 case TGSI_OPCODE_UDIV:
3155 case TGSI_OPCODE_MAX:
3156 case TGSI_OPCODE_MIN:
3157 case TGSI_OPCODE_IMAX:
3158 case TGSI_OPCODE_IMIN:
3159 case TGSI_OPCODE_UMAX:
3160 case TGSI_OPCODE_UMIN:
3161 case TGSI_OPCODE_MOD:
3162 case TGSI_OPCODE_UMOD:
3163 case TGSI_OPCODE_MUL:
3164 case TGSI_OPCODE_UMUL:
3165 case TGSI_OPCODE_IMUL_HI:
3166 case TGSI_OPCODE_UMUL_HI:
3167 case TGSI_OPCODE_OR:
3168 case TGSI_OPCODE_SHL:
3169 case TGSI_OPCODE_ISHR:
3170 case TGSI_OPCODE_USHR:
3171 case TGSI_OPCODE_XOR:
3172 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3173 src0 = fetchSrc(0, c);
3174 src1 = fetchSrc(1, c);
3175 geni = mkOp2(op, dstTy, dst0[c], src0, src1);
3176 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3177 if (op == OP_MUL && dstTy == TYPE_F32)
3178 geni->dnz = info->io.mul_zero_wins;
3179 }
3180 break;
3181 case TGSI_OPCODE_MAD:
3182 case TGSI_OPCODE_UMAD:
3183 case TGSI_OPCODE_SAD:
3184 case TGSI_OPCODE_FMA:
3185 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3186 src0 = fetchSrc(0, c);
3187 src1 = fetchSrc(1, c);
3188 src2 = fetchSrc(2, c);
3189 geni = mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3190 if (dstTy == TYPE_F32)
3191 geni->dnz = info->io.mul_zero_wins;
3192 }
3193 break;
3194 case TGSI_OPCODE_MOV:
3195 case TGSI_OPCODE_CEIL:
3196 case TGSI_OPCODE_FLR:
3197 case TGSI_OPCODE_TRUNC:
3198 case TGSI_OPCODE_RCP:
3199 case TGSI_OPCODE_SQRT:
3200 case TGSI_OPCODE_IABS:
3201 case TGSI_OPCODE_INEG:
3202 case TGSI_OPCODE_NOT:
3203 case TGSI_OPCODE_DDX:
3204 case TGSI_OPCODE_DDY:
3205 case TGSI_OPCODE_DDX_FINE:
3206 case TGSI_OPCODE_DDY_FINE:
3207 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3208 mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3209 break;
3210 case TGSI_OPCODE_RSQ:
3211 src0 = fetchSrc(0, 0);
3212 val0 = getScratch();
3213 mkOp1(OP_ABS, TYPE_F32, val0, src0);
3214 mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3215 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3216 mkMov(dst0[c], val0);
3217 break;
3218 case TGSI_OPCODE_ARL:
3219 case TGSI_OPCODE_ARR:
3220 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3221 const RoundMode rnd =
3222 tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3223 src0 = fetchSrc(0, c);
3224 mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3225 }
3226 break;
3227 case TGSI_OPCODE_UARL:
3228 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3229 mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3230 break;
3231 case TGSI_OPCODE_POW:
3232 val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3233 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3234 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3235 break;
3236 case TGSI_OPCODE_EX2:
3237 case TGSI_OPCODE_LG2:
3238 val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3239 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3240 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3241 break;
3242 case TGSI_OPCODE_COS:
3243 case TGSI_OPCODE_SIN:
3244 val0 = getScratch();
3245 if (mask & 7) {
3246 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3247 mkOp1(op, TYPE_F32, val0, val0);
3248 for (c = 0; c < 3; ++c)
3249 if (dst0[c])
3250 mkMov(dst0[c], val0);
3251 }
3252 if (dst0[3]) {
3253 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3254 mkOp1(op, TYPE_F32, dst0[3], val0);
3255 }
3256 break;
3257 case TGSI_OPCODE_SCS:
3258 if (mask & 3) {
3259 val0 = mkOp1v(OP_PRESIN, TYPE_F32, getSSA(), fetchSrc(0, 0));
3260 if (dst0[0])
3261 mkOp1(OP_COS, TYPE_F32, dst0[0], val0);
3262 if (dst0[1])
3263 mkOp1(OP_SIN, TYPE_F32, dst0[1], val0);
3264 }
3265 if (dst0[2])
3266 loadImm(dst0[2], 0.0f);
3267 if (dst0[3])
3268 loadImm(dst0[3], 1.0f);
3269 break;
3270 case TGSI_OPCODE_EXP:
3271 src0 = fetchSrc(0, 0);
3272 val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3273 if (dst0[1])
3274 mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3275 if (dst0[0])
3276 mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3277 if (dst0[2])
3278 mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3279 if (dst0[3])
3280 loadImm(dst0[3], 1.0f);
3281 break;
3282 case TGSI_OPCODE_LOG:
3283 src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3284 val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3285 if (dst0[0] || dst0[1])
3286 val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3287 if (dst0[1]) {
3288 mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3289 mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3290 mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0)
3291 ->dnz = info->io.mul_zero_wins;
3292 }
3293 if (dst0[3])
3294 loadImm(dst0[3], 1.0f);
3295 break;
3296 case TGSI_OPCODE_DP2:
3297 val0 = buildDot(2);
3298 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3299 mkMov(dst0[c], val0);
3300 break;
3301 case TGSI_OPCODE_DP3:
3302 val0 = buildDot(3);
3303 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3304 mkMov(dst0[c], val0);
3305 break;
3306 case TGSI_OPCODE_DP4:
3307 val0 = buildDot(4);
3308 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3309 mkMov(dst0[c], val0);
3310 break;
3311 case TGSI_OPCODE_DPH:
3312 val0 = buildDot(3);
3313 src1 = fetchSrc(1, 3);
3314 mkOp2(OP_ADD, TYPE_F32, val0, val0, src1);
3315 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3316 mkMov(dst0[c], val0);
3317 break;
3318 case TGSI_OPCODE_DST:
3319 if (dst0[0])
3320 loadImm(dst0[0], 1.0f);
3321 if (dst0[1]) {
3322 src0 = fetchSrc(0, 1);
3323 src1 = fetchSrc(1, 1);
3324 mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1)
3325 ->dnz = info->io.mul_zero_wins;
3326 }
3327 if (dst0[2])
3328 mkMov(dst0[2], fetchSrc(0, 2));
3329 if (dst0[3])
3330 mkMov(dst0[3], fetchSrc(1, 3));
3331 break;
3332 case TGSI_OPCODE_LRP:
3333 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3334 src0 = fetchSrc(0, c);
3335 src1 = fetchSrc(1, c);
3336 src2 = fetchSrc(2, c);
3337 mkOp3(OP_MAD, TYPE_F32, dst0[c],
3338 mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2)
3339 ->dnz = info->io.mul_zero_wins;
3340 }
3341 break;
3342 case TGSI_OPCODE_LIT:
3343 handleLIT(dst0);
3344 break;
3345 case TGSI_OPCODE_XPD:
3346 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3347 if (c < 3) {
3348 val0 = getSSA();
3349 src0 = fetchSrc(1, (c + 1) % 3);
3350 src1 = fetchSrc(0, (c + 2) % 3);
3351 mkOp2(OP_MUL, TYPE_F32, val0, src0, src1)
3352 ->dnz = info->io.mul_zero_wins;
3353 mkOp1(OP_NEG, TYPE_F32, val0, val0);
3354
3355 src0 = fetchSrc(0, (c + 1) % 3);
3356 src1 = fetchSrc(1, (c + 2) % 3);
3357 mkOp3(OP_MAD, TYPE_F32, dst0[c], src0, src1, val0)
3358 ->dnz = info->io.mul_zero_wins;
3359 } else {
3360 loadImm(dst0[c], 1.0f);
3361 }
3362 }
3363 break;
3364 case TGSI_OPCODE_ISSG:
3365 case TGSI_OPCODE_SSG:
3366 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3367 src0 = fetchSrc(0, c);
3368 val0 = getScratch();
3369 val1 = getScratch();
3370 mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3371 mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3372 if (srcTy == TYPE_F32)
3373 mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3374 else
3375 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3376 }
3377 break;
3378 case TGSI_OPCODE_UCMP:
3379 srcTy = TYPE_U32;
3380 /* fallthrough */
3381 case TGSI_OPCODE_CMP:
3382 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3383 src0 = fetchSrc(0, c);
3384 src1 = fetchSrc(1, c);
3385 src2 = fetchSrc(2, c);
3386 if (src1 == src2)
3387 mkMov(dst0[c], src1);
3388 else
3389 mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3390 srcTy, dst0[c], srcTy, src1, src2, src0);
3391 }
3392 break;
3393 case TGSI_OPCODE_FRC:
3394 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3395 src0 = fetchSrc(0, c);
3396 val0 = getScratch();
3397 mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3398 mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3399 }
3400 break;
3401 case TGSI_OPCODE_ROUND:
3402 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3403 mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3404 ->rnd = ROUND_NI;
3405 break;
3406 case TGSI_OPCODE_SLT:
3407 case TGSI_OPCODE_SGE:
3408 case TGSI_OPCODE_SEQ:
3409 case TGSI_OPCODE_SGT:
3410 case TGSI_OPCODE_SLE:
3411 case TGSI_OPCODE_SNE:
3412 case TGSI_OPCODE_FSEQ:
3413 case TGSI_OPCODE_FSGE:
3414 case TGSI_OPCODE_FSLT:
3415 case TGSI_OPCODE_FSNE:
3416 case TGSI_OPCODE_ISGE:
3417 case TGSI_OPCODE_ISLT:
3418 case TGSI_OPCODE_USEQ:
3419 case TGSI_OPCODE_USGE:
3420 case TGSI_OPCODE_USLT:
3421 case TGSI_OPCODE_USNE:
3422 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3423 src0 = fetchSrc(0, c);
3424 src1 = fetchSrc(1, c);
3425 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3426 }
3427 break;
3428 case TGSI_OPCODE_VOTE_ALL:
3429 case TGSI_OPCODE_VOTE_ANY:
3430 case TGSI_OPCODE_VOTE_EQ:
3431 val0 = new_LValue(func, FILE_PREDICATE);
3432 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3433 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3434 mkOp1(op, dstTy, val0, val0)
3435 ->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3436 mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3437 }
3438 break;
3439 case TGSI_OPCODE_BALLOT:
3440 if (!tgsi.getDst(0).isMasked(0)) {
3441 val0 = new_LValue(func, FILE_PREDICATE);
3442 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, 0), zero);
3443 mkOp1(op, TYPE_U32, dst0[0], val0)->subOp = NV50_IR_SUBOP_VOTE_ANY;
3444 }
3445 if (!tgsi.getDst(0).isMasked(1))
3446 mkMov(dst0[1], zero, TYPE_U32);
3447 break;
3448 case TGSI_OPCODE_READ_FIRST:
3449 // ReadFirstInvocationARB(src) is implemented as
3450 // ReadInvocationARB(src, findLSB(ballot(true)))
3451 val0 = getScratch();
3452 mkOp1(OP_VOTE, TYPE_U32, val0, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
3453 mkOp2(OP_EXTBF, TYPE_U32, val0, val0, mkImm(0x2000))
3454 ->subOp = NV50_IR_SUBOP_EXTBF_REV;
3455 mkOp1(OP_BFIND, TYPE_U32, val0, val0)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3456 src1 = val0;
3457 /* fallthrough */
3458 case TGSI_OPCODE_READ_INVOC:
3459 if (tgsi.getOpcode() == TGSI_OPCODE_READ_INVOC)
3460 src1 = fetchSrc(1, 0);
3461 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3462 geni = mkOp3(op, dstTy, dst0[c], fetchSrc(0, c), src1, mkImm(0x1f));
3463 geni->subOp = NV50_IR_SUBOP_SHFL_IDX;
3464 }
3465 break;
3466 case TGSI_OPCODE_CLOCK:
3467 // Stick the 32-bit clock into the high dword of the logical result.
3468 if (!tgsi.getDst(0).isMasked(0))
3469 mkOp1(OP_MOV, TYPE_U32, dst0[0], zero);
3470 if (!tgsi.getDst(0).isMasked(1))
3471 mkOp1(OP_RDSV, TYPE_U32, dst0[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
3472 break;
3473 case TGSI_OPCODE_KILL_IF:
3474 val0 = new_LValue(func, FILE_PREDICATE);
3475 mask = 0;
3476 for (c = 0; c < 4; ++c) {
3477 const int s = tgsi.getSrc(0).getSwizzle(c);
3478 if (mask & (1 << s))
3479 continue;
3480 mask |= 1 << s;
3481 mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3482 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3483 }
3484 break;
3485 case TGSI_OPCODE_KILL:
3486 mkOp(OP_DISCARD, TYPE_NONE, NULL);
3487 break;
3488 case TGSI_OPCODE_TEX:
3489 case TGSI_OPCODE_TEX_LZ:
3490 case TGSI_OPCODE_TXB:
3491 case TGSI_OPCODE_TXL:
3492 case TGSI_OPCODE_TXP:
3493 case TGSI_OPCODE_LODQ:
3494 // R S L C Dx Dy
3495 handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3496 break;
3497 case TGSI_OPCODE_TXD:
3498 handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3499 break;
3500 case TGSI_OPCODE_TG4:
3501 handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3502 break;
3503 case TGSI_OPCODE_TEX2:
3504 handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3505 break;
3506 case TGSI_OPCODE_TXB2:
3507 case TGSI_OPCODE_TXL2:
3508 handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3509 break;
3510 case TGSI_OPCODE_SAMPLE:
3511 case TGSI_OPCODE_SAMPLE_B:
3512 case TGSI_OPCODE_SAMPLE_D:
3513 case TGSI_OPCODE_SAMPLE_L:
3514 case TGSI_OPCODE_SAMPLE_C:
3515 case TGSI_OPCODE_SAMPLE_C_LZ:
3516 handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3517 break;
3518 case TGSI_OPCODE_TXF_LZ:
3519 case TGSI_OPCODE_TXF:
3520 handleTXF(dst0, 1, 0x03);
3521 break;
3522 case TGSI_OPCODE_SAMPLE_I:
3523 handleTXF(dst0, 1, 0x03);
3524 break;
3525 case TGSI_OPCODE_SAMPLE_I_MS:
3526 handleTXF(dst0, 1, 0x20);
3527 break;
3528 case TGSI_OPCODE_TXQ:
3529 case TGSI_OPCODE_SVIEWINFO:
3530 handleTXQ(dst0, TXQ_DIMS, 1);
3531 break;
3532 case TGSI_OPCODE_TXQS:
3533 // The TXQ_TYPE query returns samples in its 3rd arg, but we need it to
3534 // be in .x
3535 dst0[1] = dst0[2] = dst0[3] = NULL;
3536 std::swap(dst0[0], dst0[2]);
3537 handleTXQ(dst0, TXQ_TYPE, 0);
3538 std::swap(dst0[0], dst0[2]);
3539 break;
3540 case TGSI_OPCODE_FBFETCH:
3541 handleFBFETCH(dst0);
3542 break;
3543 case TGSI_OPCODE_F2I:
3544 case TGSI_OPCODE_F2U:
3545 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3546 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3547 break;
3548 case TGSI_OPCODE_I2F:
3549 case TGSI_OPCODE_U2F:
3550 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3551 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3552 break;
3553 case TGSI_OPCODE_PK2H:
3554 val0 = getScratch();
3555 val1 = getScratch();
3556 mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3557 mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3558 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3559 mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3560 break;
3561 case TGSI_OPCODE_UP2H:
3562 src0 = fetchSrc(0, 0);
3563 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3564 geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3565 geni->subOp = c & 1;
3566 }
3567 break;
3568 case TGSI_OPCODE_EMIT:
3569 /* export the saved viewport index */
3570 if (viewport != NULL) {
3571 Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3572 info->out[info->io.viewportId].slot[0] * 4);
3573 mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3574 }
3575 /* fallthrough */
3576 case TGSI_OPCODE_ENDPRIM:
3577 {
3578 // get vertex stream (must be immediate)
3579 unsigned int stream = tgsi.getSrc(0).getValueU32(0, info);
3580 if (stream && op == OP_RESTART)
3581 break;
3582 if (info->prop.gp.maxVertices == 0)
3583 break;
3584 src0 = mkImm(stream);
3585 mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3586 break;
3587 }
3588 case TGSI_OPCODE_IF:
3589 case TGSI_OPCODE_UIF:
3590 {
3591 BasicBlock *ifBB = new BasicBlock(func);
3592
3593 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3594 condBBs.push(bb);
3595 joinBBs.push(bb);
3596
3597 mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3598
3599 setPosition(ifBB, true);
3600 }
3601 break;
3602 case TGSI_OPCODE_ELSE:
3603 {
3604 BasicBlock *elseBB = new BasicBlock(func);
3605 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3606
3607 forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3608 condBBs.push(bb);
3609
3610 forkBB->getExit()->asFlow()->target.bb = elseBB;
3611 if (!bb->isTerminated())
3612 mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3613
3614 setPosition(elseBB, true);
3615 }
3616 break;
3617 case TGSI_OPCODE_ENDIF:
3618 {
3619 BasicBlock *convBB = new BasicBlock(func);
3620 BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3621 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3622
3623 if (!bb->isTerminated()) {
3624 // we only want join if none of the clauses ended with CONT/BREAK/RET
3625 if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3626 insertConvergenceOps(convBB, forkBB);
3627 mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3628 bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3629 }
3630
3631 if (prevBB->getExit()->op == OP_BRA) {
3632 prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3633 prevBB->getExit()->asFlow()->target.bb = convBB;
3634 }
3635 setPosition(convBB, true);
3636 }
3637 break;
3638 case TGSI_OPCODE_BGNLOOP:
3639 {
3640 BasicBlock *lbgnBB = new BasicBlock(func);
3641 BasicBlock *lbrkBB = new BasicBlock(func);
3642
3643 loopBBs.push(lbgnBB);
3644 breakBBs.push(lbrkBB);
3645 if (loopBBs.getSize() > func->loopNestingBound)
3646 func->loopNestingBound++;
3647
3648 mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3649
3650 bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3651 setPosition(lbgnBB, true);
3652 mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3653 }
3654 break;
3655 case TGSI_OPCODE_ENDLOOP:
3656 {
3657 BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3658
3659 if (!bb->isTerminated()) {
3660 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3661 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3662 }
3663 setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3664
3665 // If the loop never breaks (e.g. only has RET's inside), then there
3666 // will be no way to get to the break bb. However BGNLOOP will have
3667 // already made a PREBREAK to it, so it must be in the CFG.
3668 if (getBB()->cfg.incidentCount() == 0)
3669 loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3670 }
3671 break;
3672 case TGSI_OPCODE_BRK:
3673 {
3674 if (bb->isTerminated())
3675 break;
3676 BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3677 mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3678 bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3679 }
3680 break;
3681 case TGSI_OPCODE_CONT:
3682 {
3683 if (bb->isTerminated())
3684 break;
3685 BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3686 mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3687 contBB->explicitCont = true;
3688 bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3689 }
3690 break;
3691 case TGSI_OPCODE_BGNSUB:
3692 {
3693 Subroutine *s = getSubroutine(ip);
3694 BasicBlock *entry = new BasicBlock(s->f);
3695 BasicBlock *leave = new BasicBlock(s->f);
3696
3697 // multiple entrypoints possible, keep the graph connected
3698 if (prog->getType() == Program::TYPE_COMPUTE)
3699 prog->main->call.attach(&s->f->call, Graph::Edge::TREE);
3700
3701 sub.cur = s;
3702 s->f->setEntry(entry);
3703 s->f->setExit(leave);
3704 setPosition(entry, true);
3705 return true;
3706 }
3707 case TGSI_OPCODE_ENDSUB:
3708 {
3709 sub.cur = getSubroutine(prog->main);
3710 setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
3711 return true;
3712 }
3713 case TGSI_OPCODE_CAL:
3714 {
3715 Subroutine *s = getSubroutine(tgsi.getLabel());
3716 mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
3717 func->call.attach(&s->f->call, Graph::Edge::TREE);
3718 return true;
3719 }
3720 case TGSI_OPCODE_RET:
3721 {
3722 if (bb->isTerminated())
3723 return true;
3724 BasicBlock *leave = BasicBlock::get(func->cfgExit);
3725
3726 if (!isEndOfSubroutine(ip + 1)) {
3727 // insert a PRERET at the entry if this is an early return
3728 // (only needed for sharing code in the epilogue)
3729 BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
3730 if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
3731 BasicBlock *pos = getBB();
3732 setPosition(root, false);
3733 mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
3734 setPosition(pos, true);
3735 }
3736 }
3737 mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
3738 bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
3739 }
3740 break;
3741 case TGSI_OPCODE_END:
3742 {
3743 // attach and generate epilogue code
3744 BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
3745 bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
3746 setPosition(epilogue, true);
3747 if (prog->getType() == Program::TYPE_FRAGMENT)
3748 exportOutputs();
3749 if (info->io.genUserClip > 0)
3750 handleUserClipPlanes();
3751 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
3752 }
3753 break;
3754 case TGSI_OPCODE_SWITCH:
3755 case TGSI_OPCODE_CASE:
3756 ERROR("switch/case opcode encountered, should have been lowered\n");
3757 abort();
3758 break;
3759 case TGSI_OPCODE_LOAD:
3760 handleLOAD(dst0);
3761 break;
3762 case TGSI_OPCODE_STORE:
3763 handleSTORE();
3764 break;
3765 case TGSI_OPCODE_BARRIER:
3766 geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
3767 geni->fixed = 1;
3768 geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
3769 break;
3770 case TGSI_OPCODE_MFENCE:
3771 case TGSI_OPCODE_LFENCE:
3772 case TGSI_OPCODE_SFENCE:
3773 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3774 geni->fixed = 1;
3775 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3776 break;
3777 case TGSI_OPCODE_MEMBAR:
3778 {
3779 uint32_t level = tgsi.getSrc(0).getValueU32(0, info);
3780 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3781 geni->fixed = 1;
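      // If only thread-group/shared visibility is requested, a CTA-level membar
      // is sufficient; any other bit forces a global (GL) memory barrier.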
3782 if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
3783 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
3784 else
3785 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
3786 }
3787 break;
3788 case TGSI_OPCODE_ATOMUADD:
3789 case TGSI_OPCODE_ATOMXCHG:
3790 case TGSI_OPCODE_ATOMCAS:
3791 case TGSI_OPCODE_ATOMAND:
3792 case TGSI_OPCODE_ATOMOR:
3793 case TGSI_OPCODE_ATOMXOR:
3794 case TGSI_OPCODE_ATOMUMIN:
3795 case TGSI_OPCODE_ATOMIMIN:
3796 case TGSI_OPCODE_ATOMUMAX:
3797 case TGSI_OPCODE_ATOMIMAX:
3798 handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
3799 break;
3800 case TGSI_OPCODE_RESQ:
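      // Resource size query: buffers are answered with a BUFQ on the buffer
      // symbol, images with a SUQ texture query on the image slot.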
3801 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
3802 geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
3803 makeSym(tgsi.getSrc(0).getFile(),
3804 tgsi.getSrc(0).getIndex(0), -1, 0, 0));
3805 if (tgsi.getSrc(0).isIndirect(0))
3806 geni->setIndirect(0, 1,
3807 fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0));
3808 } else {
3809 assert(tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE);
3810
3811 TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
3812 for (int c = 0, d = 0; c < 4; ++c) {
3813 if (dst0[c]) {
3814 texi->setDef(d++, dst0[c]);
3815 texi->tex.mask |= 1 << c;
3816 }
3817 }
3818 texi->tex.r = tgsi.getSrc(0).getIndex(0);
3819 texi->tex.target = getImageTarget(code, texi->tex.r);
3820 bb->insertTail(texi);
3821
3822 if (tgsi.getSrc(0).isIndirect(0))
3823 texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
3824 }
3825 break;
3826 case TGSI_OPCODE_IBFE:
3827 case TGSI_OPCODE_UBFE:
3828 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3829 src0 = fetchSrc(0, c);
3830 val0 = getScratch();
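         // EXTBF takes the bitfield description packed as (width << 8) | offset.
         // If both operands are immediates the packed value is built directly;
         // otherwise INSBF with 0x808 inserts the 8-bit width at bit 8, above
         // the offset.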
3831 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
3832 tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
3833 loadImm(val0, (tgsi.getSrc(2).getValueU32(c, info) << 8) |
3834 tgsi.getSrc(1).getValueU32(c, info));
3835 } else {
3836 src1 = fetchSrc(1, c);
3837 src2 = fetchSrc(2, c);
3838 mkOp3(OP_INSBF, TYPE_U32, val0, src2, mkImm(0x808), src1);
3839 }
3840 mkOp2(OP_EXTBF, dstTy, dst0[c], src0, val0);
3841 }
3842 break;
3843 case TGSI_OPCODE_BFI:
3844 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3845 src0 = fetchSrc(0, c);
3846 src1 = fetchSrc(1, c);
3847 src2 = fetchSrc(2, c);
3848 src3 = fetchSrc(3, c);
3849 val0 = getScratch();
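         // Same packed (width << 8) | offset encoding as for IBFE/UBFE; the
         // second INSBF then inserts src1 (the bits) into src0 (the base) at
         // that position.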
3850 mkOp3(OP_INSBF, TYPE_U32, val0, src3, mkImm(0x808), src2);
3851 mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, val0, src0);
3852 }
3853 break;
3854 case TGSI_OPCODE_LSB:
3855 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3856 src0 = fetchSrc(0, c);
3857 val0 = getScratch();
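         // Find the LSB by reversing the bit order (EXTBF_REV; 0x2000 selects
         // the full 32-bit field) and locating the highest set bit; the SAMT
         // form of BFIND maps that back to the original LSB position.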
3858 geni = mkOp2(OP_EXTBF, TYPE_U32, val0, src0, mkImm(0x2000));
3859 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3860 geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], val0);
3861 geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3862 }
3863 break;
3864 case TGSI_OPCODE_IMSB:
3865 case TGSI_OPCODE_UMSB:
3866 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3867 src0 = fetchSrc(0, c);
3868 mkOp1(OP_BFIND, srcTy, dst0[c], src0);
3869 }
3870 break;
3871 case TGSI_OPCODE_BREV:
3872 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3873 src0 = fetchSrc(0, c);
3874 geni = mkOp2(OP_EXTBF, TYPE_U32, dst0[c], src0, mkImm(0x2000));
3875 geni->subOp = NV50_IR_SUBOP_EXTBF_REV;
3876 }
3877 break;
3878 case TGSI_OPCODE_POPC:
3879 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3880 src0 = fetchSrc(0, c);
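         // POPCNT counts the set bits of src0 & src1, so the same value is
         // passed twice to get a plain population count.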
3881 mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
3882 }
3883 break;
3884 case TGSI_OPCODE_INTERP_CENTROID:
3885 case TGSI_OPCODE_INTERP_SAMPLE:
3886 case TGSI_OPCODE_INTERP_OFFSET:
3887 handleINTERP(dst0);
3888 break;
3889 case TGSI_OPCODE_I642F:
3890 case TGSI_OPCODE_U642F:
3891 case TGSI_OPCODE_D2I:
3892 case TGSI_OPCODE_D2U:
3893 case TGSI_OPCODE_D2F: {
3894 int pos = 0;
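      // 64-bit sources occupy two consecutive 32-bit TGSI channels; merge each
      // pair into one 64-bit SSA value before converting it down to 32 bits.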
3895 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3896 Value *dreg = getSSA(8);
3897 src0 = fetchSrc(0, pos);
3898 src1 = fetchSrc(0, pos + 1);
3899 mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
3900 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
3901 if (!isFloatType(dstTy))
3902 cvt->rnd = ROUND_Z;
3903 pos += 2;
3904 }
3905 break;
3906 }
3907 case TGSI_OPCODE_I2I64:
3908 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3909 dst0[c] = fetchSrc(0, c / 2);
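         // Sign-extend: the high word is the low word shifted right
         // arithmetically by 31.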
3910 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
3911 c++;
3912 }
3913 break;
3914 case TGSI_OPCODE_U2I64:
3915 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3916 dst0[c] = fetchSrc(0, c / 2);
3917 dst0[c + 1] = zero;
3918 c++;
3919 }
3920 break;
3921 case TGSI_OPCODE_F2I64:
3922 case TGSI_OPCODE_F2U64:
3923 case TGSI_OPCODE_I2D:
3924 case TGSI_OPCODE_U2D:
3925 case TGSI_OPCODE_F2D:
3926 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3927 Value *dreg = getSSA(8);
3928 Instruction *cvt = mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
3929 if (!isFloatType(dstTy))
3930 cvt->rnd = ROUND_Z;
3931 mkSplit(&dst0[c], 4, dreg);
3932 c++;
3933 }
3934 break;
3935 case TGSI_OPCODE_D2I64:
3936 case TGSI_OPCODE_D2U64:
3937 case TGSI_OPCODE_I642D:
3938 case TGSI_OPCODE_U642D:
3939 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3940 src0 = getSSA(8);
3941 Value *dst = getSSA(8), *tmp[2];
3942 tmp[0] = fetchSrc(0, c);
3943 tmp[1] = fetchSrc(0, c + 1);
3944 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3945 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst, srcTy, src0);
3946 if (!isFloatType(dstTy))
3947 cvt->rnd = ROUND_Z;
3948 mkSplit(&dst0[c], 4, dst);
3949 c++;
3950 }
3951 break;
3952 case TGSI_OPCODE_I64NEG:
3953 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3954 src0 = getSSA(8);
3955 Value *dst = getSSA(8), *tmp[2];
3956 tmp[0] = fetchSrc(0, c);
3957 tmp[1] = fetchSrc(0, c + 1);
3958 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3959 mkOp2(OP_SUB, dstTy, dst, zero, src0);
3960 mkSplit(&dst0[c], 4, dst);
3961 c++;
3962 }
3963 break;
3964 case TGSI_OPCODE_I64ABS:
3965 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3966 src0 = getSSA(8);
3967 Value *neg = getSSA(8), *srcComp[2], *negComp[2];
3968 srcComp[0] = fetchSrc(0, c);
3969 srcComp[1] = fetchSrc(0, c + 1);
3970 mkOp2(OP_MERGE, TYPE_U64, src0, srcComp[0], srcComp[1]);
3971 mkOp2(OP_SUB, dstTy, neg, zero, src0);
3972 mkSplit(negComp, 4, neg);
3973 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c], TYPE_S32,
3974 negComp[0], srcComp[0], srcComp[1]);
3975 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c + 1], TYPE_S32,
3976 negComp[1], srcComp[1], srcComp[1]);
3977 c++;
3978 }
3979 break;
3980 case TGSI_OPCODE_DABS:
3981 case TGSI_OPCODE_DNEG:
3982 case TGSI_OPCODE_DRCP:
3983 case TGSI_OPCODE_DSQRT:
3984 case TGSI_OPCODE_DRSQ:
3985 case TGSI_OPCODE_DTRUNC:
3986 case TGSI_OPCODE_DCEIL:
3987 case TGSI_OPCODE_DFLR:
3988 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3989 src0 = getSSA(8);
3990 Value *dst = getSSA(8), *tmp[2];
3991 tmp[0] = fetchSrc(0, c);
3992 tmp[1] = fetchSrc(0, c + 1);
3993 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3994 mkOp1(op, dstTy, dst, src0);
3995 mkSplit(&dst0[c], 4, dst);
3996 c++;
3997 }
3998 break;
3999 case TGSI_OPCODE_DFRAC:
4000 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4001 src0 = getSSA(8);
4002 Value *dst = getSSA(8), *tmp[2];
4003 tmp[0] = fetchSrc(0, c);
4004 tmp[1] = fetchSrc(0, c + 1);
4005 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
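         // frac(x) = x - floor(x), computed entirely in F64.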
4006 mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
4007 mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
4008 mkSplit(&dst0[c], 4, dst);
4009 c++;
4010 }
4011 break;
4012 case TGSI_OPCODE_U64SEQ:
4013 case TGSI_OPCODE_U64SNE:
4014 case TGSI_OPCODE_U64SLT:
4015 case TGSI_OPCODE_U64SGE:
4016 case TGSI_OPCODE_I64SLT:
4017 case TGSI_OPCODE_I64SGE:
4018 case TGSI_OPCODE_DSLT:
4019 case TGSI_OPCODE_DSGE:
4020 case TGSI_OPCODE_DSEQ:
4021 case TGSI_OPCODE_DSNE: {
4022 int pos = 0;
4023 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4024 Value *tmp[2];
4025
4026 src0 = getSSA(8);
4027 src1 = getSSA(8);
4028 tmp[0] = fetchSrc(0, pos);
4029 tmp[1] = fetchSrc(0, pos + 1);
4030 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4031 tmp[0] = fetchSrc(1, pos);
4032 tmp[1] = fetchSrc(1, pos + 1);
4033 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4034 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
4035 pos += 2;
4036 }
4037 break;
4038 }
4039 case TGSI_OPCODE_U64MIN:
4040 case TGSI_OPCODE_U64MAX:
4041 case TGSI_OPCODE_I64MIN:
4042 case TGSI_OPCODE_I64MAX: {
4043 dstTy = isSignedIntType(dstTy) ? TYPE_S32 : TYPE_U32;
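      // 64-bit min/max is done as two chained 32-bit ops: the high-word compare
      // sets a flag which the low-word op consumes to decide which half to keep.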
4044 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4045 Value *flag = getSSA(1, FILE_FLAGS);
4046 src0 = fetchSrc(0, c + 1);
4047 src1 = fetchSrc(1, c + 1);
4048 geni = mkOp2(op, dstTy, dst0[c + 1], src0, src1);
4049 geni->subOp = NV50_IR_SUBOP_MINMAX_HIGH;
4050 geni->setFlagsDef(1, flag);
4051
4052 src0 = fetchSrc(0, c);
4053 src1 = fetchSrc(1, c);
4054 geni = mkOp2(op, TYPE_U32, dst0[c], src0, src1);
4055 geni->subOp = NV50_IR_SUBOP_MINMAX_LOW;
4056 geni->setFlagsSrc(2, flag);
4057
4058 c++;
4059 }
4060 break;
4061 }
4062 case TGSI_OPCODE_U64SHL:
4063 case TGSI_OPCODE_I64SHR:
4064 case TGSI_OPCODE_U64SHR:
4065 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4066 src0 = getSSA(8);
4067 Value *dst = getSSA(8), *tmp[2];
4068 tmp[0] = fetchSrc(0, c);
4069 tmp[1] = fetchSrc(0, c + 1);
4070 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4071 src1 = fetchSrc(1, c / 2);
4072 mkOp2(op, dstTy, dst, src0, src1);
4073 mkSplit(&dst0[c], 4, dst);
4074 c++;
4075 }
4076 break;
4077 case TGSI_OPCODE_U64ADD:
4078 case TGSI_OPCODE_U64MUL:
4079 case TGSI_OPCODE_DADD:
4080 case TGSI_OPCODE_DMUL:
4081 case TGSI_OPCODE_DDIV:
4082 case TGSI_OPCODE_DMAX:
4083 case TGSI_OPCODE_DMIN:
4084 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4085 src0 = getSSA(8);
4086 src1 = getSSA(8);
4087 Value *dst = getSSA(8), *tmp[2];
4088 tmp[0] = fetchSrc(0, c);
4089 tmp[1] = fetchSrc(0, c + 1);
4090 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4091 tmp[0] = fetchSrc(1, c);
4092 tmp[1] = fetchSrc(1, c + 1);
4093 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4094 mkOp2(op, dstTy, dst, src0, src1);
4095 mkSplit(&dst0[c], 4, dst);
4096 c++;
4097 }
4098 break;
4099 case TGSI_OPCODE_DMAD:
4100 case TGSI_OPCODE_DFMA:
4101 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4102 src0 = getSSA(8);
4103 src1 = getSSA(8);
4104 src2 = getSSA(8);
4105 Value *dst = getSSA(8), *tmp[2];
4106 tmp[0] = fetchSrc(0, c);
4107 tmp[1] = fetchSrc(0, c + 1);
4108 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4109 tmp[0] = fetchSrc(1, c);
4110 tmp[1] = fetchSrc(1, c + 1);
4111 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4112 tmp[0] = fetchSrc(2, c);
4113 tmp[1] = fetchSrc(2, c + 1);
4114 mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
4115 mkOp3(op, dstTy, dst, src0, src1, src2);
4116 mkSplit(&dst0[c], 4, dst);
4117 c++;
4118 }
4119 break;
4120 case TGSI_OPCODE_DROUND:
4121 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4122 src0 = getSSA(8);
4123 Value *dst = getSSA(8), *tmp[2];
4124 tmp[0] = fetchSrc(0, c);
4125 tmp[1] = fetchSrc(0, c + 1);
4126 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4127 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
4128 ->rnd = ROUND_NI;
4129 mkSplit(&dst0[c], 4, dst);
4130 c++;
4131 }
4132 break;
4133 case TGSI_OPCODE_DSSG:
4134 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4135 src0 = getSSA(8);
4136 Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
4137 tmp[0] = fetchSrc(0, c);
4138 tmp[1] = fetchSrc(0, c + 1);
4139 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4140
4141 val0 = getScratch();
4142 val1 = getScratch();
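         // OP_SET with an F32 destination yields 1.0f or 0.0f, so
         // (x > 0) - (x < 0) produces the sign as a float, which is then
         // widened back to F64.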
4143 // The zero is wrong here since it's only 32-bit, but it works out in
4144 // the end since it gets replaced with $r63.
4145 mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
4146 mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
4147 mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
4148 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
4149 mkSplit(&dst0[c], 4, dst);
4150 c++;
4151 }
4152 break;
4153 case TGSI_OPCODE_I64SSG:
4154 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4155 src0 = getSSA(8);
4156 Value *tmp[2];
4157 tmp[0] = fetchSrc(0, c);
4158 tmp[1] = fetchSrc(0, c + 1);
4159 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4160
4161 val0 = getScratch();
4162 val1 = getScratch();
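         // With an integer destination OP_SET yields 0 or ~0, so val1 - val0
         // evaluates to -1, 0 or +1; the high result word is then filled by the
         // sign-extending shift below.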
4163 mkCmp(OP_SET, CC_GT, TYPE_U32, val0, TYPE_S64, src0, zero);
4164 mkCmp(OP_SET, CC_LT, TYPE_U32, val1, TYPE_S64, src0, zero);
4165 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
4166             mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
4167 c++;
4168 }
4169 break;
4170 default:
4171 ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
4172 assert(0);
4173 break;
4174 }
4175
4176 if (tgsi.dstCount()) {
4177 for (c = 0; c < 4; ++c) {
4178 if (!dst0[c])
4179 continue;
4180 if (dst0[c] != rDst0[c])
4181 mkMov(rDst0[c], dst0[c]);
4182 storeDst(0, c, rDst0[c]);
4183 }
4184 }
4185 vtxBaseValid = 0;
4186
4187 return true;
4188 }
4189
4190 void
4191 Converter::handleUserClipPlanes()
4192 {
4193 Value *res[8];
4194 int n, i, c;
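   // Accumulate dot(clipVtx, plane[i]) for every generated user clip plane; the
   // plane coefficients are read from the driver's auxiliary constant buffer at
   // ucpBase, and the results are exported to the trailing clip-distance outputs.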
4195
4196 for (c = 0; c < 4; ++c) {
4197 for (i = 0; i < info->io.genUserClip; ++i) {
4198 Symbol *sym = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4199 TYPE_F32, info->io.ucpBase + i * 16 + c * 4);
4200 Value *ucp = mkLoadv(TYPE_F32, sym, NULL);
4201 if (c == 0)
4202 res[i] = mkOp2v(OP_MUL, TYPE_F32, getScratch(), clipVtx[c], ucp);
4203 else
4204 mkOp3(OP_MAD, TYPE_F32, res[i], clipVtx[c], ucp, res[i]);
4205 }
4206 }
4207
4208 const int first = info->numOutputs - (info->io.genUserClip + 3) / 4;
4209
4210 for (i = 0; i < info->io.genUserClip; ++i) {
4211 n = i / 4 + first;
4212 c = i % 4;
4213 Symbol *sym =
4214 mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32, info->out[n].slot[c] * 4);
4215 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, res[i]);
4216 }
4217 }
4218
4219 void
4220 Converter::exportOutputs()
4221 {
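   // Alpha test: if enabled, compare COLOR0.w against the reference value kept
   // in the auxiliary constant buffer and DISCARD fragments that fail the test.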
4222 if (info->io.alphaRefBase) {
4223 for (unsigned int i = 0; i < info->numOutputs; ++i) {
4224 if (info->out[i].sn != TGSI_SEMANTIC_COLOR ||
4225 info->out[i].si != 0)
4226 continue;
4227 const unsigned int c = 3;
4228 if (!oData.exists(sub.cur->values, i, c))
4229 continue;
4230 Value *val = oData.load(sub.cur->values, i, c, NULL);
4231 if (!val)
4232 continue;
4233
4234 Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4235 TYPE_U32, info->io.alphaRefBase);
4236 Value *pred = new_LValue(func, FILE_PREDICATE);
4237 mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
4238 mkLoadv(TYPE_U32, ref, NULL))
4239 ->subOp = 1;
4240 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
4241 }
4242 }
4243
4244 for (unsigned int i = 0; i < info->numOutputs; ++i) {
4245 for (unsigned int c = 0; c < 4; ++c) {
4246 if (!oData.exists(sub.cur->values, i, c))
4247 continue;
4248 Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
4249 info->out[i].slot[c] * 4);
4250 Value *val = oData.load(sub.cur->values, i, c, NULL);
4251 if (val) {
4252 if (info->out[i].sn == TGSI_SEMANTIC_POSITION)
4253 mkOp1(OP_SAT, TYPE_F32, val, val);
4254 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
4255 }
4256 }
4257 }
4258 }
4259
4260 Converter::Converter(Program *ir, const tgsi::Source *code) : BuildUtil(ir),
4261 code(code),
4262 tgsi(NULL),
4263 tData(this), lData(this), aData(this), oData(this)
4264 {
4265 info = code->info;
4266
4267 const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
4268 const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
4269 const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);
4270
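   // Two views of the TGSI TEMPORARY file are set up: tData in GPRs and lData
   // in local memory; address and output registers (aData, oData) live in GPRs.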
4271 tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
4272 lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
4273 aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
4274 oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);
4275
4276 zero = mkImm((uint32_t)0);
4277
4278 vtxBaseValid = 0;
4279 }
4280
4281 Converter::~Converter()
4282 {
4283 }
4284
4285 inline const Converter::Location *
4286 Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
4287 {
4288 ValueMap::l_iterator it = s->values.l.find(v);
4289 return it == s->values.l.end() ? NULL : &it->second;
4290 }
4291
4292 template<typename T> inline void
4293 Converter::BindArgumentsPass::updateCallArgs(
4294 Instruction *i, void (Instruction::*setArg)(int, Value *),
4295 T (Function::*proto))
4296 {
4297 Function *g = i->asFlow()->target.fn;
4298 Subroutine *subg = conv.getSubroutine(g);
4299
4300 for (unsigned a = 0; a < (g->*proto).size(); ++a) {
4301 Value *v = (g->*proto)[a].get();
4302 const Converter::Location &l = *getValueLocation(subg, v);
4303 Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);
4304
4305 (i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
4306 }
4307 }
4308
4309 template<typename T> inline void
4310 Converter::BindArgumentsPass::updatePrototype(
4311 BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
4312 {
4313 (func->*updateSet)();
4314
4315 for (unsigned i = 0; i < set->getSize(); ++i) {
4316 Value *v = func->getLValue(i);
4317 const Converter::Location *l = getValueLocation(sub, v);
4318
4319 // only include values with a matching TGSI register
4320 if (set->test(i) && l && !conv.code->locals.count(*l))
4321 (func->*proto).push_back(v);
4322 }
4323 }
4324
4325 bool
4326 Converter::BindArgumentsPass::visit(Function *f)
4327 {
4328 sub = conv.getSubroutine(f);
4329
4330 for (ArrayList::Iterator bi = f->allBBlocks.iterator();
4331 !bi.end(); bi.next()) {
4332 for (Instruction *i = BasicBlock::get(bi)->getFirst();
4333 i; i = i->next) {
4334 if (i->op == OP_CALL && !i->asFlow()->builtin) {
4335 updateCallArgs(i, &Instruction::setSrc, &Function::ins);
4336 updateCallArgs(i, &Instruction::setDef, &Function::outs);
4337 }
4338 }
4339 }
4340
4341 if (func == prog->main && prog->getType() != Program::TYPE_COMPUTE)
4342 return true;
4343 updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
4344 &Function::buildLiveSets, &Function::ins);
4345 updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
4346 &Function::buildDefSets, &Function::outs);
4347
4348 return true;
4349 }
4350
4351 bool
4352 Converter::run()
4353 {
4354 BasicBlock *entry = new BasicBlock(prog->main);
4355 BasicBlock *leave = new BasicBlock(prog->main);
4356
4357 prog->main->setEntry(entry);
4358 prog->main->setExit(leave);
4359
4360 setPosition(entry, true);
4361 sub.cur = getSubroutine(prog->main);
4362
4363 if (info->io.genUserClip > 0) {
4364 for (int c = 0; c < 4; ++c)
4365 clipVtx[c] = getScratch();
4366 }
4367
4368 switch (prog->getType()) {
4369 case Program::TYPE_TESSELLATION_CONTROL:
4370 outBase = mkOp2v(
4371 OP_SUB, TYPE_U32, getSSA(),
4372 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
4373 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
4374 break;
4375 case Program::TYPE_FRAGMENT: {
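      // Fragment shaders read gl_FragCoord.w from fragCoord[3]; the position.w
      // system value is loaded and inverted once up front.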
4376 Symbol *sv = mkSysVal(SV_POSITION, 3);
4377 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
4378 mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
4379 break;
4380 }
4381 default:
4382 break;
4383 }
4384
4385 if (info->io.viewportId >= 0)
4386 viewport = getScratch();
4387 else
4388 viewport = NULL;
4389
4390 for (ip = 0; ip < code->scan.num_instructions; ++ip) {
4391 if (!handleInstruction(&code->insns[ip]))
4392 return false;
4393 }
4394
4395 if (!BindArgumentsPass(*this).run(prog))
4396 return false;
4397
4398 return true;
4399 }
4400
4401 } // unnamed namespace
4402
4403 namespace nv50_ir {
4404
4405 bool
4406 Program::makeFromTGSI(struct nv50_ir_prog_info *info)
4407 {
4408 tgsi::Source src(info);
4409 if (!src.scanSource())
4410 return false;
4411 tlsSize = info->bin.tlsSpace;
4412
4413 Converter builder(this, &src);
4414 return builder.run();
4415 }
4416
4417 } // namespace nv50_ir