8ba3d5a05006f6703c9039e83dff78df21a708ff
[mesa.git] / src / gallium / drivers / nouveau / nv30 / nvfx_vertprog.c
1 #include <strings.h>
2 #include "pipe/p_context.h"
3 #include "pipe/p_defines.h"
4 #include "pipe/p_state.h"
5 #include "util/u_dynarray.h"
6 #include "util/u_debug.h"
7 #include "util/u_memory.h"
8
9 #include "pipe/p_shader_tokens.h"
10 #include "tgsi/tgsi_parse.h"
11 #include "tgsi/tgsi_dump.h"
12 #include "tgsi/tgsi_util.h"
13 #include "tgsi/tgsi_ureg.h"
14
15 #include "draw/draw_context.h"
16
17 #include "nv_object.xml.h"
18 #include "nouveau_debug.h"
19 #include "nv30/nv30-40_3d.xml.h"
20 #include "nv30/nv30_state.h"
21
22 /* TODO (at least...):
23 * 1. Indexed consts + ARL
24 * 3. NV_vp11, NV_vp2, NV_vp3 features
25 * - extra arith opcodes
26 * - branching
27 * - texture sampling
28 * - indexed attribs
29 * - indexed results
30 * 4. bugs
31 */
32
33 #include "nv30/nv30_vertprog.h"
34 #include "nv30/nv40_vertprog.h"
35
/* Branch targets for one BGNLOOP/ENDLOOP nesting level, expressed as TGSI
 * instruction indices.  Pushed on vpc->loop_stack at BGNLOOP and consumed
 * by BRK/CONT/ENDLOOP. */
struct nvfx_loop_entry {
   unsigned brk_target;  /* instruction just past the loop, for BRK */
   unsigned cont_target; /* loop header, for CONT and the ENDLOOP back-branch */
};
40
/* Per-translation context used while converting one TGSI vertex shader
 * into nv30/nv40 vertex processor instructions. */
struct nvfx_vpc {
   struct pipe_shader_state pipe;   /* shader state (token stream) being translated */
   struct nv30_vertprog *vp;        /* hardware program being built */
   struct tgsi_shader_info* info;

   struct nv30_vertprog_exec *vpi;  /* instruction currently being emitted */

   unsigned r_temps;                /* bitmask of allocated hw temp registers */
   unsigned r_temps_discard;        /* temps to release after the current insn */
   struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS]; /* TGSI output -> hw reg */
   struct nvfx_reg *r_address;      /* TGSI ADDRESS regs -> hw regs */
   struct nvfx_reg *r_temp;         /* TGSI TEMPORARY regs -> hw temps */
   struct nvfx_reg *r_const;        /* TGSI CONSTANT regs -> const slots */
   struct nvfx_reg r_0_1;           /* (0, 1) constant used to emulate saturate on nv3x */

   struct nvfx_reg *imm;            /* TGSI immediates -> const slots */
   unsigned nr_imm;

   int hpos_idx;                    /* output index carrying POSITION */
   int cvtx_idx;                    /* output index carrying CLIPVERTEX, or -1 */

   unsigned is_nv4x;                /* ~0 when targeting nv4x, 0 for nv3x */

   struct util_dynarray label_relocs; /* branches pending TGSI-label resolution */
   struct util_dynarray loop_stack;   /* stack of struct nvfx_loop_entry */
};
67
68 static struct nvfx_reg
69 temp(struct nvfx_vpc *vpc)
70 {
71 int idx = ffs(~vpc->r_temps) - 1;
72
73 if (idx < 0 || (!vpc->is_nv4x && idx >= 16)) {
74 NOUVEAU_ERR("out of temps!!\n");
75 return nvfx_reg(NVFXSR_TEMP, 0);
76 }
77
78 vpc->r_temps |= (1 << idx);
79 vpc->r_temps_discard |= (1 << idx);
80 return nvfx_reg(NVFXSR_TEMP, idx);
81 }
82
83 static inline void
84 release_temps(struct nvfx_vpc *vpc)
85 {
86 vpc->r_temps &= ~vpc->r_temps_discard;
87 vpc->r_temps_discard = 0;
88 }
89
90 static struct nvfx_reg
91 constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
92 {
93 struct nv30_vertprog *vp = vpc->vp;
94 struct nv30_vertprog_data *vpd;
95 int idx;
96
97 if (pipe >= 0) {
98 for (idx = 0; idx < vp->nr_consts; idx++) {
99 if (vp->consts[idx].index == pipe)
100 return nvfx_reg(NVFXSR_CONST, idx);
101 }
102 }
103
104 idx = vp->nr_consts++;
105 vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
106 vpd = &vp->consts[idx];
107
108 vpd->index = pipe;
109 vpd->value[0] = x;
110 vpd->value[1] = y;
111 vpd->value[2] = z;
112 vpd->value[3] = w;
113 return nvfx_reg(NVFXSR_CONST, idx);
114 }
115
116 #define arith(s,t,o,d,m,s0,s1,s2) \
117 nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))
118
/*
 * Encode source operand 'src' into source slot 'pos' (0..2) of the 4-dword
 * instruction 'hw'.  The register type/index/swizzle are first packed into
 * the scratch word 'sr', then scattered into the instruction dwords
 * according to which slot is being filled.
 */
static void
emit_src(struct nvfx_vpc *vpc, uint32_t *hw,
         int pos, struct nvfx_src src)
{
   struct nv30_vertprog *vp = vpc->vp;
   uint32_t sr = 0;
   struct nvfx_relocation reloc;

   switch (src.reg.type) {
   case NVFXSR_TEMP:
      sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
      sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
      break;
   case NVFXSR_INPUT:
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      /* Record which vertex attributes the program reads. */
      vp->ir |= (1 << src.reg.index);
      hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
      break;
   case NVFXSR_CONST:
      sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      /* In-range indices refer to relocatable program constants whose
       * final hw slot is patched at upload time via const_relocs.
       * Out-of-range indices (e.g. the 512+ clip plane constants used
       * by the translator) are encoded directly into the instruction. */
      if (src.reg.index < 256 && src.reg.index >= -256) {
         reloc.location = vp->nr_insns - 1;
         reloc.target = src.reg.index;
         util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
      } else {
         hw[1] |= (src.reg.index << NVFX_VP(INST_CONST_SRC_SHIFT)) &
               NVFX_VP(INST_CONST_SRC_MASK);
      }
      break;
   case NVFXSR_NONE:
      /* Unused source slots still need a valid register type field. */
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      break;
   default:
      assert(0);
   }

   if (src.negate)
      sr |= NVFX_VP(SRC_NEGATE);

   /* Per-source absolute-value bits live at fixed positions in dword 0. */
   if (src.abs)
      hw[0] |= (1 << (21 + pos));

   sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
          (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
          (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
          (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

   if(src.indirect) {
      /* Relative addressing is only encodable for const/input reads. */
      if(src.reg.type == NVFXSR_CONST)
         hw[3] |= NVFX_VP(INST_INDEX_CONST);
      else if(src.reg.type == NVFXSR_INPUT)
         hw[0] |= NVFX_VP(INST_INDEX_INPUT);
      else
         assert(0);

      if(src.indirect_reg)
         hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
      hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
   }

   /* Each source slot straddles dwords differently; split 'sr' accordingly. */
   switch (pos) {
   case 0:
      hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
                NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
      hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
            NVFX_VP(INST_SRC0L_SHIFT);
      break;
   case 1:
      hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
      break;
   case 2:
      hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
                NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
      hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
            NVFX_VP(INST_SRC2L_SHIFT);
      break;
   default:
      assert(0);
   }
}
202
/*
 * Encode destination register 'dst' into instruction 'hw'.  'slot' selects
 * the vector (0) or scalar (1) execution unit, which use different
 * destination fields on nv4x.  For outputs, also accumulates the result
 * mask (vp->or) used to program the hardware's output routing.
 */
static void
emit_dst(struct nvfx_vpc *vpc, uint32_t *hw,
         int slot, struct nvfx_reg dst)
{
   struct nv30_vertprog *vp = vpc->vp;

   switch (dst.type) {
   case NVFXSR_NONE:
      /* No destination: write to the "null" temp encoding. */
      if(!vpc->is_nv4x)
         hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         else
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
      }
      break;
   case NVFXSR_TEMP:
      if(!vpc->is_nv4x)
         hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
         else
            hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
      }
      break;
   case NVFXSR_OUTPUT:
      /* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
      if(vpc->is_nv4x) {
         /* nv4x has no dedicated clip-distance outputs: clip distances
          * are routed through FOGC/PSZ components, with vp->or bits
          * 6..11 enabling each distance.  Bits 0..5 track the regular
          * color/fog/point-size outputs actually written. */
         switch (dst.index) {
         case NV30_VP_INST_DEST_CLP(0):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 6);
            break;
         case NV30_VP_INST_DEST_CLP(1):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 7);
            break;
         case NV30_VP_INST_DEST_CLP(2):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 8);
            break;
         case NV30_VP_INST_DEST_CLP(3):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 9);
            break;
         case NV30_VP_INST_DEST_CLP(4):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 10);
            break;
         case NV30_VP_INST_DEST_CLP(5):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 11);
            break;
         case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
         case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
         case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
         case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
         case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
         case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
         }
      }

      if(!vpc->is_nv4x) {
         hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
         hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

         /*XXX: no way this is entirely correct, someone needs to
          * figure out what exactly it is.
          */
         hw[3] |= 0x800;
      } else {
         hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
         if (slot == 0) {
            hw[0] |= NV40_VP_INST_VEC_RESULT;
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         } else {
            hw[3] |= NV40_VP_INST_SCA_RESULT;
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         }
      }
      break;
   default:
      assert(0);
   }
}
292
/*
 * Append one hardware instruction built from 'insn' to vp->insns.
 * insn.op packs the execution slot in bits 7+ (0 = vector unit,
 * 1 = scalar unit) and the unit-local opcode in the low 7 bits, as
 * encoded by the arith() macro.
 */
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
   struct nv30_vertprog *vp = vpc->vp;
   unsigned slot = insn.op >> 7;
   unsigned op = insn.op & 0x7f;
   uint32_t *hw;

   /* NOTE(review): unchecked realloc — on OOM this loses the old array
    * and crashes below; matches the file's prevailing (optimistic)
    * allocation style. */
   vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
   vpc->vpi = &vp->insns[vp->nr_insns - 1];
   memset(vpc->vpi, 0, sizeof(*vpc->vpi));

   hw = vpc->vpi->data;

   /* Condition-code test/update fields (shared nv3x/nv4x layout). */
   if (insn.cc_test != NVFX_COND_TR)
      hw[0] |= NVFX_VP(INST_COND_TEST_ENABLE);
   hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
   hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
             (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
             (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
             (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
   if(insn.cc_update)
      hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

   /* Hardware saturate exists only on nv4x; nv3x callers emulate it
    * with MAX/MIN against the 0/1 constant instead. */
   if(insn.sat) {
      assert(vpc->is_nv4x);
      if(vpc->is_nv4x)
         hw[0] |= NV40_VP_INST_SATURATE;
   }

   if(!vpc->is_nv4x) {
      /* nv3x: scalar opcode is split across two dwords. */
      if(slot == 0)
         hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
      else {
         hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
         hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
      }
//      hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//      hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

      /* Write mask field depends on both the unit and whether the
       * destination is an output or a temp. */
      if (insn.dst.type == NVFXSR_OUTPUT) {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
      } else {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
      }
   } else {
      /* nv4x: the unused unit's temp destination must be set to the
       * "no write" mask value. */
      if (slot == 0) {
         hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
         hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
      } else {
         hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
         hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK ;
         hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
      }
   }

   emit_dst(vpc, hw, slot, insn.dst);
   emit_src(vpc, hw, 0, insn.src[0]);
   emit_src(vpc, hw, 1, insn.src[1]);
   emit_src(vpc, hw, 2, insn.src[2]);

//   if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//      hw[3] |= NV40_VP_INST_SCA_RESULT;
}
364
365 static inline struct nvfx_src
366 tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
367 struct nvfx_src src;
368
369 switch (fsrc->Register.File) {
370 case TGSI_FILE_INPUT:
371 src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
372 break;
373 case TGSI_FILE_CONSTANT:
374 if(fsrc->Register.Indirect) {
375 src.reg = vpc->r_const[0];
376 src.reg.index = fsrc->Register.Index;
377 } else {
378 src.reg = vpc->r_const[fsrc->Register.Index];
379 }
380 break;
381 case TGSI_FILE_IMMEDIATE:
382 src.reg = vpc->imm[fsrc->Register.Index];
383 break;
384 case TGSI_FILE_TEMPORARY:
385 src.reg = vpc->r_temp[fsrc->Register.Index];
386 break;
387 default:
388 NOUVEAU_ERR("bad src file\n");
389 src.reg.index = 0;
390 src.reg.type = -1;
391 break;
392 }
393
394 src.abs = fsrc->Register.Absolute;
395 src.negate = fsrc->Register.Negate;
396 src.swz[0] = fsrc->Register.SwizzleX;
397 src.swz[1] = fsrc->Register.SwizzleY;
398 src.swz[2] = fsrc->Register.SwizzleZ;
399 src.swz[3] = fsrc->Register.SwizzleW;
400 src.indirect = 0;
401 src.indirect_reg = 0;
402 src.indirect_swz = 0;
403
404 if(fsrc->Register.Indirect) {
405 if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
406 (fsrc->Register.File == TGSI_FILE_CONSTANT ||
407 fsrc->Register.File == TGSI_FILE_INPUT)) {
408 src.indirect = 1;
409 src.indirect_reg = fsrc->Indirect.Index;
410 src.indirect_swz = fsrc->Indirect.Swizzle;
411 } else {
412 src.reg.index = 0;
413 src.reg.type = -1;
414 }
415 }
416
417 return src;
418 }
419
420 static inline struct nvfx_reg
421 tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
422 struct nvfx_reg dst;
423
424 switch (fdst->Register.File) {
425 case TGSI_FILE_NULL:
426 dst = nvfx_reg(NVFXSR_NONE, 0);
427 break;
428 case TGSI_FILE_OUTPUT:
429 dst = vpc->r_result[fdst->Register.Index];
430 break;
431 case TGSI_FILE_TEMPORARY:
432 dst = vpc->r_temp[fdst->Register.Index];
433 break;
434 case TGSI_FILE_ADDRESS:
435 dst = vpc->r_address[fdst->Register.Index];
436 break;
437 default:
438 NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
439 dst.index = 0;
440 dst.type = 0;
441 break;
442 }
443
444 return dst;
445 }
446
447 static inline int
448 tgsi_mask(uint tgsi)
449 {
450 int mask = 0;
451
452 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
453 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
454 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
455 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
456 return mask;
457 }
458
/*
 * Translate a single TGSI instruction into one or more hardware
 * instructions.  'idx' is the TGSI instruction index; it is used as the
 * continue target when a loop begins here.  Returns false if the
 * instruction cannot be translated (the caller then falls back).
 */
static bool
nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc,
                unsigned idx, const struct tgsi_full_instruction *finst)
{
   struct nvfx_src src[3], tmp;
   struct nvfx_reg dst;
   struct nvfx_reg final_dst;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn;
   struct nvfx_relocation reloc;
   struct nvfx_loop_entry loop;
   bool sat = false;
   int mask;
   int ai = -1, ci = -1, ii = -1;
   int i;
   unsigned sub_depth = 0;

   /* Temporaries can always be read directly; translate them first so
    * the conflict resolution below only concerns inputs/consts/imms. */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];
      if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
         src[i] = tgsi_src(vpc, fsrc);
      }
   }

   /* One instruction can read at most one distinct input attribute and
    * one distinct constant/immediate slot.  Additional distinct ones are
    * copied into fresh temps via MOV first (ai/ci/ii remember the index
    * already claimed for each file). */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];

      switch (fsrc->Register.File) {
      case TGSI_FILE_INPUT:
         if (ai == -1 || ai == fsrc->Register.Index) {
            ai = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_CONSTANT:
         if ((ci == -1 && ii == -1) ||
             ci == fsrc->Register.Index) {
            ci = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_IMMEDIATE:
         /* Immediates share the constant file, so they conflict with
          * constants too. */
         if ((ci == -1 && ii == -1) ||
             ii == fsrc->Register.Index) {
            ii = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_TEMPORARY:
         /* handled above */
         break;
      default:
         NOUVEAU_ERR("bad src file\n");
         return false;
      }
   }

   /* tgsi_src() flags untranslatable operands with type == -1. */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      if(src[i].reg.type < 0)
         return false;
   }

   /* Only ARL may write the address register. */
   if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
      finst->Instruction.Opcode != TGSI_OPCODE_ARL)
      return false;

   final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
   mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
   if(finst->Instruction.Saturate) {
      assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
      if (vpc->is_nv4x)
         sat = true;          /* hw saturate bit */
      else
      if(dst.type != NVFXSR_TEMP)
         dst = temp(vpc);     /* nv3x: clamp via MAX/MIN at the end */
   }

   switch (finst->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
      break;
   case TGSI_OPCODE_ARL:
      nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_CEIL:
      /* ceil(x) = -floor(-x) */
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, neg(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none));
      break;
   case TGSI_OPCODE_CMP:
      /* Set condition codes from src0, then select src2 (>= 0) or
       * src1 (< 0) with predicated MOVs. */
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
      insn.cc_test = NVFX_COND_GE;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_COS:
      nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_DP2:
      /* No hw DP2: multiply then add the x and y products. */
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
      break;
   case TGSI_OPCODE_DP3:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DP4:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DST:
      nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_EX2:
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_EXP:
      nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_FLR:
      nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FRC:
      nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_LG2:
      nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LIT:
      nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LOG:
      nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LRP:
      /* lerp(a, b, c) = a*b + (1-a)*c, expressed as two MADs. */
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
      break;
   case TGSI_OPCODE_MAD:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
      break;
   case TGSI_OPCODE_MAX:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MIN:
      nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MOV:
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_MUL:
      nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_NOP:
      break;
   case TGSI_OPCODE_POW:
      /* pow(x, y) = 2^(y * log2(x)), via the scalar unit. */
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
      break;
   case TGSI_OPCODE_RCP:
      nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_RSQ:
      nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
      break;
   case TGSI_OPCODE_SEQ:
      nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SIN:
      nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_SLE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SLT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SNE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SSG:
      nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TRUNC:
      /* trunc(x) = sign(x) * floor(|x|), done with condition codes:
       * write floor(|x|), then overwrite with its negation where x < 0. */
      tmp = nvfx_src(temp(vpc));
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));

      insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_XPD:
      /* Cross product via MUL + MAD with rotated swizzles; W is
       * excluded from the final write. */
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
      break;
   case TGSI_OPCODE_IF:
      /* Set CC from the condition, then branch past the IF body when
       * it is zero.  The branch target (the TGSI label + 1) is fixed
       * up later through label_relocs. */
      insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      insn.cc_test = NVFX_COND_EQ;
      insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_ELSE:
   case TGSI_OPCODE_CAL:
      /* Both are an unconditional transfer to a TGSI label; CAL uses
       * the subroutine-call opcode instead of a plain branch. */
      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
         insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
      else
         insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_RET:
      /* Inside a subroutine (or when no clip-plane epilogue will be
       * appended) emit a real RET; otherwise branch to the program end
       * so the clip-plane code still runs. */
      if(sub_depth || !vpc->vp->enabled_ucps) {
         tmp = none;
         tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
         nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
      } else {
         reloc.location = vpc->vp->nr_insns;
         reloc.target = vpc->info->num_instructions;
         util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
         nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      }
      break;
   case TGSI_OPCODE_BGNSUB:
      ++sub_depth;
      break;
   case TGSI_OPCODE_ENDSUB:
      --sub_depth;
      break;
   case TGSI_OPCODE_ENDIF:
      /* nothing to do here */
      break;
   case TGSI_OPCODE_BGNLOOP:
      /* Record branch targets for BRK/CONT inside this loop. */
      loop.cont_target = idx;
      loop.brk_target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
      break;
   case TGSI_OPCODE_ENDLOOP:
      loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

      /* Unconditional branch back to the loop header. */
      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_CONT:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_BRK:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.brk_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_END:
      assert(!sub_depth);
      if(vpc->vp->enabled_ucps) {
         /* A clip-plane epilogue follows: jump over any trailing code
          * to reach it, unless this END is already the last insn. */
         if(idx != (vpc->info->num_instructions - 1)) {
            reloc.location = vpc->vp->nr_insns;
            reloc.target = vpc->info->num_instructions;
            util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
            nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
         }
      } else {
         /* Terminate the program: mark the last instruction, emitting
          * a NOP if needed so there is something to mark. */
         if(vpc->vp->nr_insns)
            vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
         nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
         vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
      }
      break;
   default:
      NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
      return false;
   }

   /* nv3x saturate emulation: clamp the temp result into [0, 1] with the
    * shared (0, 1) constant, writing the clamped value to the real dst. */
   if(finst->Instruction.Saturate && !vpc->is_nv4x) {
      if (!vpc->r_0_1.type)
         vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
      nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
   }

   release_temps(vpc);
   return true;
}
801
/*
 * Map one TGSI output declaration to a hardware result register,
 * recording the mapping in vpc->r_result[].  Outputs with no hardware
 * consumer are mapped to NVFXSR_NONE so writes to them are dropped.
 */
static bool
nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc,
                const struct tgsi_full_declaration *fdec)
{
   unsigned num_texcoords = vpc->is_nv4x ? 10 : 8;
   unsigned idx = fdec->Range.First;
   unsigned semantic_index = fdec->Semantic.Index;
   int hw = 0, i;

   switch (fdec->Semantic.Name) {
   case TGSI_SEMANTIC_POSITION:
      hw = NVFX_VP(INST_DEST_POS);
      vpc->hpos_idx = idx;
      break;
   case TGSI_SEMANTIC_CLIPVERTEX:
      /* CLIPVERTEX goes to a temp (kept past this instruction) so the
       * clip-plane epilogue can read it back. */
      vpc->r_result[idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = idx;
      return true;
   case TGSI_SEMANTIC_COLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_COL0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_COL1);
      } else {
         NOUVEAU_ERR("bad colour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_BCOLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_BFC0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_BFC1);
      } else {
         NOUVEAU_ERR("bad bcolour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_FOG:
      hw = NVFX_VP(INST_DEST_FOGC);
      break;
   case TGSI_SEMANTIC_PSIZE:
      hw = NVFX_VP(INST_DEST_PSZ);
      break;
   case TGSI_SEMANTIC_GENERIC:
      /* this is really an identifier for VP/FP linkage */
      semantic_index += 8;
      /* fall through */
   case TGSI_SEMANTIC_TEXCOORD:
      /* Find the texcoord slot the fragment program linked this
       * semantic index to (set up in vp->texcoord elsewhere). */
      for (i = 0; i < num_texcoords; i++) {
         if (vpc->vp->texcoord[i] == semantic_index) {
            hw = NVFX_VP(INST_DEST_TC(i));
            break;
         }
      }

      /* Not consumed by the FP: discard writes to this output. */
      if (i == num_texcoords) {
         vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
         return true;
      }
      break;
   case TGSI_SEMANTIC_EDGEFLAG:
      /* Edge flags are not supported here; drop them. */
      vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
      return true;
   default:
      NOUVEAU_ERR("bad output semantic\n");
      return false;
   }

   vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
   return true;
}
877
/*
 * Pre-pass over the TGSI token stream: count immediates, find the highest
 * temporary/address/constant register declared, translate output
 * declarations, and pre-allocate the corresponding register maps in vpc.
 */
static bool
nvfx_vertprog_prepare(struct nvfx_vpc *vpc)
{
   struct tgsi_parse_context p;
   int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;

   tgsi_parse_init(&p, vpc->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&p)) {
      const union tgsi_full_token *tok = &p.FullToken;

      tgsi_parse_token(&p);
      switch(tok->Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
         nr_imm++;
         break;
      case TGSI_TOKEN_TYPE_DECLARATION:
      {
         const struct tgsi_full_declaration *fdec;

         fdec = &p.FullToken.FullDeclaration;
         switch (fdec->Declaration.File) {
         case TGSI_FILE_TEMPORARY:
            if (fdec->Range.Last > high_temp) {
               high_temp =
                  fdec->Range.Last;
            }
            break;
         case TGSI_FILE_ADDRESS:
            if (fdec->Range.Last > high_addr) {
               high_addr =
                  fdec->Range.Last;
            }
            break;
         case TGSI_FILE_CONSTANT:
            if (fdec->Range.Last > high_const) {
               high_const =
                  fdec->Range.Last;
            }
            break;
         case TGSI_FILE_OUTPUT:
            if (!nvfx_vertprog_parse_decl_output(vpc, fdec))
               return false;
            break;
         default:
            break;
         }
      }
      break;
      default:
         break;
      }
   }
   tgsi_parse_free(&p);

   if (nr_imm) {
      vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
      assert(vpc->imm);
   }

   /* highs are -1 when the file is unused, so ++high yields the count. */
   if (++high_temp) {
      vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
      for (i = 0; i < high_temp; i++)
         vpc->r_temp[i] = temp(vpc);
   }

   if (++high_addr) {
      vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
      for (i = 0; i < high_addr; i++)
         vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
   }

   if(++high_const) {
      vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
      for (i = 0; i < high_const; i++)
         vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
   }

   /* Registers allocated above are long-lived; keep them out of the
    * per-instruction discard set. */
   vpc->r_temps_discard = 0;
   return true;
}
958
959 DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false)
960
/*
 * Translate the TGSI vertex program in 'vp' into hardware instructions
 * for the chipset identified by 'oclass' (nv4x when >= NV40_3D_CLASS).
 * On success sets vp->translated and fills vp->insns/consts/relocs.
 * Returns vp->translated.
 */
bool
_nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp)
{
   struct tgsi_parse_context parse;
   struct nvfx_vpc *vpc = NULL;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct util_dynarray insns;
   int i, ucps;

   vp->translated = false;
   vp->nr_insns = 0;
   vp->nr_consts = 0;

   vpc = CALLOC_STRUCT(nvfx_vpc);
   if (!vpc)
      return false;
   vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
   vpc->vp = vp;
   vpc->pipe = vp->pipe;
   vpc->info = &vp->info;
   vpc->cvtx_idx = -1;

   if (!nvfx_vertprog_prepare(vpc)) {
      FREE(vpc);
      return false;
   }

   /* Redirect post-transform vertex position to a temp if user clip
    * planes are enabled.  We need to append code to the vtxprog
    * to handle clip planes later.
    */
   if (vp->enabled_ucps && vpc->cvtx_idx < 0)  {
      vpc->r_result[vpc->hpos_idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = vpc->hpos_idx;
   }

   /* 'insns' maps TGSI instruction index -> first hw instruction emitted
    * for it, so TGSI-label branch targets can be resolved afterwards. */
   util_dynarray_init(&insns, NULL);

   tgsi_parse_init(&parse, vp->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch (parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
      {
         const struct tgsi_full_immediate *imm;

         imm = &parse.FullToken.FullImmediate;
         assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
         assert(imm->Immediate.NrTokens == 4 + 1);
         vpc->imm[vpc->nr_imm++] =
            constant(vpc, -1,
                     imm->u[0].Float,
                     imm->u[1].Float,
                     imm->u[2].Float,
                     imm->u[3].Float);
      }
      break;
      case TGSI_TOKEN_TYPE_INSTRUCTION:
      {
         const struct tgsi_full_instruction *finst;
         unsigned idx = insns.size >> 2;
         util_dynarray_append(&insns, unsigned, vp->nr_insns);
         finst = &parse.FullToken.FullInstruction;
         if (!nvfx_vertprog_parse_instruction(vpc, idx, finst))
            goto out;
      }
      break;
      default:
         break;
      }
   }

   /* Sentinel entry so "branch to one past the end" resolves. */
   util_dynarray_append(&insns, unsigned, vp->nr_insns);

   /* Convert TGSI-index branch relocations into hw-index relocations. */
   for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
   {
      struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
      struct nvfx_relocation hw_reloc;

      hw_reloc.location = label_reloc->location;
      hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

      //debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

      util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
   }
   util_dynarray_fini(&insns);
   util_dynarray_trim(&vp->branch_relocs);

   /* XXX: what if we add a RET before?! make sure we jump here...*/

   /* Write out HPOS if it was redirected to a temp earlier */
   if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
      struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
                                      NVFX_VP(INST_DEST_POS));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

      nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
   }

   /* Insert code to handle user clip planes */
   ucps = vp->enabled_ucps;
   while (ucps) {
      int i = ffs(ucps) - 1; ucps &= ~(1 << i);
      struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
      /* Plane equations live in constant slots 512+i (encoded directly,
       * not relocated — see emit_src). */
      struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, 512 + i));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->cvtx_idx]);
      unsigned mask;

      if(vpc->is_nv4x)
      {
         switch (i) {
         case 0: case 3: mask = NVFX_VP_MASK_Y; break;
         case 1: case 4: mask = NVFX_VP_MASK_Z; break;
         case 2: case 5: mask = NVFX_VP_MASK_W; break;
         default:
            NOUVEAU_ERR("invalid clip dist #%d\n", i);
            goto out;
         }
      }
      else
         mask = NVFX_VP_MASK_X;

      /* Clip distance = dot(clip-space position, plane equation). */
      nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
   }

   if (vpc->vp->nr_insns)
      vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

   if(debug_get_option_nvfx_dump_vp())
   {
      debug_printf("\n");
      tgsi_dump(vpc->pipe.tokens, 0);

      debug_printf("\n%s vertex program:\n", vpc->is_nv4x ? "nv4x" : "nv3x");
      for (i = 0; i < vp->nr_insns; i++)
         debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
      debug_printf("\n");
   }

   vp->translated = true;

out:
   tgsi_parse_free(&parse);
   if (vpc) {
      util_dynarray_fini(&vpc->label_relocs);
      util_dynarray_fini(&vpc->loop_stack);
      FREE(vpc->r_temp);
      FREE(vpc->r_address);
      FREE(vpc->r_const);
      FREE(vpc->imm);
      FREE(vpc);
   }

   return vp->translated;
}