mesa.git: src/gallium/drivers/nouveau/nv30/nvfx_vertprog.c (at commit b39c4b74eca8648eccf4fea5f87f0e9d6d130071)
1 #include <strings.h>
2 #include "pipe/p_context.h"
3 #include "pipe/p_defines.h"
4 #include "pipe/p_state.h"
5 #include "util/u_dynarray.h"
6 #include "util/u_debug.h"
7
8 #include "pipe/p_shader_tokens.h"
9 #include "tgsi/tgsi_parse.h"
10 #include "tgsi/tgsi_dump.h"
11 #include "tgsi/tgsi_util.h"
12 #include "tgsi/tgsi_ureg.h"
13
14 #include "draw/draw_context.h"
15
16 #include "nv_object.xml.h"
17 #include "nouveau_debug.h"
18 #include "nv30/nv30-40_3d.xml.h"
19 #include "nv30/nv30_state.h"
20
21 /* TODO (at least...):
22 * 1. Indexed consts + ARL
23 * 2. NV_vp11, NV_vp2, NV_vp3 features
24 * - extra arith opcodes
25 * - branching
26 * - texture sampling
27 * - indexed attribs
28 * - indexed results
29 * 3. bugs
30 */
31
32 #include "nv30/nv30_vertprog.h"
33 #include "nv30/nv40_vertprog.h"
34
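/* Branch targets (as TGSI instruction indices) for the innermost loop:
 * BRK jumps to brk_target, CONT jumps back to cont_target.  Entries are
 * pushed/popped on vpc->loop_stack by BGNLOOP/ENDLOOP.
 */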
35 struct nvfx_loop_entry {
36 unsigned brk_target;
37 unsigned cont_target;
38 };
39
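/* Per-translation context: the TGSI program being compiled, the hardware
 * program being built, the temporary-register allocation bitmasks, the
 * maps from TGSI register files to hardware registers, and bookkeeping
 * for branch relocations and loop nesting.
 */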
40 struct nvfx_vpc {
41 struct pipe_shader_state pipe;
42 struct nv30_vertprog *vp;
43 struct tgsi_shader_info* info;
44
45 struct nv30_vertprog_exec *vpi;
46
47 unsigned r_temps;
48 unsigned r_temps_discard;
49 struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
50 struct nvfx_reg *r_address;
51 struct nvfx_reg *r_temp;
52 struct nvfx_reg *r_const;
53 struct nvfx_reg r_0_1;
54
55 struct nvfx_reg *imm;
56 unsigned nr_imm;
57
58 int hpos_idx;
59 int cvtx_idx;
60
61 unsigned is_nv4x;
62
63 struct util_dynarray label_relocs;
64 struct util_dynarray loop_stack;
65 };
66
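/* Allocate the lowest free hardware temporary.  The register is also
 * marked in r_temps_discard so release_temps() frees it again at the end
 * of the current TGSI instruction; only the first 16 temps exist on nv3x.
 */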
67 static struct nvfx_reg
68 temp(struct nvfx_vpc *vpc)
69 {
70 int idx = ffs(~vpc->r_temps) - 1;
71
72 if (idx < 0 || (!vpc->is_nv4x && idx >= 16)) {
73 NOUVEAU_ERR("out of temps!!\n");
74 return nvfx_reg(NVFXSR_TEMP, 0);
75 }
76
77 vpc->r_temps |= (1 << idx);
78 vpc->r_temps_discard |= (1 << idx);
79 return nvfx_reg(NVFXSR_TEMP, idx);
80 }
81
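/* Free all temporaries allocated since the last call, i.e. the scratch
 * registers used while translating a single TGSI instruction.
 */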
82 static inline void
83 release_temps(struct nvfx_vpc *vpc)
84 {
85 vpc->r_temps &= ~vpc->r_temps_discard;
86 vpc->r_temps_discard = 0;
87 }
88
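/* Return a hardware constant slot.  For pipe >= 0 an existing slot bound
 * to that pipe constant index is reused if possible; pipe == -1 allocates
 * a fresh slot holding the literal (x, y, z, w) immediate.
 */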
89 static struct nvfx_reg
90 constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
91 {
92 struct nv30_vertprog *vp = vpc->vp;
93 struct nv30_vertprog_data *vpd;
94 int idx;
95
96 if (pipe >= 0) {
97 for (idx = 0; idx < vp->nr_consts; idx++) {
98 if (vp->consts[idx].index == pipe)
99 return nvfx_reg(NVFXSR_CONST, idx);
100 }
101 }
102
103 idx = vp->nr_consts++;
104 vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
105 vpd = &vp->consts[idx];
106
107 vpd->index = pipe;
108 vpd->value[0] = x;
109 vpd->value[1] = y;
110 vpd->value[2] = z;
111 vpd->value[3] = w;
112 return nvfx_reg(NVFXSR_CONST, idx);
113 }
114
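/* Build an nvfx_insn for the vector (VEC) or scalar (SCA) slot: the slot
 * is stored in bit 7 of the op field and split out again in nvfx_vp_emit().
 */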
115 #define arith(s,t,o,d,m,s0,s1,s2) \
116 nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))
117
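/* Encode source operand 'pos' (0..2) into the four instruction dwords:
 * register type and index, negate/abs, swizzle, and optional address
 * register indirection.  Constant indices in [-256, 255] are recorded as
 * relocations so the final constant slot can be patched in later; larger
 * indices (e.g. the clip plane equations at 512+) are encoded directly.
 */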
118 static void
119 emit_src(struct nvfx_vpc *vpc, uint32_t *hw,
120 int pos, struct nvfx_src src)
121 {
122 struct nv30_vertprog *vp = vpc->vp;
123 uint32_t sr = 0;
124 struct nvfx_relocation reloc;
125
126 switch (src.reg.type) {
127 case NVFXSR_TEMP:
128 sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
129 sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
130 break;
131 case NVFXSR_INPUT:
132 sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
133 NVFX_VP(SRC_REG_TYPE_SHIFT));
134 vp->ir |= (1 << src.reg.index);
135 hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
136 break;
137 case NVFXSR_CONST:
138 sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
139 NVFX_VP(SRC_REG_TYPE_SHIFT));
140 if (src.reg.index < 256 && src.reg.index >= -256) {
141 reloc.location = vp->nr_insns - 1;
142 reloc.target = src.reg.index;
143 util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
144 } else {
145 hw[1] |= (src.reg.index << NVFX_VP(INST_CONST_SRC_SHIFT)) &
146 NVFX_VP(INST_CONST_SRC_MASK);
147 }
148 break;
149 case NVFXSR_NONE:
150 sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
151 NVFX_VP(SRC_REG_TYPE_SHIFT));
152 break;
153 default:
154 assert(0);
155 }
156
157 if (src.negate)
158 sr |= NVFX_VP(SRC_NEGATE);
159
160 if (src.abs)
161 hw[0] |= (1 << (21 + pos));
162
163 sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
164 (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
165 (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
166 (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));
167
168 if(src.indirect) {
169 if(src.reg.type == NVFXSR_CONST)
170 hw[3] |= NVFX_VP(INST_INDEX_CONST);
171 else if(src.reg.type == NVFXSR_INPUT)
172 hw[0] |= NVFX_VP(INST_INDEX_INPUT);
173 else
174 assert(0);
175
176 if(src.indirect_reg)
177 hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
178 hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
179 }
180
181 switch (pos) {
182 case 0:
183 hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
184 NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
185 hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
186 NVFX_VP(INST_SRC0L_SHIFT);
187 break;
188 case 1:
189 hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
190 break;
191 case 2:
192 hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
193 NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
194 hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
195 NVFX_VP(INST_SRC2L_SHIFT);
196 break;
197 default:
198 assert(0);
199 }
200 }
201
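/* Encode the destination register.  Temporaries and the "no destination"
 * case use different fields on nv3x and nv4x; writes to outputs also set
 * the corresponding bit in vp->or, and on nv4x the six clip distances are
 * routed through the FOGC/PSZ result slots.
 */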
202 static void
203 emit_dst(struct nvfx_vpc *vpc, uint32_t *hw,
204 int slot, struct nvfx_reg dst)
205 {
206 struct nv30_vertprog *vp = vpc->vp;
207
208 switch (dst.type) {
209 case NVFXSR_NONE:
210 if(!vpc->is_nv4x)
211 hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
212 else {
213 hw[3] |= NV40_VP_INST_DEST_MASK;
214 if (slot == 0)
215 hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
216 else
217 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
218 }
219 break;
220 case NVFXSR_TEMP:
221 if(!vpc->is_nv4x)
222 hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
223 else {
224 hw[3] |= NV40_VP_INST_DEST_MASK;
225 if (slot == 0)
226 hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
227 else
228 hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
229 }
230 break;
231 case NVFXSR_OUTPUT:
232 /* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
233 if(vpc->is_nv4x) {
234 switch (dst.index) {
235 case NV30_VP_INST_DEST_CLP(0):
236 dst.index = NVFX_VP(INST_DEST_FOGC);
237 vp->or |= (1 << 6);
238 break;
239 case NV30_VP_INST_DEST_CLP(1):
240 dst.index = NVFX_VP(INST_DEST_FOGC);
241 vp->or |= (1 << 7);
242 break;
243 case NV30_VP_INST_DEST_CLP(2):
244 dst.index = NVFX_VP(INST_DEST_FOGC);
245 vp->or |= (1 << 8);
246 break;
247 case NV30_VP_INST_DEST_CLP(3):
248 dst.index = NVFX_VP(INST_DEST_PSZ);
249 vp->or |= (1 << 9);
250 break;
251 case NV30_VP_INST_DEST_CLP(4):
252 dst.index = NVFX_VP(INST_DEST_PSZ);
253 vp->or |= (1 << 10);
254 break;
255 case NV30_VP_INST_DEST_CLP(5):
256 dst.index = NVFX_VP(INST_DEST_PSZ);
257 vp->or |= (1 << 11);
258 break;
259 case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
260 case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
261 case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
262 case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
263 case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
264 case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
265 }
266 }
267
268 if(!vpc->is_nv4x) {
269 hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
270 hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;
271
272 /*XXX: no way this is entirely correct, someone needs to
273 * figure out what exactly it is.
274 */
275 hw[3] |= 0x800;
276 } else {
277 hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
278 if (slot == 0) {
279 hw[0] |= NV40_VP_INST_VEC_RESULT;
280 hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
281 } else {
282 hw[3] |= NV40_VP_INST_SCA_RESULT;
283 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
284 }
285 }
286 break;
287 default:
288 assert(0);
289 }
290 }
291
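/* Append one hardware instruction to vp->insns: encode the condition-code
 * test/update and swizzle, saturation (nv4x only), the opcode in its VEC
 * or SCA slot, the write mask, and finally the destination and sources.
 */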
292 static void
293 nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
294 {
295 struct nv30_vertprog *vp = vpc->vp;
296 unsigned slot = insn.op >> 7;
297 unsigned op = insn.op & 0x7f;
298 uint32_t *hw;
299
300 vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
301 vpc->vpi = &vp->insns[vp->nr_insns - 1];
302 memset(vpc->vpi, 0, sizeof(*vpc->vpi));
303
304 hw = vpc->vpi->data;
305
306 if (insn.cc_test != NVFX_COND_TR)
307 hw[0] |= NVFX_VP(INST_COND_TEST_ENABLE);
308 hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
309 hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
310 (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
311 (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
312 (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
313 if(insn.cc_update)
314 hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);
315
316 if(insn.sat) {
317 assert(vpc->is_nv4x);
318 if(vpc->is_nv4x)
319 hw[0] |= NV40_VP_INST_SATURATE;
320 }
321
322 if(!vpc->is_nv4x) {
323 if(slot == 0)
324 hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
325 else {
326 hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
327 hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
328 }
329 // hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
330 // hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));
331
332 if (insn.dst.type == NVFXSR_OUTPUT) {
333 if (slot)
334 hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
335 else
336 hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
337 } else {
338 if (slot)
339 hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
340 else
341 hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
342 }
343 } else {
344 if (slot == 0) {
345 hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
346 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
347 hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
348 } else {
349 hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
350                         hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
351 hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
352 }
353 }
354
355 emit_dst(vpc, hw, slot, insn.dst);
356 emit_src(vpc, hw, 0, insn.src[0]);
357 emit_src(vpc, hw, 1, insn.src[1]);
358 emit_src(vpc, hw, 2, insn.src[2]);
359
360 // if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
361 // hw[3] |= NV40_VP_INST_SCA_RESULT;
362 }
363
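/* Translate a TGSI source operand into an nvfx_src: map the register file
 * onto the pre-allocated hardware registers, copy the absolute/negate
 * modifiers and swizzle, and set up ADDR-register indirection (only
 * supported for constant and input files).
 */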
364 static inline struct nvfx_src
365 tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
366 struct nvfx_src src;
367
368 switch (fsrc->Register.File) {
369 case TGSI_FILE_INPUT:
370 src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
371 break;
372 case TGSI_FILE_CONSTANT:
373 if(fsrc->Register.Indirect) {
374 src.reg = vpc->r_const[0];
375 src.reg.index = fsrc->Register.Index;
376 } else {
377 src.reg = vpc->r_const[fsrc->Register.Index];
378 }
379 break;
380 case TGSI_FILE_IMMEDIATE:
381 src.reg = vpc->imm[fsrc->Register.Index];
382 break;
383 case TGSI_FILE_TEMPORARY:
384 src.reg = vpc->r_temp[fsrc->Register.Index];
385 break;
386 default:
387 NOUVEAU_ERR("bad src file\n");
388 src.reg.index = 0;
389 src.reg.type = -1;
390 break;
391 }
392
393 src.abs = fsrc->Register.Absolute;
394 src.negate = fsrc->Register.Negate;
395 src.swz[0] = fsrc->Register.SwizzleX;
396 src.swz[1] = fsrc->Register.SwizzleY;
397 src.swz[2] = fsrc->Register.SwizzleZ;
398 src.swz[3] = fsrc->Register.SwizzleW;
399 src.indirect = 0;
400 src.indirect_reg = 0;
401 src.indirect_swz = 0;
402
403 if(fsrc->Register.Indirect) {
404 if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
405 (fsrc->Register.File == TGSI_FILE_CONSTANT ||
406 fsrc->Register.File == TGSI_FILE_INPUT)) {
407 src.indirect = 1;
408 src.indirect_reg = fsrc->Indirect.Index;
409 src.indirect_swz = fsrc->Indirect.Swizzle;
410 } else {
411 src.reg.index = 0;
412 src.reg.type = -1;
413 }
414 }
415
416 return src;
417 }
418
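/* Translate a TGSI destination operand into its pre-allocated hardware
 * register (output, temporary or address register).
 */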
419 static inline struct nvfx_reg
420 tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
421 struct nvfx_reg dst;
422
423 switch (fdst->Register.File) {
424 case TGSI_FILE_NULL:
425 dst = nvfx_reg(NVFXSR_NONE, 0);
426 break;
427 case TGSI_FILE_OUTPUT:
428 dst = vpc->r_result[fdst->Register.Index];
429 break;
430 case TGSI_FILE_TEMPORARY:
431 dst = vpc->r_temp[fdst->Register.Index];
432 break;
433 case TGSI_FILE_ADDRESS:
434 dst = vpc->r_address[fdst->Register.Index];
435 break;
436 default:
437 NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
438 dst.index = 0;
439 dst.type = 0;
440 break;
441 }
442
443 return dst;
444 }
445
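/* Convert a TGSI write mask into the hardware NVFX_VP_MASK_* bits. */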
446 static inline int
447 tgsi_mask(uint tgsi)
448 {
449 int mask = 0;
450
451 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
452 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
453 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
454 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
455 return mask;
456 }
457
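/* Translate one TGSI instruction into one or more hardware instructions.
 * The hardware can only read a single distinct input, constant and
 * immediate per instruction, so additional operands from those files are
 * first copied into temporaries.  Control flow is emitted as BRA/CAL/RET
 * with label relocations that are resolved once the whole program has
 * been parsed.
 */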
458 static bool
459 nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc,
460 unsigned idx, const struct tgsi_full_instruction *finst)
461 {
462 struct nvfx_src src[3], tmp;
463 struct nvfx_reg dst;
464 struct nvfx_reg final_dst;
465 struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
466 struct nvfx_insn insn;
467 struct nvfx_relocation reloc;
468 struct nvfx_loop_entry loop;
469 bool sat = false;
470 int mask;
471 int ai = -1, ci = -1, ii = -1;
472 int i;
473 unsigned sub_depth = 0;
474
475 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
476 const struct tgsi_full_src_register *fsrc;
477
478 fsrc = &finst->Src[i];
479 if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
480 src[i] = tgsi_src(vpc, fsrc);
481 }
482 }
483
484 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
485 const struct tgsi_full_src_register *fsrc;
486
487 fsrc = &finst->Src[i];
488
489 switch (fsrc->Register.File) {
490 case TGSI_FILE_INPUT:
491 if (ai == -1 || ai == fsrc->Register.Index) {
492 ai = fsrc->Register.Index;
493 src[i] = tgsi_src(vpc, fsrc);
494 } else {
495 src[i] = nvfx_src(temp(vpc));
496 nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
497 tgsi_src(vpc, fsrc), none, none));
498 }
499 break;
500 case TGSI_FILE_CONSTANT:
501 if ((ci == -1 && ii == -1) ||
502 ci == fsrc->Register.Index) {
503 ci = fsrc->Register.Index;
504 src[i] = tgsi_src(vpc, fsrc);
505 } else {
506 src[i] = nvfx_src(temp(vpc));
507 nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
508 tgsi_src(vpc, fsrc), none, none));
509 }
510 break;
511 case TGSI_FILE_IMMEDIATE:
512 if ((ci == -1 && ii == -1) ||
513 ii == fsrc->Register.Index) {
514 ii = fsrc->Register.Index;
515 src[i] = tgsi_src(vpc, fsrc);
516 } else {
517 src[i] = nvfx_src(temp(vpc));
518 nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
519 tgsi_src(vpc, fsrc), none, none));
520 }
521 break;
522 case TGSI_FILE_TEMPORARY:
523 /* handled above */
524 break;
525 default:
526 NOUVEAU_ERR("bad src file\n");
527 return false;
528 }
529 }
530
531 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
532 if(src[i].reg.type < 0)
533 return false;
534 }
535
536 if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
537 finst->Instruction.Opcode != TGSI_OPCODE_ARL)
538 return false;
539
540 final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
541 mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
542 if(finst->Instruction.Saturate) {
543 assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
544 if (vpc->is_nv4x)
545 sat = true;
546 else
547 if(dst.type != NVFXSR_TEMP)
548 dst = temp(vpc);
549 }
550
551 switch (finst->Instruction.Opcode) {
552 case TGSI_OPCODE_ABS:
553 nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, abs(src[0]), none, none));
554 break;
555 case TGSI_OPCODE_ADD:
556 nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
557 break;
558 case TGSI_OPCODE_ARL:
559 nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
560 break;
561 case TGSI_OPCODE_CEIL:
562 tmp = nvfx_src(temp(vpc));
563 nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, neg(src[0]), none, none));
564 nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none));
565 break;
566 case TGSI_OPCODE_CMP:
567 insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
568 insn.cc_update = 1;
569 nvfx_vp_emit(vpc, insn);
570
571 insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
572 insn.cc_test = NVFX_COND_GE;
573 nvfx_vp_emit(vpc, insn);
574
575 insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
576 insn.cc_test = NVFX_COND_LT;
577 nvfx_vp_emit(vpc, insn);
578 break;
579 case TGSI_OPCODE_COS:
580 nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
581 break;
582 case TGSI_OPCODE_DP2:
583 tmp = nvfx_src(temp(vpc));
584 nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
585 nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
586 break;
587 case TGSI_OPCODE_DP3:
588 nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
589 break;
590 case TGSI_OPCODE_DP4:
591 nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
592 break;
593 case TGSI_OPCODE_DPH:
594 nvfx_vp_emit(vpc, arith(sat, VEC, DPH, dst, mask, src[0], src[1], none));
595 break;
596 case TGSI_OPCODE_DST:
597 nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
598 break;
599 case TGSI_OPCODE_EX2:
600 nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
601 break;
602 case TGSI_OPCODE_EXP:
603 nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
604 break;
605 case TGSI_OPCODE_FLR:
606 nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
607 break;
608 case TGSI_OPCODE_FRC:
609 nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
610 break;
611 case TGSI_OPCODE_LG2:
612 nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
613 break;
614 case TGSI_OPCODE_LIT:
615 nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
616 break;
617 case TGSI_OPCODE_LOG:
618 nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
619 break;
620 case TGSI_OPCODE_LRP:
621 tmp = nvfx_src(temp(vpc));
622 nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
623 nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
624 break;
625 case TGSI_OPCODE_MAD:
626 nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
627 break;
628 case TGSI_OPCODE_MAX:
629 nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
630 break;
631 case TGSI_OPCODE_MIN:
632 nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
633 break;
634 case TGSI_OPCODE_MOV:
635 nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
636 break;
637 case TGSI_OPCODE_MUL:
638 nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
639 break;
640 case TGSI_OPCODE_NOP:
641 break;
642 case TGSI_OPCODE_POW:
643 tmp = nvfx_src(temp(vpc));
644 nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
645 nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
646 nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
647 break;
648 case TGSI_OPCODE_RCP:
649 nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
650 break;
651 case TGSI_OPCODE_RSQ:
652 nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
653 break;
654 case TGSI_OPCODE_SEQ:
655 nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
656 break;
657 case TGSI_OPCODE_SGE:
658 nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
659 break;
660 case TGSI_OPCODE_SGT:
661 nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
662 break;
663 case TGSI_OPCODE_SIN:
664 nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
665 break;
666 case TGSI_OPCODE_SLE:
667 nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
668 break;
669 case TGSI_OPCODE_SLT:
670 nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
671 break;
672 case TGSI_OPCODE_SNE:
673 nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
674 break;
675 case TGSI_OPCODE_SSG:
676 nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], none, none));
677 break;
678 case TGSI_OPCODE_SUB:
679 nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, neg(src[1])));
680 break;
681 case TGSI_OPCODE_TRUNC:
682 tmp = nvfx_src(temp(vpc));
683 insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
684 insn.cc_update = 1;
685 nvfx_vp_emit(vpc, insn);
686
687 nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
688 nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));
689
690 insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
691 insn.cc_test = NVFX_COND_LT;
692 nvfx_vp_emit(vpc, insn);
693 break;
694 case TGSI_OPCODE_XPD:
695 tmp = nvfx_src(temp(vpc));
696 nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
697 nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
698 break;
699 case TGSI_OPCODE_IF:
700 insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
701 insn.cc_update = 1;
702 nvfx_vp_emit(vpc, insn);
703
704 reloc.location = vpc->vp->nr_insns;
705 reloc.target = finst->Label.Label + 1;
706 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
707
708 insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
709 insn.cc_test = NVFX_COND_EQ;
710 insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
711 nvfx_vp_emit(vpc, insn);
712 break;
713 case TGSI_OPCODE_ELSE:
714 case TGSI_OPCODE_CAL:
715 reloc.location = vpc->vp->nr_insns;
716 reloc.target = finst->Label.Label;
717 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
718
719 if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
720 insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
721 else
722 insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
723 nvfx_vp_emit(vpc, insn);
724 break;
725 case TGSI_OPCODE_RET:
726 if(sub_depth || !vpc->vp->enabled_ucps) {
727 tmp = none;
728 tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
729 nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
730 } else {
731 reloc.location = vpc->vp->nr_insns;
732 reloc.target = vpc->info->num_instructions;
733 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
734 nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
735 }
736 break;
737 case TGSI_OPCODE_BGNSUB:
738 ++sub_depth;
739 break;
740 case TGSI_OPCODE_ENDSUB:
741 --sub_depth;
742 break;
743 case TGSI_OPCODE_ENDIF:
744 /* nothing to do here */
745 break;
746 case TGSI_OPCODE_BGNLOOP:
747 loop.cont_target = idx;
748 loop.brk_target = finst->Label.Label + 1;
749 util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
750 break;
751 case TGSI_OPCODE_ENDLOOP:
752 loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);
753
754 reloc.location = vpc->vp->nr_insns;
755 reloc.target = loop.cont_target;
756 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
757
758 nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
759 break;
760 case TGSI_OPCODE_CONT:
761 loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);
762
763 reloc.location = vpc->vp->nr_insns;
764 reloc.target = loop.cont_target;
765 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
766
767 nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
768 break;
769 case TGSI_OPCODE_BRK:
770 loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);
771
772 reloc.location = vpc->vp->nr_insns;
773 reloc.target = loop.brk_target;
774 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
775
776 nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
777 break;
778 case TGSI_OPCODE_END:
779 assert(!sub_depth);
780 if(vpc->vp->enabled_ucps) {
781 if(idx != (vpc->info->num_instructions - 1)) {
782 reloc.location = vpc->vp->nr_insns;
783 reloc.target = vpc->info->num_instructions;
784 util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
785 nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
786 }
787 } else {
788 if(vpc->vp->nr_insns)
789 vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
790 nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
791 vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
792 }
793 break;
794 default:
795 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
796 return false;
797 }
798
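/* nv3x has no saturate modifier: emulate TGSI saturate by clamping the
 * result against a {0, 1} constant allocated on first use, writing the
 * clamped value into the real destination.
 */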
799 if(finst->Instruction.Saturate && !vpc->is_nv4x) {
800 if (!vpc->r_0_1.type)
801 vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
802 nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
803 nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
804 }
805
806 release_temps(vpc);
807 return true;
808 }
809
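/* Map a TGSI output declaration onto a hardware result register.
 * CLIPVERTEX is redirected to a temporary so the clip distances can be
 * computed from it later; GENERIC semantics are offset by 8 so they share
 * the texcoord namespace used for VP/FP linkage, and outputs with no
 * matching slot are discarded.
 */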
810 static bool
811 nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc,
812 const struct tgsi_full_declaration *fdec)
813 {
814 unsigned num_texcoords = vpc->is_nv4x ? 10 : 8;
815 unsigned idx = fdec->Range.First;
816 unsigned semantic_index = fdec->Semantic.Index;
817 int hw = 0, i;
818
819 switch (fdec->Semantic.Name) {
820 case TGSI_SEMANTIC_POSITION:
821 hw = NVFX_VP(INST_DEST_POS);
822 vpc->hpos_idx = idx;
823 break;
824 case TGSI_SEMANTIC_CLIPVERTEX:
825 vpc->r_result[idx] = temp(vpc);
826 vpc->r_temps_discard = 0;
827 vpc->cvtx_idx = idx;
828 return true;
829 case TGSI_SEMANTIC_COLOR:
830 if (fdec->Semantic.Index == 0) {
831 hw = NVFX_VP(INST_DEST_COL0);
832 } else
833 if (fdec->Semantic.Index == 1) {
834 hw = NVFX_VP(INST_DEST_COL1);
835 } else {
836 NOUVEAU_ERR("bad colour semantic index\n");
837 return false;
838 }
839 break;
840 case TGSI_SEMANTIC_BCOLOR:
841 if (fdec->Semantic.Index == 0) {
842 hw = NVFX_VP(INST_DEST_BFC0);
843 } else
844 if (fdec->Semantic.Index == 1) {
845 hw = NVFX_VP(INST_DEST_BFC1);
846 } else {
847 NOUVEAU_ERR("bad bcolour semantic index\n");
848 return false;
849 }
850 break;
851 case TGSI_SEMANTIC_FOG:
852 hw = NVFX_VP(INST_DEST_FOGC);
853 break;
854 case TGSI_SEMANTIC_PSIZE:
855 hw = NVFX_VP(INST_DEST_PSZ);
856 break;
857 case TGSI_SEMANTIC_GENERIC:
858 /* this is really an identifier for VP/FP linkage */
859 semantic_index += 8;
860 /* fall through */
861 case TGSI_SEMANTIC_TEXCOORD:
862 for (i = 0; i < num_texcoords; i++) {
863 if (vpc->vp->texcoord[i] == semantic_index) {
864 hw = NVFX_VP(INST_DEST_TC(i));
865 break;
866 }
867 }
868
869 if (i == num_texcoords) {
870 vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
871 return true;
872 }
873 break;
874 case TGSI_SEMANTIC_EDGEFLAG:
875 vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
876 return true;
877 default:
878 NOUVEAU_ERR("bad output semantic\n");
879 return false;
880 }
881
882 vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
883 return true;
884 }
885
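/* Pre-scan the TGSI tokens: count immediates, record the highest used
 * temporary/address/constant indices and parse the output declarations,
 * then allocate hardware registers for all of them up front.
 */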
886 static bool
887 nvfx_vertprog_prepare(struct nvfx_vpc *vpc)
888 {
889 struct tgsi_parse_context p;
890 int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;
891
892 tgsi_parse_init(&p, vpc->pipe.tokens);
893 while (!tgsi_parse_end_of_tokens(&p)) {
894 const union tgsi_full_token *tok = &p.FullToken;
895
896 tgsi_parse_token(&p);
897 switch(tok->Token.Type) {
898 case TGSI_TOKEN_TYPE_IMMEDIATE:
899 nr_imm++;
900 break;
901 case TGSI_TOKEN_TYPE_DECLARATION:
902 {
903 const struct tgsi_full_declaration *fdec;
904
905 fdec = &p.FullToken.FullDeclaration;
906 switch (fdec->Declaration.File) {
907 case TGSI_FILE_TEMPORARY:
908 if (fdec->Range.Last > high_temp) {
909 high_temp =
910 fdec->Range.Last;
911 }
912 break;
913 case TGSI_FILE_ADDRESS:
914 if (fdec->Range.Last > high_addr) {
915 high_addr =
916 fdec->Range.Last;
917 }
918 break;
919 case TGSI_FILE_CONSTANT:
920 if (fdec->Range.Last > high_const) {
921 high_const =
922 fdec->Range.Last;
923 }
924 break;
925 case TGSI_FILE_OUTPUT:
926 if (!nvfx_vertprog_parse_decl_output(vpc, fdec))
927 return false;
928 break;
929 default:
930 break;
931 }
932 }
933 break;
934 default:
935 break;
936 }
937 }
938 tgsi_parse_free(&p);
939
940 if (nr_imm) {
941 vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
942 assert(vpc->imm);
943 }
944
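/* high_temp/high_addr/high_const start at -1, so pre-incrementing yields
 * the number of registers to allocate (0 means the file is unused).
 */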
945 if (++high_temp) {
946 vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
947 for (i = 0; i < high_temp; i++)
948 vpc->r_temp[i] = temp(vpc);
949 }
950
951 if (++high_addr) {
952 vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
953 for (i = 0; i < high_addr; i++)
954 vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
955 }
956
957 if(++high_const) {
958 vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
959 for (i = 0; i < high_const; i++)
960 vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
961 }
962
963 vpc->r_temps_discard = 0;
964 return true;
965 }
966
967 DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false)
968
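/* Translate the TGSI program in vp->pipe into nv30/nv40 vertex program
 * instructions.  'insns' records the hardware instruction index at which
 * each TGSI instruction starts so that label relocations can be resolved
 * afterwards; if user clip planes are enabled, the post-transform position
 * is kept in a temporary and DP4s against the clip plane equations
 * (constant slots 512+) are appended at the end.
 */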
969 bool
970 _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp)
971 {
972 struct tgsi_parse_context parse;
973 struct nvfx_vpc *vpc = NULL;
974 struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
975 struct util_dynarray insns;
976 int i, ucps;
977
978 vp->translated = false;
979 vp->nr_insns = 0;
980 vp->nr_consts = 0;
981
982 vpc = CALLOC_STRUCT(nvfx_vpc);
983 if (!vpc)
984 return false;
985 vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
986 vpc->vp = vp;
987 vpc->pipe = vp->pipe;
988 vpc->info = &vp->info;
989 vpc->cvtx_idx = -1;
990
991 if (!nvfx_vertprog_prepare(vpc)) {
992 FREE(vpc);
993 return false;
994 }
995
996 /* Redirect post-transform vertex position to a temp if user clip
997 * planes are enabled. We need to append code to the vtxprog
998 * to handle clip planes later.
999 */
1000 if (vp->enabled_ucps && vpc->cvtx_idx < 0) {
1001 vpc->r_result[vpc->hpos_idx] = temp(vpc);
1002 vpc->r_temps_discard = 0;
1003 vpc->cvtx_idx = vpc->hpos_idx;
1004 }
1005
1006 util_dynarray_init(&insns);
1007
1008 tgsi_parse_init(&parse, vp->pipe.tokens);
1009 while (!tgsi_parse_end_of_tokens(&parse)) {
1010 tgsi_parse_token(&parse);
1011
1012 switch (parse.FullToken.Token.Type) {
1013 case TGSI_TOKEN_TYPE_IMMEDIATE:
1014 {
1015 const struct tgsi_full_immediate *imm;
1016
1017 imm = &parse.FullToken.FullImmediate;
1018 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
1019 assert(imm->Immediate.NrTokens == 4 + 1);
1020 vpc->imm[vpc->nr_imm++] =
1021 constant(vpc, -1,
1022 imm->u[0].Float,
1023 imm->u[1].Float,
1024 imm->u[2].Float,
1025 imm->u[3].Float);
1026 }
1027 break;
1028 case TGSI_TOKEN_TYPE_INSTRUCTION:
1029 {
1030 const struct tgsi_full_instruction *finst;
1031 unsigned idx = insns.size >> 2;
1032 util_dynarray_append(&insns, unsigned, vp->nr_insns);
1033 finst = &parse.FullToken.FullInstruction;
1034 if (!nvfx_vertprog_parse_instruction(vpc, idx, finst))
1035 goto out;
1036 }
1037 break;
1038 default:
1039 break;
1040 }
1041 }
1042
1043 util_dynarray_append(&insns, unsigned, vp->nr_insns);
1044
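/* Resolve label relocations: map each TGSI-level branch target to the
 * hardware instruction it starts at, using the offset table built above.
 */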
1045 for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
1046 {
1047 struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
1048 struct nvfx_relocation hw_reloc;
1049
1050 hw_reloc.location = label_reloc->location;
1051 hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];
1052
1053 //debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);
1054
1055 util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
1056 }
1057 util_dynarray_fini(&insns);
1058 util_dynarray_trim(&vp->branch_relocs);
1059
1060 /* XXX: what if we add a RET before?! make sure we jump here...*/
1061
1062 /* Write out HPOS if it was redirected to a temp earlier */
1063 if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
1064 struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
1065 NVFX_VP(INST_DEST_POS));
1066 struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);
1067
1068 nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
1069 }
1070
1071 /* Insert code to handle user clip planes */
1072 ucps = vp->enabled_ucps;
1073 while (ucps) {
1074 int i = ffs(ucps) - 1; ucps &= ~(1 << i);
1075 struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
1076 struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, 512 + i));
1077 struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->cvtx_idx]);
1078 unsigned mask;
1079
1080 if(vpc->is_nv4x)
1081 {
1082 switch (i) {
1083 case 0: case 3: mask = NVFX_VP_MASK_Y; break;
1084 case 1: case 4: mask = NVFX_VP_MASK_Z; break;
1085 case 2: case 5: mask = NVFX_VP_MASK_W; break;
1086 default:
1087 NOUVEAU_ERR("invalid clip dist #%d\n", i);
1088 goto out;
1089 }
1090 }
1091 else
1092 mask = NVFX_VP_MASK_X;
1093
1094 nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
1095 }
1096
1097 if (vpc->vp->nr_insns)
1098 vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
1099
1100 if(debug_get_option_nvfx_dump_vp())
1101 {
1102 debug_printf("\n");
1103 tgsi_dump(vpc->pipe.tokens, 0);
1104
1105 debug_printf("\n%s vertex program:\n", vpc->is_nv4x ? "nv4x" : "nv3x");
1106 for (i = 0; i < vp->nr_insns; i++)
1107 debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
1108 debug_printf("\n");
1109 }
1110
1111 vp->translated = true;
1112
1113 out:
1114 tgsi_parse_free(&parse);
1115 if (vpc) {
1116 util_dynarray_fini(&vpc->label_relocs);
1117 util_dynarray_fini(&vpc->loop_stack);
1118 FREE(vpc->r_temp);
1119 FREE(vpc->r_address);
1120 FREE(vpc->r_const);
1121 FREE(vpc->imm);
1122 FREE(vpc);
1123 }
1124
1125 return vp->translated;
1126 }