nv30: add a couple of missed shader caps
mesa.git: src/gallium/drivers/nouveau/nv30/nvfx_vertprog.c
#include <strings.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_dynarray.h"
#include "util/u_debug.h"
#include "util/u_memory.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_ureg.h"

#include "draw/draw_context.h"

#include "nv_object.xml.h"
#include "nouveau_debug.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_state.h"

/* TODO (at least...):
 * 1. Indexed consts + ARL
 * 3. NV_vp11, NV_vp2, NV_vp3 features
 *    - extra arith opcodes
 *    - branching
 *    - texture sampling
 *    - indexed attribs
 *    - indexed results
 * 4. bugs
 */

#include "nv30/nv30_vertprog.h"
#include "nv30/nv40_vertprog.h"

struct nvfx_loop_entry {
   unsigned brk_target;
   unsigned cont_target;
};

struct nvfx_vpc {
   struct pipe_shader_state pipe;
   struct nv30_vertprog *vp;
   struct tgsi_shader_info* info;

   struct nv30_vertprog_exec *vpi;

   unsigned r_temps;
   unsigned r_temps_discard;
   struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
   struct nvfx_reg *r_address;
   struct nvfx_reg *r_temp;
   struct nvfx_reg *r_const;
   struct nvfx_reg r_0_1;

   struct nvfx_reg *imm;
   unsigned nr_imm;

   int hpos_idx;
   int cvtx_idx;

   unsigned is_nv4x;

   struct util_dynarray label_relocs;
   struct util_dynarray loop_stack;
};

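/* Allocate a free hardware temporary from the r_temps bitmask.  Temps
 * grabbed here are also recorded in r_temps_discard so that release_temps()
 * can return them to the pool once the current TGSI instruction has been
 * emitted. */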
static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
   int idx = ffs(~vpc->r_temps) - 1;

   if (idx < 0 || (!vpc->is_nv4x && idx >= 16)) {
      NOUVEAU_ERR("out of temps!!\n");
      return nvfx_reg(NVFXSR_TEMP, 0);
   }

   vpc->r_temps |= (1 << idx);
   vpc->r_temps_discard |= (1 << idx);
   return nvfx_reg(NVFXSR_TEMP, idx);
}

static inline void
release_temps(struct nvfx_vpc *vpc)
{
   vpc->r_temps &= ~vpc->r_temps_discard;
   vpc->r_temps_discard = 0;
}

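/* Return a constant slot for the given pipe constant index, reusing an
 * existing slot if one already maps to that index.  A negative index
 * allocates an anonymous immediate holding the supplied x/y/z/w value. */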
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
   struct nv30_vertprog *vp = vpc->vp;
   struct nv30_vertprog_data *vpd;
   int idx;

   if (pipe >= 0) {
      for (idx = 0; idx < vp->nr_consts; idx++) {
         if (vp->consts[idx].index == pipe)
            return nvfx_reg(NVFXSR_CONST, idx);
      }
   }

   idx = vp->nr_consts++;
   vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
   vpd = &vp->consts[idx];

   vpd->index = pipe;
   vpd->value[0] = x;
   vpd->value[1] = y;
   vpd->value[2] = z;
   vpd->value[3] = w;
   return nvfx_reg(NVFXSR_CONST, idx);
}

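/* Build an nvfx_insn: 's' is the saturate flag, 't' selects the execution
 * slot (VEC or SCA) and 'o' the opcode.  The slot is packed above the
 * 7-bit opcode so that nvfx_vp_emit() can split the two apart again. */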
#define arith(s,t,o,d,m,s0,s1,s2) \
   nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))

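/* Encode source operand 'pos' (0..2) into the four 32-bit hardware words of
 * the current instruction.  Input usage is tracked in vp->ir, and constant
 * sources may be recorded in vp->const_relocs so their final constant-buffer
 * location can be patched in later. */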
static void
emit_src(struct nvfx_vpc *vpc, uint32_t *hw,
         int pos, struct nvfx_src src)
{
   struct nv30_vertprog *vp = vpc->vp;
   uint32_t sr = 0;
   struct nvfx_relocation reloc;

   switch (src.reg.type) {
   case NVFXSR_TEMP:
      sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
      sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
      break;
   case NVFXSR_INPUT:
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      vp->ir |= (1 << src.reg.index);
      hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
      break;
   case NVFXSR_CONST:
      sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      if (src.reg.index < 256 && src.reg.index >= -256) {
         reloc.location = vp->nr_insns - 1;
         reloc.target = src.reg.index;
         util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
      } else {
         hw[1] |= (src.reg.index << NVFX_VP(INST_CONST_SRC_SHIFT)) &
                  NVFX_VP(INST_CONST_SRC_MASK);
      }
      break;
   case NVFXSR_NONE:
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      break;
   default:
      assert(0);
   }

   if (src.negate)
      sr |= NVFX_VP(SRC_NEGATE);

   if (src.abs)
      hw[0] |= (1 << (21 + pos));

   sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
          (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
          (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
          (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

   if(src.indirect) {
      if(src.reg.type == NVFXSR_CONST)
         hw[3] |= NVFX_VP(INST_INDEX_CONST);
      else if(src.reg.type == NVFXSR_INPUT)
         hw[0] |= NVFX_VP(INST_INDEX_INPUT);
      else
         assert(0);

      if(src.indirect_reg)
         hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
      hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
   }

   switch (pos) {
   case 0:
      hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
                NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
      hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
               NVFX_VP(INST_SRC0L_SHIFT);
      break;
   case 1:
      hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
      break;
   case 2:
      hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
                NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
      hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
               NVFX_VP(INST_SRC2L_SHIFT);
      break;
   default:
      assert(0);
   }
}

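/* Encode the destination register for the given slot (0 = vector unit,
 * 1 = scalar unit).  On nv4x, written outputs are accumulated in vp->or,
 * and the clip-distance outputs are remapped onto the FOGC/PSZ result
 * slots. */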
static void
emit_dst(struct nvfx_vpc *vpc, uint32_t *hw,
         int slot, struct nvfx_reg dst)
{
   struct nv30_vertprog *vp = vpc->vp;

   switch (dst.type) {
   case NVFXSR_NONE:
      if(!vpc->is_nv4x)
         hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         else
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
      }
      break;
   case NVFXSR_TEMP:
      if(!vpc->is_nv4x)
         hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
         else
            hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
      }
      break;
   case NVFXSR_OUTPUT:
      /* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
      if(vpc->is_nv4x) {
         switch (dst.index) {
         case NV30_VP_INST_DEST_CLP(0):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 6);
            break;
         case NV30_VP_INST_DEST_CLP(1):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 7);
            break;
         case NV30_VP_INST_DEST_CLP(2):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or |= (1 << 8);
            break;
         case NV30_VP_INST_DEST_CLP(3):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 9);
            break;
         case NV30_VP_INST_DEST_CLP(4):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 10);
            break;
         case NV30_VP_INST_DEST_CLP(5):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or |= (1 << 11);
            break;
         case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
         case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
         case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
         case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
         case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
         case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
         }
      }

      if(!vpc->is_nv4x) {
         hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
         hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

         /*XXX: no way this is entirely correct, someone needs to
          * figure out what exactly it is.
          */
         hw[3] |= 0x800;
      } else {
         hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
         if (slot == 0) {
            hw[0] |= NV40_VP_INST_VEC_RESULT;
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         } else {
            hw[3] |= NV40_VP_INST_SCA_RESULT;
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         }
      }
      break;
   default:
      assert(0);
   }
}

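/* Append one hardware instruction to vp->insns and encode 'insn' into its
 * four data words: condition-code test/update, opcode (placed in either the
 * vector or the scalar slot), write mask, destination and the three
 * sources. */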
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
   struct nv30_vertprog *vp = vpc->vp;
   unsigned slot = insn.op >> 7;
   unsigned op = insn.op & 0x7f;
   uint32_t *hw;

   vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
   vpc->vpi = &vp->insns[vp->nr_insns - 1];
   memset(vpc->vpi, 0, sizeof(*vpc->vpi));

   hw = vpc->vpi->data;

   if (insn.cc_test != NVFX_COND_TR)
      hw[0] |= NVFX_VP(INST_COND_TEST_ENABLE);
   hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
   hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
             (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
             (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
             (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
   if(insn.cc_update)
      hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

   if(insn.sat) {
      assert(vpc->is_nv4x);
      if(vpc->is_nv4x)
         hw[0] |= NV40_VP_INST_SATURATE;
   }

   if(!vpc->is_nv4x) {
      if(slot == 0)
         hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
      else {
         hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
         hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
      }
//      hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//      hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

      if (insn.dst.type == NVFXSR_OUTPUT) {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
      } else {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
      }
   } else {
      if (slot == 0) {
         hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
         hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
      } else {
         hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
         hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
      }
   }

   emit_dst(vpc, hw, slot, insn.dst);
   emit_src(vpc, hw, 0, insn.src[0]);
   emit_src(vpc, hw, 1, insn.src[1]);
   emit_src(vpc, hw, 2, insn.src[2]);

//   if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//      hw[3] |= NV40_VP_INST_SCA_RESULT;
}

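/* Translate a TGSI source register into an nvfx_src, resolving the register
 * file to the registers assigned in nvfx_vertprog_prepare() and carrying
 * over negate/absolute, swizzle and (for constants and inputs) ADDR-relative
 * indirect addressing. */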
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
   struct nvfx_src src;

   switch (fsrc->Register.File) {
   case TGSI_FILE_INPUT:
      src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
      break;
   case TGSI_FILE_CONSTANT:
      if(fsrc->Register.Indirect) {
         src.reg = vpc->r_const[0];
         src.reg.index = fsrc->Register.Index;
      } else {
         src.reg = vpc->r_const[fsrc->Register.Index];
      }
      break;
   case TGSI_FILE_IMMEDIATE:
      src.reg = vpc->imm[fsrc->Register.Index];
      break;
   case TGSI_FILE_TEMPORARY:
      src.reg = vpc->r_temp[fsrc->Register.Index];
      break;
   default:
      NOUVEAU_ERR("bad src file\n");
      src.reg.index = 0;
      src.reg.type = -1;
      break;
   }

   src.abs = fsrc->Register.Absolute;
   src.negate = fsrc->Register.Negate;
   src.swz[0] = fsrc->Register.SwizzleX;
   src.swz[1] = fsrc->Register.SwizzleY;
   src.swz[2] = fsrc->Register.SwizzleZ;
   src.swz[3] = fsrc->Register.SwizzleW;
   src.indirect = 0;
   src.indirect_reg = 0;
   src.indirect_swz = 0;

   if(fsrc->Register.Indirect) {
      if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
         (fsrc->Register.File == TGSI_FILE_CONSTANT ||
          fsrc->Register.File == TGSI_FILE_INPUT)) {
         src.indirect = 1;
         src.indirect_reg = fsrc->Indirect.Index;
         src.indirect_swz = fsrc->Indirect.Swizzle;
      } else {
         src.reg.index = 0;
         src.reg.type = -1;
      }
   }

   return src;
}

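/* Translate a TGSI destination register into the nvfx_reg that was mapped
 * for it during nvfx_vertprog_prepare() (output, temporary or address
 * register). */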
static inline struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
   struct nvfx_reg dst;

   switch (fdst->Register.File) {
   case TGSI_FILE_NULL:
      dst = nvfx_reg(NVFXSR_NONE, 0);
      break;
   case TGSI_FILE_OUTPUT:
      dst = vpc->r_result[fdst->Register.Index];
      break;
   case TGSI_FILE_TEMPORARY:
      dst = vpc->r_temp[fdst->Register.Index];
      break;
   case TGSI_FILE_ADDRESS:
      dst = vpc->r_address[fdst->Register.Index];
      break;
   default:
      NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
      dst.index = 0;
      dst.type = 0;
      break;
   }

   return dst;
}

static inline int
tgsi_mask(uint tgsi)
{
   int mask = 0;

   if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
   if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
   if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
   if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
   return mask;
}

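/* Translate one TGSI instruction into one or more hardware instructions.
 * Sources are fetched first, copying through a temp when an instruction
 * needs more than one distinct attribute or constant/immediate (the
 * hardware can read only one of each per instruction), then the opcode is
 * expanded.  On nv3x, which lacks a saturate modifier, saturation is
 * emulated afterwards with a MAX/MIN against a {0,1} constant. */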
static bool
nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc,
                                unsigned idx, const struct tgsi_full_instruction *finst)
{
   struct nvfx_src src[3], tmp;
   struct nvfx_reg dst;
   struct nvfx_reg final_dst;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn;
   struct nvfx_relocation reloc;
   struct nvfx_loop_entry loop;
   bool sat = false;
   int mask;
   int ai = -1, ci = -1, ii = -1;
   int i;
   unsigned sub_depth = 0;

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];
      if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
         src[i] = tgsi_src(vpc, fsrc);
      }
   }

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];

      switch (fsrc->Register.File) {
      case TGSI_FILE_INPUT:
         if (ai == -1 || ai == fsrc->Register.Index) {
            ai = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_CONSTANT:
         if ((ci == -1 && ii == -1) ||
             ci == fsrc->Register.Index) {
            ci = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_IMMEDIATE:
         if ((ci == -1 && ii == -1) ||
             ii == fsrc->Register.Index) {
            ii = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                                    tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_TEMPORARY:
         /* handled above */
         break;
      default:
         NOUVEAU_ERR("bad src file\n");
         return false;
      }
   }

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      if(src[i].reg.type < 0)
         return false;
   }

   if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
      finst->Instruction.Opcode != TGSI_OPCODE_ARL)
      return false;

   final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
   mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
   if(finst->Instruction.Saturate) {
      assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
      if (vpc->is_nv4x)
         sat = true;
      else
      if(dst.type != NVFXSR_TEMP)
         dst = temp(vpc);
   }

   switch (finst->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
      break;
   case TGSI_OPCODE_ARL:
      nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_CEIL:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, neg(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none));
      break;
   case TGSI_OPCODE_CMP:
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
      insn.cc_test = NVFX_COND_GE;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_COS:
      nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_DP2:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
      break;
   case TGSI_OPCODE_DP3:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DP4:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DST:
      nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_EX2:
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_EXP:
      nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_FLR:
      nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FRC:
      nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_LG2:
      nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LIT:
      nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LOG:
      nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LRP:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
      break;
   case TGSI_OPCODE_MAD:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
      break;
   case TGSI_OPCODE_MAX:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MIN:
      nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MOV:
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_MUL:
      nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_NOP:
      break;
   case TGSI_OPCODE_POW:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
      break;
   case TGSI_OPCODE_RCP:
      nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_RSQ:
      nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
      break;
   case TGSI_OPCODE_SEQ:
      nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SIN:
      nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_SLE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SLT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SNE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SSG:
      nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TRUNC:
      tmp = nvfx_src(temp(vpc));
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));

      insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_IF:
      insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      insn.cc_test = NVFX_COND_EQ;
      insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_ELSE:
   case TGSI_OPCODE_CAL:
      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
         insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
      else
         insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_RET:
      if(sub_depth || !vpc->vp->enabled_ucps) {
         tmp = none;
         tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
         nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
      } else {
         reloc.location = vpc->vp->nr_insns;
         reloc.target = vpc->info->num_instructions;
         util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
         nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      }
      break;
   case TGSI_OPCODE_BGNSUB:
      ++sub_depth;
      break;
   case TGSI_OPCODE_ENDSUB:
      --sub_depth;
      break;
   case TGSI_OPCODE_ENDIF:
      /* nothing to do here */
      break;
   case TGSI_OPCODE_BGNLOOP:
      loop.cont_target = idx;
      loop.brk_target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
      break;
   case TGSI_OPCODE_ENDLOOP:
      loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_CONT:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_BRK:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.brk_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_END:
      assert(!sub_depth);
      if(vpc->vp->enabled_ucps) {
         if(idx != (vpc->info->num_instructions - 1)) {
            reloc.location = vpc->vp->nr_insns;
            reloc.target = vpc->info->num_instructions;
            util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
            nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
         }
      } else {
         if(vpc->vp->nr_insns)
            vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
         nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
         vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
      }
      break;
   default:
      NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
      return false;
   }

   if(finst->Instruction.Saturate && !vpc->is_nv4x) {
      if (!vpc->r_0_1.type)
         vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
      nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
   }

   release_temps(vpc);
   return true;
}

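/* Map a TGSI output declaration to a hardware result register.  Clip-vertex
 * outputs get a temporary, unmatched generic/texcoord and edge-flag outputs
 * are dropped, and everything else is assigned its fixed
 * NVFX_VP(INST_DEST_*) slot. */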
static bool
nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc,
                                const struct tgsi_full_declaration *fdec)
{
   unsigned num_texcoords = vpc->is_nv4x ? 10 : 8;
   unsigned idx = fdec->Range.First;
   unsigned semantic_index = fdec->Semantic.Index;
   int hw = 0, i;

   switch (fdec->Semantic.Name) {
   case TGSI_SEMANTIC_POSITION:
      hw = NVFX_VP(INST_DEST_POS);
      vpc->hpos_idx = idx;
      break;
   case TGSI_SEMANTIC_CLIPVERTEX:
      vpc->r_result[idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = idx;
      return true;
   case TGSI_SEMANTIC_COLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_COL0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_COL1);
      } else {
         NOUVEAU_ERR("bad colour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_BCOLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_BFC0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_BFC1);
      } else {
         NOUVEAU_ERR("bad bcolour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_FOG:
      hw = NVFX_VP(INST_DEST_FOGC);
      break;
   case TGSI_SEMANTIC_PSIZE:
      hw = NVFX_VP(INST_DEST_PSZ);
      break;
   case TGSI_SEMANTIC_GENERIC:
      /* this is really an identifier for VP/FP linkage */
      semantic_index += 8;
      /* fall through */
   case TGSI_SEMANTIC_TEXCOORD:
      for (i = 0; i < num_texcoords; i++) {
         if (vpc->vp->texcoord[i] == semantic_index) {
            hw = NVFX_VP(INST_DEST_TC(i));
            break;
         }
      }

      if (i == num_texcoords) {
         vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
         return true;
      }
      break;
   case TGSI_SEMANTIC_EDGEFLAG:
      vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
      return true;
   default:
      NOUVEAU_ERR("bad output semantic\n");
      return false;
   }

   vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
   return true;
}

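/* First pass over the TGSI tokens: count immediates, size the temporary,
 * address and constant register arrays, and resolve the output declarations
 * before any code is generated. */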
static bool
nvfx_vertprog_prepare(struct nvfx_vpc *vpc)
{
   struct tgsi_parse_context p;
   int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;

   tgsi_parse_init(&p, vpc->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&p)) {
      const union tgsi_full_token *tok = &p.FullToken;

      tgsi_parse_token(&p);
      switch(tok->Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
         nr_imm++;
         break;
      case TGSI_TOKEN_TYPE_DECLARATION:
      {
         const struct tgsi_full_declaration *fdec;

         fdec = &p.FullToken.FullDeclaration;
         switch (fdec->Declaration.File) {
         case TGSI_FILE_TEMPORARY:
            if (fdec->Range.Last > high_temp) {
               high_temp = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_ADDRESS:
            if (fdec->Range.Last > high_addr) {
               high_addr = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_CONSTANT:
            if (fdec->Range.Last > high_const) {
               high_const = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_OUTPUT:
            if (!nvfx_vertprog_parse_decl_output(vpc, fdec))
               return false;
            break;
         default:
            break;
         }
      }
         break;
      default:
         break;
      }
   }
   tgsi_parse_free(&p);

   if (nr_imm) {
      vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
      assert(vpc->imm);
   }

   if (++high_temp) {
      vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
      for (i = 0; i < high_temp; i++)
         vpc->r_temp[i] = temp(vpc);
   }

   if (++high_addr) {
      vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
      for (i = 0; i < high_addr; i++)
         vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
   }

   if(++high_const) {
      vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
      for (i = 0; i < high_const; i++)
         vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
   }

   vpc->r_temps_discard = 0;
   return true;
}

DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false)

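/* Translate the TGSI tokens bound to 'vp' into nv30/nv40 vertex program
 * microcode.  After the main loop, TGSI instruction indices in the collected
 * label relocations are converted to hardware instruction offsets, HPOS is
 * copied out if it was redirected to a temporary, and DP4s against the user
 * clip-plane constants are appended when clip planes are enabled.  Setting
 * the NVFX_DUMP_VP environment variable dumps the TGSI and the resulting
 * microcode. */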
bool
_nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp)
{
   struct tgsi_parse_context parse;
   struct nvfx_vpc *vpc = NULL;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct util_dynarray insns;
   int i, ucps;

   vp->translated = false;
   vp->nr_insns = 0;
   vp->nr_consts = 0;

   vpc = CALLOC_STRUCT(nvfx_vpc);
   if (!vpc)
      return false;
   vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
   vpc->vp = vp;
   vpc->pipe = vp->pipe;
   vpc->info = &vp->info;
   vpc->cvtx_idx = -1;

   if (!nvfx_vertprog_prepare(vpc)) {
      FREE(vpc);
      return false;
   }

   /* Redirect post-transform vertex position to a temp if user clip
    * planes are enabled.  We need to append code to the vtxprog
    * to handle clip planes later.
    */
   if (vp->enabled_ucps && vpc->cvtx_idx < 0) {
      vpc->r_result[vpc->hpos_idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = vpc->hpos_idx;
   }

   util_dynarray_init(&insns, NULL);

   tgsi_parse_init(&parse, vp->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch (parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
      {
         const struct tgsi_full_immediate *imm;

         imm = &parse.FullToken.FullImmediate;
         assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
         assert(imm->Immediate.NrTokens == 4 + 1);
         vpc->imm[vpc->nr_imm++] =
            constant(vpc, -1,
                     imm->u[0].Float,
                     imm->u[1].Float,
                     imm->u[2].Float,
                     imm->u[3].Float);
      }
         break;
      case TGSI_TOKEN_TYPE_INSTRUCTION:
      {
         const struct tgsi_full_instruction *finst;
         unsigned idx = insns.size >> 2;
         util_dynarray_append(&insns, unsigned, vp->nr_insns);
         finst = &parse.FullToken.FullInstruction;
         if (!nvfx_vertprog_parse_instruction(vpc, idx, finst))
            goto out;
      }
         break;
      default:
         break;
      }
   }

   util_dynarray_append(&insns, unsigned, vp->nr_insns);

   for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
   {
      struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
      struct nvfx_relocation hw_reloc;

      hw_reloc.location = label_reloc->location;
      hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

      //debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

      util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
   }
   util_dynarray_fini(&insns);
   util_dynarray_trim(&vp->branch_relocs);

   /* XXX: what if we add a RET before?! make sure we jump here... */

   /* Write out HPOS if it was redirected to a temp earlier */
   if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
      struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
                                      NVFX_VP(INST_DEST_POS));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

      nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
   }

   /* Insert code to handle user clip planes */
   ucps = vp->enabled_ucps;
   while (ucps) {
      int i = ffs(ucps) - 1; ucps &= ~(1 << i);
      struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
      struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, 512 + i));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->cvtx_idx]);
      unsigned mask;

      if(vpc->is_nv4x)
      {
         switch (i) {
         case 0: case 3: mask = NVFX_VP_MASK_Y; break;
         case 1: case 4: mask = NVFX_VP_MASK_Z; break;
         case 2: case 5: mask = NVFX_VP_MASK_W; break;
         default:
            NOUVEAU_ERR("invalid clip dist #%d\n", i);
            goto out;
         }
      }
      else
         mask = NVFX_VP_MASK_X;

      nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
   }

   if (vpc->vp->nr_insns)
      vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

   if(debug_get_option_nvfx_dump_vp())
   {
      debug_printf("\n");
      tgsi_dump(vpc->pipe.tokens, 0);

      debug_printf("\n%s vertex program:\n", vpc->is_nv4x ? "nv4x" : "nv3x");
      for (i = 0; i < vp->nr_insns; i++)
         debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
      debug_printf("\n");
   }

   vp->translated = true;

out:
   tgsi_parse_free(&parse);
   if (vpc) {
      util_dynarray_fini(&vpc->label_relocs);
      util_dynarray_fini(&vpc->loop_stack);
      FREE(vpc->r_temp);
      FREE(vpc->r_address);
      FREE(vpc->r_const);
      FREE(vpc->imm);
      FREE(vpc);
   }

   return vp->translated;
}