#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_linkage.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_ureg.h"

#include "draw/draw_context.h"

#include "nvfx_context.h"
#include "nvfx_state.h"
#include "nvfx_resource.h"

/* TODO (at least...):
 * 1. Indexed consts + ARL
 * 2. NV_vp11, NV_vp2, NV_vp3 features
 *    - extra arith opcodes
 *    - branching
 *    - texture sampling
 *    - indexed attribs
 *    - indexed results
 * 3. bugs
 */

#include "nv30_vertprog.h"
#include "nv40_vertprog.h"

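/* Break/continue targets (TGSI instruction indices) for the innermost loop;
 * pushed on vpc->loop_stack at BGNLOOP and consumed by BRK/CONT/ENDLOOP. */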
struct nvfx_loop_entry
{
	unsigned brk_target;
	unsigned cont_target;
};

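/* Per-compile context used while translating one TGSI vertex shader into
 * hardware instructions.  The r_* arrays map TGSI register indices to
 * hardware registers. */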
struct nvfx_vpc {
	struct nvfx_context* nvfx;
	struct pipe_shader_state pipe;
	struct nvfx_vertex_program *vp;
	struct tgsi_shader_info* info;

	struct nvfx_vertex_program_exec *vpi;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_reg *r_address;
	struct nvfx_reg *r_temp;
	struct nvfx_reg *r_const;
	struct nvfx_reg r_0_1;

	struct nvfx_reg *imm;
	unsigned nr_imm;

	unsigned hpos_idx;

	struct util_dynarray label_relocs;
	struct util_dynarray loop_stack;
};

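/* Allocate a hardware temporary.  r_temps is a bitmask of allocated temps;
 * temps also marked in r_temps_discard are per-instruction scratch registers
 * that release_temps() frees again after each TGSI instruction. */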
static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
	int idx = ffs(~vpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		assert(0);
		return nvfx_reg(NVFXSR_TEMP, 0);
	}

	vpc->r_temps |= (1 << idx);
	vpc->r_temps_discard |= (1 << idx);
	return nvfx_reg(NVFXSR_TEMP, idx);
}

static inline void
release_temps(struct nvfx_vpc *vpc)
{
	vpc->r_temps &= ~vpc->r_temps_discard;
	vpc->r_temps_discard = 0;
}

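/* Return a constant slot holding (x, y, z, w).  pipe >= 0 names a slot of
 * the bound constant buffer, whose current value is copied in at validate
 * time; pipe == -1 allocates an anonymous immediate. */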
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	struct nvfx_vertex_program_data *vpd;
	int idx;

	if (pipe >= 0) {
		for (idx = 0; idx < vp->nr_consts; idx++) {
			if (vp->consts[idx].index == pipe)
				return nvfx_reg(NVFXSR_CONST, idx);
		}
	}

	idx = vp->nr_consts++;
	vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
	vpd = &vp->consts[idx];

	vpd->index = pipe;
	vpd->value[0] = x;
	vpd->value[1] = y;
	vpd->value[2] = z;
	vpd->value[3] = w;
	return nvfx_reg(NVFXSR_CONST, idx);
}

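/* Build an nvfx_insn for the given execution slot: t is VEC or SCA (the two
 * functional units), o the opcode within that unit.  The slot is packed into
 * bit 7 of insn.op and unpacked again by nvfx_vp_emit(). */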
#define arith(s,t,o,d,m,s0,s1,s2) \
	nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))

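/* Pack one source operand into the 128-bit instruction: the source
 * descriptor `sr` is assembled first, then ORed into hw[] at a split that
 * depends on the operand position.  Constant sources also get a relocation
 * record so they can be rebased when the program's const heap slot moves. */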
static void
emit_src(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int pos, struct nvfx_src src)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t sr = 0;
	struct nvfx_relocation reloc;

	switch (src.reg.type) {
	case NVFXSR_TEMP:
		sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
		sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
		break;
	case NVFXSR_INPUT:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		vp->ir |= (1 << src.reg.index);
		hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
		break;
	case NVFXSR_CONST:
		sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		reloc.location = vp->nr_insns - 1;
		reloc.target = src.reg.index;
		util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_VP(SRC_NEGATE);

	if (src.abs)
		hw[0] |= (1 << (21 + pos));

	sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
	       (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
	       (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
	       (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

	if(src.indirect) {
		if(src.reg.type == NVFXSR_CONST)
			hw[3] |= NVFX_VP(INST_INDEX_CONST);
		else if(src.reg.type == NVFXSR_INPUT)
			hw[0] |= NVFX_VP(INST_INDEX_INPUT);
		else
			assert(0);
		if(src.indirect_reg)
			hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
		hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
	}

	switch (pos) {
	case 0:
		hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
			  NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
		hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
			 NVFX_VP(INST_SRC0L_SHIFT);
		break;
	case 1:
		hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
		break;
	case 2:
		hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
			  NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
		hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
			 NVFX_VP(INST_SRC2L_SHIFT);
		break;
	default:
		assert(0);
	}
}

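/* Pack the destination operand.  nv30 and nv40 encode destinations
 * differently; for outputs on nv40 we also record which result registers
 * the program writes in vp->or. */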
static void
emit_dst(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int slot, struct nvfx_reg dst)
{
	struct nvfx_vertex_program *vp = vpc->vp;

	switch (dst.type) {
	case NVFXSR_NONE:
		if(!nvfx->is_nv4x)
			hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			else
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
		}
		break;
	case NVFXSR_TEMP:
		if(!nvfx->is_nv4x)
			hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
			else
				hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
		}
		break;
	case NVFXSR_OUTPUT:
		/* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
		if(nvfx->is_nv4x) {
			switch (dst.index) {
			case NV30_VP_INST_DEST_CLP(0):
			case NV30_VP_INST_DEST_CLP(1):
			case NV30_VP_INST_DEST_CLP(2):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(3):
			case NV30_VP_INST_DEST_CLP(4):
			case NV30_VP_INST_DEST_CLP(5):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
			case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
			case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
			case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
			case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
			case NV40_VP_INST_DEST_PSZ: vp->or |= (1 << 5); break;
			}
		}

		if(!nvfx->is_nv4x) {
			hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
			hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

			/*XXX: no way this is entirely correct, someone needs to
			 * figure out what exactly it is.
			 */
			hw[3] |= 0x800;
		} else {
			hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
			if (slot == 0) {
				hw[0] |= NV40_VP_INST_VEC_RESULT;
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			} else {
				hw[3] |= NV40_VP_INST_SCA_RESULT;
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			}
		}
		break;
	default:
		assert(0);
	}
}

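/* Append one 128-bit instruction (four dwords in vpc->vpi->data) to the
 * program.  Condition-code fields live in hw[0]; the opcode and writemask
 * are encoded differently on nv30 and nv40. */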
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
	struct nvfx_context* nvfx = vpc->nvfx;
	struct nvfx_vertex_program *vp = vpc->vp;
	unsigned slot = insn.op >> 7;
	unsigned op = insn.op & 0x7f;
	uint32_t *hw;

	vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
	vpc->vpi = &vp->insns[vp->nr_insns - 1];
	memset(vpc->vpi, 0, sizeof(*vpc->vpi));

	hw = vpc->vpi->data;

	hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
	hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
		  (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
		  (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
		  (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
	if(insn.cc_update)
		hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

	if(insn.sat)
	{
		assert(nvfx->use_nv4x);
		if(nvfx->use_nv4x)
			hw[0] |= NV40_VP_INST_SATURATE;
	}

	if(!nvfx->is_nv4x) {
		if(slot == 0)
			hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
		else
		{
			hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
			hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
		}
//		hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//		hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

		if (insn.dst.type == NVFXSR_OUTPUT) {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
		} else {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
		}
	} else {
		if (slot == 0) {
			hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
			hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
		} else {
			hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
			hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
		}
	}

	emit_dst(nvfx, vpc, hw, slot, insn.dst);
	emit_src(nvfx, vpc, hw, 0, insn.src[0]);
	emit_src(nvfx, vpc, hw, 1, insn.src[1]);
	emit_src(nvfx, vpc, hw, 2, insn.src[2]);

//	if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//		hw[3] |= NV40_VP_INST_SCA_RESULT;
}

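/* Translate a TGSI source operand into an nvfx_src, resolving the register
 * file and any address-register indirection. */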
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc)
{
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
		break;
	case TGSI_FILE_CONSTANT:
		src.reg = vpc->r_const[fsrc->Register.Index];
		break;
	case TGSI_FILE_IMMEDIATE:
		src.reg = vpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = vpc->r_temp[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg.index = 0;
		src.reg.type = -1;
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	src.indirect = 0;
	src.indirect_reg = 0;
	src.indirect_swz = 0;

	if(fsrc->Register.Indirect) {
		if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
		   (fsrc->Register.File == TGSI_FILE_CONSTANT || fsrc->Register.File == TGSI_FILE_INPUT))
		{
			src.indirect = 1;
			src.indirect_reg = fsrc->Indirect.Index;
			src.indirect_swz = fsrc->Indirect.SwizzleX;
		}
		else
		{
			src.reg.index = 0;
			src.reg.type = -1;
		}
	}
	return src;
}

static inline struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst)
{
	struct nvfx_reg dst;

	switch (fdst->Register.File) {
	case TGSI_FILE_NULL:
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	case TGSI_FILE_OUTPUT:
		dst = vpc->r_result[fdst->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		dst = vpc->r_temp[fdst->Register.Index];
		break;
	case TGSI_FILE_ADDRESS:
		dst = vpc->r_address[fdst->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
		dst.index = 0;
		dst.type = 0;
		break;
	}

	return dst;
}

static inline int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
	return mask;
}

static boolean
nvfx_vertprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				unsigned idx, const struct tgsi_full_instruction *finst)
{
	struct nvfx_src src[3], tmp;
	struct nvfx_reg dst;
	struct nvfx_reg final_dst;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn;
	struct nvfx_relocation reloc;
	struct nvfx_loop_entry loop;
	boolean sat = FALSE;
	int mask;
	int ai = -1, ci = -1, ii = -1;
	int i;
	unsigned sub_depth = 0;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(vpc, fsrc);
		}
	}

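	/* The hardware can read at most one attribute and one constant or
	 * immediate per instruction, so additional distinct reads are first
	 * copied into temporaries with extra MOVs (ai/ci/ii track the one
	 * register of each kind already referenced). */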
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		if(src[i].reg.type < 0)
			return FALSE;
	}

	if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
	   finst->Instruction.Opcode != TGSI_OPCODE_ARL)
		return FALSE;

	final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	if(finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE)
	{
		assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
		if(nvfx->use_nv4x)
			sat = TRUE;
		else if(dst.type != NVFXSR_TEMP)
			dst = temp(vpc);
	}

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, abs(src[0]), none, none));
		break;
	case TGSI_OPCODE_ADD:
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
		break;
	case TGSI_OPCODE_ARL:
		nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_CMP:
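		/* dst = (src0 < 0) ? src1 : src2, done with condition codes:
		 * load CC from src0, then two predicated MOVs. */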
		insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
		insn.cc_test = NVFX_COND_GE;
		nvfx_vp_emit(vpc, insn);

		insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_COS:
		nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_DP2:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
		break;
	case TGSI_OPCODE_DP3:
		nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DP4:
		nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DPH:
		nvfx_vp_emit(vpc, arith(sat, VEC, DPH, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DST:
		nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_EX2:
		nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_EXP:
		nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_FLR:
		nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FRC:
		nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_LG2:
		nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LIT:
		nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LOG:
		nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LRP:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
		break;
	case TGSI_OPCODE_MAD:
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
		break;
	case TGSI_OPCODE_MAX:
		nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MIN:
		nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MOV:
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_MUL:
		nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_NOP:
		break;
	case TGSI_OPCODE_POW:
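		/* pow(x, y) = 2^(y * log2(x)): scalar LG2, vector MUL, scalar EX2. */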
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
		break;
	case TGSI_OPCODE_RCP:
		nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_RSQ:
		nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
		break;
	case TGSI_OPCODE_SEQ:
		nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SFL:
		nvfx_vp_emit(vpc, arith(sat, VEC, SFL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGT:
		nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SIN:
		nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_SLE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SLT:
		nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SNE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SSG:
		/* SSG is unary in TGSI: only src[0] is defined here. */
		nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_STR:
		nvfx_vp_emit(vpc, arith(sat, VEC, STR, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SUB:
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, neg(src[1])));
		break;
	case TGSI_OPCODE_TRUNC:
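		/* trunc(x) = sign(x) * floor(|x|): floor the absolute value,
		 * then overwrite with the negated result where src0 < 0
		 * (tested via condition codes). */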
		tmp = nvfx_src(temp(vpc));
		insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));

		insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_XPD:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
		break;

	case TGSI_OPCODE_IF:
		insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
		insn.cc_test = NVFX_COND_EQ;
		insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
		nvfx_vp_emit(vpc, insn);
		break;

	case TGSI_OPCODE_ELSE:
	case TGSI_OPCODE_BRA:
	case TGSI_OPCODE_CAL:
		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
			insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
		else
			insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
		nvfx_vp_emit(vpc, insn);
		break;

	case TGSI_OPCODE_RET:
		if(sub_depth || !nvfx->use_vp_clipping) {
			tmp = none;
			tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
			nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
		} else {
			reloc.location = vpc->vp->nr_insns;
			reloc.target = vpc->info->num_instructions;
			util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
			nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		}
		break;

	case TGSI_OPCODE_BGNSUB:
		++sub_depth;
		break;
	case TGSI_OPCODE_ENDSUB:
		--sub_depth;
		break;
	case TGSI_OPCODE_ENDIF:
		/* nothing to do here */
		break;

	case TGSI_OPCODE_BGNLOOP:
		loop.cont_target = idx;
		loop.brk_target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
		break;

	case TGSI_OPCODE_ENDLOOP:
		loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;

	case TGSI_OPCODE_CONT:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;

	case TGSI_OPCODE_BRK:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.brk_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;

	case TGSI_OPCODE_END:
		assert(!sub_depth);
		if(nvfx->use_vp_clipping) {
			if(idx != (vpc->info->num_instructions - 1)) {
				reloc.location = vpc->vp->nr_insns;
				reloc.target = vpc->info->num_instructions;
				util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
				nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
			}
		} else {
			if(vpc->vp->nr_insns)
				vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
			nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
			vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
		}
		break;

	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

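	/* nv3x has no saturate modifier: emulate [0, 1] clamping with a
	 * MAX/MIN pair against the constant (0, 1, 0, 0). */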
	if(finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE && !nvfx->use_nv4x)
	{
		if(!vpc->r_0_1.type)
			vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
		nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
	}

	release_temps(vpc);
	return TRUE;
}

static boolean
nvfx_vertprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	int hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_VP(INST_DEST_POS);
		vpc->hpos_idx = idx;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_COL0);
		} else if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_COL1);
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_BCOLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_BFC0);
		} else if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_BFC1);
		} else {
			NOUVEAU_ERR("bad bcolour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_VP(INST_DEST_FOGC);
		break;
	case TGSI_SEMANTIC_PSIZE:
		hw = NVFX_VP(INST_DEST_PSZ);
		break;
	case TGSI_SEMANTIC_GENERIC:
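		/* GENERIC outputs were mapped to fragment-program texcoord
		 * slots in nvfx_vertprog_prepare(); recover the TC index
		 * from that mapping. */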
		hw = (vpc->vp->generic_to_fp_input[fdec->Semantic.Index] & 0xf) - NVFX_FP_OP_INPUT_SRC_TC(0);
		if(hw <= 8)
			hw = NVFX_VP(INST_DEST_TC(hw));
		else if(hw == 9) /* TODO: this is correct, but how does this overlapping work exactly? */
			hw = NV40_VP_INST_DEST_PSZ;
		else
			assert(0);
		break;
	case TGSI_SEMANTIC_EDGEFLAG:
		/* not really an error, just a fallback */
		NOUVEAU_ERR("cannot handle edgeflag output\n");
		return FALSE;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	return TRUE;
}

static boolean
nvfx_vertprog_prepare(struct nvfx_context* nvfx, struct nvfx_vpc *vpc)
{
	struct tgsi_parse_context p;
	int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;
	struct util_semantic_set set;
	unsigned char sem_layout[10];
	unsigned num_outputs;
	unsigned num_texcoords = nvfx->is_nv4x ? 10 : 8;

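	/* Lay the written output semantics out over the texcoord slots the
	 * fragment program will read from (8 on nv3x, 10 on nv4x). */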
	num_outputs = util_semantic_set_from_program_file(&set, vpc->pipe.tokens, TGSI_FILE_OUTPUT);

	if(num_outputs > num_texcoords) {
		NOUVEAU_ERR("too many vertex program outputs: %i\n", num_outputs);
		return FALSE;
	}
	util_semantic_layout_from_set(sem_layout, &set, num_texcoords, num_texcoords);

	/* hope 0xf is (0, 0, 0, 1) initialized; otherwise, we are _probably_ not required to do this */
	memset(vpc->vp->generic_to_fp_input, 0x0f, sizeof(vpc->vp->generic_to_fp_input));
	for(int i = 0; i < num_texcoords; ++i) {
		if(sem_layout[i] == 0xff)
			continue;
		//printf("vp: GENERIC[%i] to fpreg %i\n", sem_layout[i], NVFX_FP_OP_INPUT_SRC_TC(0) + i);
		vpc->vp->generic_to_fp_input[sem_layout[i]] = 0xf0 | NVFX_FP_OP_INPUT_SRC_TC(i);
	}

	vpc->vp->sprite_fp_input = -1;
	for(int i = 0; i < num_texcoords; ++i)
	{
		if(sem_layout[i] == 0xff)
		{
			vpc->vp->sprite_fp_input = NVFX_FP_OP_INPUT_SRC_TC(i);
			break;
		}
	}

	tgsi_parse_init(&p, vpc->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			nr_imm++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;

			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp)
					high_temp = fdec->Range.Last;
				break;
			case TGSI_FILE_ADDRESS:
				if (fdec->Range.Last > high_addr)
					high_addr = fdec->Range.Last;
				break;
			case TGSI_FILE_CONSTANT:
				if (fdec->Range.Last > high_const)
					high_const = fdec->Range.Last;
				break;
			case TGSI_FILE_OUTPUT:
				if (!nvfx_vertprog_parse_decl_output(nvfx, vpc, fdec))
					return FALSE;
				break;
			default:
				break;
			}
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (nr_imm) {
		vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
		assert(vpc->imm);
	}

	if (++high_temp) {
		vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			vpc->r_temp[i] = temp(vpc);
	}

	if (++high_addr) {
		vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
		for (i = 0; i < high_addr; i++)
			vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
	}

	if(++high_const) {
		vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
		for (i = 0; i < high_const; i++)
			vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
	}

	vpc->r_temps_discard = 0;
	return TRUE;
}

DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE)

static struct nvfx_vertex_program*
nvfx_vertprog_translate(struct nvfx_context *nvfx, const struct pipe_shader_state* vps, struct tgsi_shader_info* info)
{
	struct tgsi_parse_context parse;
	struct nvfx_vertex_program* vp = NULL;
	struct nvfx_vpc *vpc = NULL;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct util_dynarray insns;
	int i;

	tgsi_parse_init(&parse, vps->tokens);

	vp = CALLOC_STRUCT(nvfx_vertex_program);
	if(!vp)
		goto out_err;

	vpc = CALLOC_STRUCT(nvfx_vpc);
	if (!vpc)
		goto out_err;

	vpc->nvfx = nvfx;
	vpc->vp = vp;
	vpc->pipe = *vps;
	vpc->info = info;

	{
		// TODO: use a 64-bit atomic here!
		static unsigned long long id = 0;
		vp->id = ++id;
	}

	/* reserve space for ucps */
	if(nvfx->use_vp_clipping)
	{
		for(i = 0; i < 6; ++i)
			constant(vpc, -1, 0, 0, 0, 0);
	}

	if (!nvfx_vertprog_prepare(nvfx, vpc))
		goto out_err;

	/* Redirect post-transform vertex position to a temp if user clip
	 * planes are enabled.  We need to append code to the vtxprog
	 * to handle clip planes later.
	 */
	/* TODO: maybe support patching this depending on whether there are ucps: not sure if it really matters much */
	if (nvfx->use_vp_clipping) {
		vpc->r_result[vpc->hpos_idx] = temp(vpc);
		vpc->r_temps_discard = 0;
	}

	util_dynarray_init(&insns);
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			const struct tgsi_full_immediate *imm;

			imm = &parse.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(imm->Immediate.NrTokens == 4 + 1);
			vpc->imm[vpc->nr_imm++] =
				constant(vpc, -1,
					 imm->u[0].Float,
					 imm->u[1].Float,
					 imm->u[2].Float,
					 imm->u[3].Float);
		}
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			unsigned idx = insns.size >> 2;
			util_dynarray_append(&insns, unsigned, vp->nr_insns);
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_vertprog_parse_instruction(nvfx, vpc, idx, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	util_dynarray_append(&insns, unsigned, vp->nr_insns);

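	/* Resolve label relocations: the `insns` array maps each TGSI
	 * instruction index to the hardware instruction offset where its
	 * code starts, giving branch targets in hardware units. */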
	for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
	{
		struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
		struct nvfx_relocation hw_reloc;

		hw_reloc.location = label_reloc->location;
		hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

		//debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

		util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
	}
	util_dynarray_fini(&insns);
	util_dynarray_trim(&vp->branch_relocs);

	/* XXX: what if we add a RET before?! make sure we jump here...*/

	/* Write out HPOS if it was redirected to a temp earlier */
	if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
		struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
						NVFX_VP(INST_DEST_POS));
		struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

		nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
	}

	/* Insert code to handle user clip planes */
	if(nvfx->use_vp_clipping)
	{
		for (i = 0; i < 6; i++) {
			struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
			struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, i));
			struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);
			unsigned mask;

			if(nvfx->is_nv4x)
			{
				switch (i) {
				case 0: case 3: mask = NVFX_VP_MASK_Y; break;
				case 1: case 4: mask = NVFX_VP_MASK_Z; break;
				case 2: case 5: mask = NVFX_VP_MASK_W; break;
				default:
					NOUVEAU_ERR("invalid clip dist #%d\n", i);
					goto out_err;
				}
			}
			else
				mask = NVFX_VP_MASK_X;

			nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
		}
	}

	if(debug_get_option_nvfx_dump_vp())
	{
		debug_printf("\n");
		tgsi_dump(vpc->pipe.tokens, 0);

		debug_printf("\n%s vertex program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
		for (i = 0; i < vp->nr_insns; i++)
			debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
		debug_printf("\n");
	}

	vp->clip_nr = -1;
	vp->exec_start = -1;

out:
	tgsi_parse_free(&parse);
	if(vpc) {
		util_dynarray_fini(&vpc->label_relocs);
		util_dynarray_fini(&vpc->loop_stack);
		FREE(vpc->r_temp);
		FREE(vpc->r_address);
		FREE(vpc->r_const);
		FREE(vpc->imm);
		FREE(vpc);
	}
	return vp;

out_err:
	FREE(vp);
	vp = NULL;
	goto out;
}

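/* Build a passthrough vertex shader (each input copied straight to the
 * corresponding output) for the swtnl path, where the draw module has
 * already transformed the vertices. */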
static struct nvfx_vertex_program*
nvfx_vertprog_translate_draw_vp(struct nvfx_context *nvfx, struct nvfx_pipe_vertex_program* pvp)
{
	struct nvfx_vertex_program* vp = NULL;
	struct pipe_shader_state vps;
	struct tgsi_shader_info info;
	struct ureg_program *ureg = NULL;
	unsigned num_outputs = MIN2(pvp->info.num_outputs, 16);

	ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
	if(ureg == NULL)
		return NULL;

	for (unsigned i = 0; i < num_outputs; i++)
		ureg_MOV(ureg, ureg_DECL_output(ureg, pvp->info.output_semantic_name[i], pvp->info.output_semantic_index[i]), ureg_DECL_vs_input(ureg, i));

	ureg_END(ureg);

	vps.tokens = ureg_get_tokens(ureg, 0);
	tgsi_scan_shader(vps.tokens, &info);
	vp = nvfx_vertprog_translate(nvfx, &vps, &info);
	ureg_free_tokens(vps.tokens);
	ureg_destroy(ureg);

	return vp;
}

boolean
nvfx_vertprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_screen *screen = nvfx->screen;
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	struct nvfx_pipe_vertex_program *pvp = nvfx->vertprog;
	struct nvfx_vertex_program* vp;
	struct pipe_resource *constbuf;
	boolean upload_code = FALSE, upload_data = FALSE;
	int i;

	if (nvfx->render_mode == HW) {
		nvfx->fallback_swtnl &= ~NVFX_NEW_VERTPROG;
		vp = pvp->vp;

		if(!vp) {
			vp = nvfx_vertprog_translate(nvfx, &pvp->pipe, &pvp->info);
			if(!vp)
				vp = NVFX_VP_FAILED;
			pvp->vp = vp;
		}

		if(vp == NVFX_VP_FAILED) {
			nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
			return FALSE;
		}

		constbuf = nvfx->constbuf[PIPE_SHADER_VERTEX];
	} else {
		vp = pvp->draw_vp;
		if(!vp)
		{
			pvp->draw_vp = vp = nvfx_vertprog_translate_draw_vp(nvfx, pvp);
			if(!vp) {
				_debug_printf("Error: unable to create a swtnl passthrough vertex shader: aborting.\n");
				abort();
			}
		}
		constbuf = NULL;
	}

	nvfx->hw_vertprog = vp;

	/* Allocate hw vtxprog exec slots */
	if (!vp->exec) {
		struct nouveau_resource *heap = nvfx->screen->vp_exec_heap;
		uint vplen = vp->nr_insns;

		if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
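			/* The exec heap is full: evict other programs' code
			 * until there is room, then retry the allocation. */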
			while (heap->next && heap->size < vplen) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->exec);
			}

			if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
			{
				debug_printf("Vertex shader too long: %u instructions\n", vplen);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		upload_code = TRUE;
	}

	/* Allocate hw vtxprog const slots */
	if (vp->nr_consts && !vp->data) {
		struct nouveau_resource *heap = nvfx->screen->vp_data_heap;

		if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
			while (heap->next && heap->size < vp->nr_consts) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->data);
			}

			if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
			{
				debug_printf("Vertex shader uses too many constants: %u constants\n", vp->nr_consts);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		//printf("start at %u nc %u\n", vp->data->start, vp->nr_consts);

		/*XXX: handle this some day */
		assert(vp->data->start >= vp->data_start_min);

		upload_data = TRUE;
		if (vp->data_start != vp->data->start)
			upload_code = TRUE;
	}

	/* If exec or data segments moved we need to patch the program to
	 * fixup offsets and register IDs.
	 */
	if (vp->exec_start != vp->exec->start) {
		//printf("vp_relocs %u -> %u\n", vp->exec_start, vp->exec->start);
		for(unsigned i = 0; i < vp->branch_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->branch_relocs.data + i);
			uint32_t* hw = vp->insns[reloc->location].data;
			unsigned target = vp->exec->start + reloc->target;

			//debug_printf("vp_reloc hw %u -> hw %u\n", reloc->location, target);

			if(!nvfx->is_nv4x)
			{
				hw[2] &= ~NV30_VP_INST_IADDR_MASK;
				hw[2] |= (target & 0x1ff) << NV30_VP_INST_IADDR_SHIFT;
			}
			else
			{
				hw[3] &= ~NV40_VP_INST_IADDRL_MASK;
				hw[3] |= (target & 7) << NV40_VP_INST_IADDRL_SHIFT;

				hw[2] &= ~NV40_VP_INST_IADDRH_MASK;
				hw[2] |= ((target >> 3) & 0x3f) << NV40_VP_INST_IADDRH_SHIFT;
			}
		}

		vp->exec_start = vp->exec->start;
	}

	if (vp->data && vp->data_start != vp->data->start) {
		for(unsigned i = 0; i < vp->const_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->const_relocs.data + i);
			struct nvfx_vertex_program_exec *vpi = &vp->insns[reloc->location];

			//printf("reloc %i to %i + %i\n", reloc->location, vp->data->start, reloc->target);

			vpi->data[1] &= ~NVFX_VP(INST_CONST_SRC_MASK);
			vpi->data[1] |=
				(reloc->target + vp->data->start) <<
				NVFX_VP(INST_CONST_SRC_SHIFT);
		}

		vp->data_start = vp->data->start;
		upload_code = TRUE;
	}

	/* Update + Upload constant values */
	if (vp->nr_consts) {
		float *map = NULL;

		if (constbuf)
			map = (float*)nvfx_buffer(constbuf)->data;

		/*
		WAIT_RING(chan, 512 * 6);
		for (i = 0; i < 512; i++) {
			float v[4] = {0.1, 0.2, 0.3, 0.4};
			OUT_RING(chan, RING_3D(NV30_3D_VP_UPLOAD_CONST_ID, 5));
			OUT_RING(chan, i);
			OUT_RINGp(chan, (uint32_t *)v, 4);
			printf("frob %i\n", i);
		}
		*/

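		/* When vp clipping is used, the first 6 slots are reserved for
		 * the user clip plane equations; skip them here, as they are
		 * presumably filled in by the clip-plane state code. */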
		for (i = nvfx->use_vp_clipping ? 6 : 0; i < vp->nr_consts; i++) {
			struct nvfx_vertex_program_data *vpd = &vp->consts[i];

			if (vpd->index >= 0) {
				if (!upload_data &&
				    !memcmp(vpd->value, &map[vpd->index * 4],
					    4 * sizeof(float)))
					continue;
				memcpy(vpd->value, &map[vpd->index * 4],
				       4 * sizeof(float));
			}

			//printf("upload into %i + %i: %f %f %f %f\n", vp->data->start, i, vpd->value[0], vpd->value[1], vpd->value[2], vpd->value[3]);

			BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_CONST_ID, 5);
			OUT_RING(chan, i + vp->data->start);
			OUT_RINGp(chan, (uint32_t *)vpd->value, 4);
		}
	}

	/* Upload vtxprog */
	if (upload_code) {
		BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_FROM_ID, 1);
		OUT_RING(chan, vp->exec->start);
		for (i = 0; i < vp->nr_insns; i++) {
			BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_INST(0), 4);
			//printf("%08x %08x %08x %08x\n", vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
			OUT_RINGp(chan, vp->insns[i].data, 4);
		}
		vp->clip_nr = -1;
	}

	if(nvfx->dirty & (NVFX_NEW_VERTPROG))
	{
		BEGIN_RING(chan, eng3d, NV30_3D_VP_START_FROM_ID, 1);
		OUT_RING(chan, vp->exec->start);
		if(nvfx->is_nv4x) {
			BEGIN_RING(chan, eng3d, NV40_3D_VP_ATTRIB_EN, 1);
			OUT_RING(chan, vp->ir);
		}
	}

	return TRUE;
}

void
nvfx_vertprog_destroy(struct nvfx_context *nvfx, struct nvfx_vertex_program *vp)
{
	if (vp->nr_insns)
		FREE(vp->insns);

	if (vp->nr_consts)
		FREE(vp->consts);

	nouveau_resource_free(&vp->exec);
	nouveau_resource_free(&vp->data);

	util_dynarray_fini(&vp->branch_relocs);
	util_dynarray_fini(&vp->const_relocs);
	FREE(vp);
}

static void *
nvfx_vp_state_create(struct pipe_context *pipe, const struct pipe_shader_state *cso)
{
	struct nvfx_pipe_vertex_program *pvp;

	pvp = CALLOC(1, sizeof(struct nvfx_pipe_vertex_program));
	if (!pvp)
		return NULL;

	pvp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
	tgsi_scan_shader(pvp->pipe.tokens, &pvp->info);
	pvp->draw_elements = MAX2(1, MIN2(pvp->info.num_outputs, 16));
	pvp->draw_no_elements = pvp->info.num_outputs == 0;

	return (void *)pvp;
}

static void
nvfx_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);

	nvfx->vertprog = hwcso;
	nvfx->dirty |= NVFX_NEW_VERTPROG;
	nvfx->draw_dirty |= NVFX_NEW_VERTPROG;
}

static void
nvfx_vp_state_delete(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_pipe_vertex_program *pvp = hwcso;

	if(pvp->draw_vs)
		draw_delete_vertex_shader(nvfx->draw, pvp->draw_vs);
	if(pvp->vp && pvp->vp != NVFX_VP_FAILED)
		nvfx_vertprog_destroy(nvfx, pvp->vp);
	if(pvp->draw_vp)
		nvfx_vertprog_destroy(nvfx, pvp->draw_vp);
	FREE((void*)pvp->pipe.tokens);
	FREE(pvp);
}

void
nvfx_init_vertprog_functions(struct nvfx_context *nvfx)
{
	nvfx->pipe.create_vs_state = nvfx_vp_state_create;
	nvfx->pipe.bind_vs_state = nvfx_vp_state_bind;
	nvfx->pipe.delete_vs_state = nvfx_vp_state_delete;
}