mesa.git: src/gallium/drivers/nv40/nv40_vertprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "pipe/p_inlines.h"
5
6 #include "pipe/p_shader_tokens.h"
7 #include "tgsi/tgsi_parse.h"
8 #include "tgsi/tgsi_util.h"
9
10 #include "nv40_context.h"
11 #include "nv40_state.h"
12
13 /* TODO (at least...):
14 * 1. Indexed consts + ARL
15  * 2. NV_vp11, NV_vp2, NV_vp3 features
16 * - extra arith opcodes
17 * - branching
18 * - texture sampling
19 * - indexed attribs
20 * - indexed results
21  * 3. bugs
22 */
23
24 #define SWZ_X 0
25 #define SWZ_Y 1
26 #define SWZ_Z 2
27 #define SWZ_W 3
28 #define MASK_X 8
29 #define MASK_Y 4
30 #define MASK_Z 2
31 #define MASK_W 1
32 #define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
33 #define DEF_SCALE 0
34 #define DEF_CTEST 0
35 #include "nv40_shader.h"
36
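/* Shorthand source modifiers: swz() rewrites the component selects (e.g.
 * swz(r, X, X, X, X) broadcasts the X component), while neg() and abs()
 * wrap nv40_sr_neg()/nv40_sr_abs().  Note that this abs() macro shadows
 * the libc function of the same name for the rest of this file.
 */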
37 #define swz(s,x,y,z,w) nv40_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
38 #define neg(s) nv40_sr_neg((s))
39 #define abs(s) nv40_sr_abs((s))
40
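/* Pseudo result index for user clip distance 'n'.  emit_dst() recognises
 * these values, steers the write to the FOGC (planes 0-2) or PSZ
 * (planes 3-5) result register and sets the matching clip-plane enable
 * bit in vp->clip_ctrl.
 */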
41 #define NV40_VP_INST_DEST_CLIP(n) ((~0 - 6) + (n))
42
43 struct nv40_vpc {
44 struct nv40_vertex_program *vp;
45
46 struct nv40_vertex_program_exec *vpi;
47
48 unsigned r_temps;
49 unsigned r_temps_discard;
50 struct nv40_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
51 struct nv40_sreg *r_address;
52 struct nv40_sreg *r_temp;
53
54 struct nv40_sreg *imm;
55 unsigned nr_imm;
56
57 unsigned hpos_idx;
58 };
59
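/* Allocate the lowest free hardware temporary from the r_temps bitmask.
 * Temps taken here are also flagged in r_temps_discard and handed back
 * by release_temps() after each TGSI instruction has been emitted.
 */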
60 static struct nv40_sreg
61 temp(struct nv40_vpc *vpc)
62 {
63 int idx = ffs(~vpc->r_temps) - 1;
64
65 if (idx < 0) {
66 NOUVEAU_ERR("out of temps!!\n");
67 assert(0);
68 return nv40_sr(NV40SR_TEMP, 0);
69 }
70
71 vpc->r_temps |= (1 << idx);
72 vpc->r_temps_discard |= (1 << idx);
73 return nv40_sr(NV40SR_TEMP, idx);
74 }
75
76 static INLINE void
77 release_temps(struct nv40_vpc *vpc)
78 {
79 vpc->r_temps &= ~vpc->r_temps_discard;
80 vpc->r_temps_discard = 0;
81 }
82
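/* Find or allocate a constant slot.  A non-negative 'pipe' index refers
 * to an entry in the application's constant buffer (reused if already
 * present, filled in during validation); pipe == -1 allocates a fresh
 * slot holding the immediate value (x, y, z, w).
 */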
83 static struct nv40_sreg
84 constant(struct nv40_vpc *vpc, int pipe, float x, float y, float z, float w)
85 {
86 struct nv40_vertex_program *vp = vpc->vp;
87 struct nv40_vertex_program_data *vpd;
88 int idx;
89
90 if (pipe >= 0) {
91 for (idx = 0; idx < vp->nr_consts; idx++) {
92 if (vp->consts[idx].index == pipe)
93 return nv40_sr(NV40SR_CONST, idx);
94 }
95 }
96
97 idx = vp->nr_consts++;
98 vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
99 vpd = &vp->consts[idx];
100
101 vpd->index = pipe;
102 vpd->value[0] = x;
103 vpd->value[1] = y;
104 vpd->value[2] = z;
105 vpd->value[3] = w;
106 return nv40_sr(NV40SR_CONST, idx);
107 }
108
109 #define arith(cc,s,o,d,m,s0,s1,s2) \
110 nv40_vp_arith((cc), (s), NV40_VP_INST_##o, (d), (m), (s0), (s1), (s2))
111
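/* Pack one source operand (register type and index, swizzle, negate,
 * absolute) into the 128-bit instruction word.  The encoded field for
 * source slot 'pos' is split across hw[1]..hw[3]; note that only a
 * single input attribute and a single constant can be referenced per
 * instruction.
 */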
112 static void
113 emit_src(struct nv40_vpc *vpc, uint32_t *hw, int pos, struct nv40_sreg src)
114 {
115 struct nv40_vertex_program *vp = vpc->vp;
116 uint32_t sr = 0;
117
118 switch (src.type) {
119 case NV40SR_TEMP:
120 sr |= (NV40_VP_SRC_REG_TYPE_TEMP << NV40_VP_SRC_REG_TYPE_SHIFT);
121 sr |= (src.index << NV40_VP_SRC_TEMP_SRC_SHIFT);
122 break;
123 case NV40SR_INPUT:
124 sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
125 NV40_VP_SRC_REG_TYPE_SHIFT);
126 vp->ir |= (1 << src.index);
127 hw[1] |= (src.index << NV40_VP_INST_INPUT_SRC_SHIFT);
128 break;
129 case NV40SR_CONST:
130 sr |= (NV40_VP_SRC_REG_TYPE_CONST <<
131 NV40_VP_SRC_REG_TYPE_SHIFT);
132 assert(vpc->vpi->const_index == -1 ||
133 vpc->vpi->const_index == src.index);
134 vpc->vpi->const_index = src.index;
135 break;
136 case NV40SR_NONE:
137 sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
138 NV40_VP_SRC_REG_TYPE_SHIFT);
139 break;
140 default:
141 assert(0);
142 }
143
144 if (src.negate)
145 sr |= NV40_VP_SRC_NEGATE;
146
147 if (src.abs)
148 hw[0] |= (1 << (21 + pos));
149
150 sr |= ((src.swz[0] << NV40_VP_SRC_SWZ_X_SHIFT) |
151 (src.swz[1] << NV40_VP_SRC_SWZ_Y_SHIFT) |
152 (src.swz[2] << NV40_VP_SRC_SWZ_Z_SHIFT) |
153 (src.swz[3] << NV40_VP_SRC_SWZ_W_SHIFT));
154
155 switch (pos) {
156 case 0:
157 hw[1] |= ((sr & NV40_VP_SRC0_HIGH_MASK) >>
158 NV40_VP_SRC0_HIGH_SHIFT) << NV40_VP_INST_SRC0H_SHIFT;
159 hw[2] |= (sr & NV40_VP_SRC0_LOW_MASK) <<
160 NV40_VP_INST_SRC0L_SHIFT;
161 break;
162 case 1:
163 hw[2] |= sr << NV40_VP_INST_SRC1_SHIFT;
164 break;
165 case 2:
166 hw[2] |= ((sr & NV40_VP_SRC2_HIGH_MASK) >>
167 NV40_VP_SRC2_HIGH_SHIFT) << NV40_VP_INST_SRC2H_SHIFT;
168 hw[3] |= (sr & NV40_VP_SRC2_LOW_MASK) <<
169 NV40_VP_INST_SRC2L_SHIFT;
170 break;
171 default:
172 assert(0);
173 }
174 }
175
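/* Encode the destination.  'slot' selects the vector (0) or scalar (1)
 * unit.  Writes to hardware results also update the output mask (vp->or)
 * and, for clip distances, redirect the write to FOGC/PSZ and enable the
 * corresponding clip plane.
 */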
176 static void
177 emit_dst(struct nv40_vpc *vpc, uint32_t *hw, int slot, struct nv40_sreg dst)
178 {
179 struct nv40_vertex_program *vp = vpc->vp;
180
181 switch (dst.type) {
182 case NV40SR_TEMP:
183 hw[3] |= NV40_VP_INST_DEST_MASK;
184 if (slot == 0) {
185 hw[0] |= (dst.index <<
186 NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
187 } else {
188 hw[3] |= (dst.index <<
189 NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
190 }
191 break;
192 case NV40SR_OUTPUT:
193 switch (dst.index) {
194 case NV40_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
195 case NV40_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
196 case NV40_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
197 case NV40_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
198 case NV40_VP_INST_DEST_FOGC : vp->or |= (1 << 4); break;
199 case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
200 case NV40_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
201 case NV40_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
202 case NV40_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
203 case NV40_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
204 case NV40_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
205 case NV40_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
206 case NV40_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
207 case NV40_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
208 case NV40_VP_INST_DEST_CLIP(0):
209 vp->or |= (1 << 6);
210 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE0;
211 dst.index = NV40_VP_INST_DEST_FOGC;
212 break;
213 case NV40_VP_INST_DEST_CLIP(1):
214 vp->or |= (1 << 7);
215 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE1;
216 dst.index = NV40_VP_INST_DEST_FOGC;
217 break;
218 case NV40_VP_INST_DEST_CLIP(2):
219 vp->or |= (1 << 8);
220 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE2;
221 dst.index = NV40_VP_INST_DEST_FOGC;
222 break;
223 case NV40_VP_INST_DEST_CLIP(3):
224 vp->or |= (1 << 9);
225 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE3;
226 dst.index = NV40_VP_INST_DEST_PSZ;
227 break;
228 case NV40_VP_INST_DEST_CLIP(4):
229 vp->or |= (1 << 10);
230 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE4;
231 dst.index = NV40_VP_INST_DEST_PSZ;
232 break;
233 case NV40_VP_INST_DEST_CLIP(5):
234 vp->or |= (1 << 11);
235 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE5;
236 dst.index = NV40_VP_INST_DEST_PSZ;
237 break;
238 default:
239 break;
240 }
241
242 hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
243 if (slot == 0) {
244 hw[0] |= NV40_VP_INST_VEC_RESULT;
245 hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
246 } else {
247 hw[3] |= NV40_VP_INST_SCA_RESULT;
248 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
249 }
250 break;
251 default:
252 assert(0);
253 }
254 }
255
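/* Append one instruction to the program.  Each nv40 instruction word has
 * fields for both a vector-unit and a scalar-unit operation; 'slot'
 * picks which unit this opcode runs on and the other unit's destination
 * is left disabled.  Used through the arith() macro, e.g.
 * arith(vpc, 0, OP_MOV, dst, MASK_ALL, src, none, none) emits a vector
 * MOV.
 */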
256 static void
257 nv40_vp_arith(struct nv40_vpc *vpc, int slot, int op,
258 struct nv40_sreg dst, int mask,
259 struct nv40_sreg s0, struct nv40_sreg s1,
260 struct nv40_sreg s2)
261 {
262 struct nv40_vertex_program *vp = vpc->vp;
263 uint32_t *hw;
264
265 vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
266 vpc->vpi = &vp->insns[vp->nr_insns - 1];
267 memset(vpc->vpi, 0, sizeof(*vpc->vpi));
268 vpc->vpi->const_index = -1;
269
270 hw = vpc->vpi->data;
271
272 hw[0] |= (NV40_VP_INST_COND_TR << NV40_VP_INST_COND_SHIFT);
273 hw[0] |= ((0 << NV40_VP_INST_COND_SWZ_X_SHIFT) |
274 (1 << NV40_VP_INST_COND_SWZ_Y_SHIFT) |
275 (2 << NV40_VP_INST_COND_SWZ_Z_SHIFT) |
276 (3 << NV40_VP_INST_COND_SWZ_W_SHIFT));
277
278 if (slot == 0) {
279 hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
280 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
281 hw[3] |= (mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
282 } else {
283 hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
284 hw[0] |= (NV40_VP_INST_VEC_DEST_TEMP_MASK | (1 << 20));
285 hw[3] |= (mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
286 }
287
288 emit_dst(vpc, hw, slot, dst);
289 emit_src(vpc, hw, 0, s0);
290 emit_src(vpc, hw, 1, s1);
291 emit_src(vpc, hw, 2, s2);
292 }
293
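/* Translate a TGSI source operand into an nv40_sreg: resolve the
 * register file (input, constant, immediate or temporary) and copy the
 * swizzle, negate and absolute-value modifiers.
 */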
294 static INLINE struct nv40_sreg
295 tgsi_src(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
296 struct nv40_sreg src;
297
298 switch (fsrc->SrcRegister.File) {
299 case TGSI_FILE_INPUT:
300 src = nv40_sr(NV40SR_INPUT, fsrc->SrcRegister.Index);
301 break;
302 case TGSI_FILE_CONSTANT:
303 src = constant(vpc, fsrc->SrcRegister.Index, 0, 0, 0, 0);
304 break;
305 case TGSI_FILE_IMMEDIATE:
306 src = vpc->imm[fsrc->SrcRegister.Index];
307 break;
308 case TGSI_FILE_TEMPORARY:
309 src = vpc->r_temp[fsrc->SrcRegister.Index];
310 break;
311 default:
312 NOUVEAU_ERR("bad src file\n");
313 break;
314 }
315
316 src.abs = fsrc->SrcRegisterExtMod.Absolute;
317 src.negate = fsrc->SrcRegister.Negate;
318 src.swz[0] = fsrc->SrcRegister.SwizzleX;
319 src.swz[1] = fsrc->SrcRegister.SwizzleY;
320 src.swz[2] = fsrc->SrcRegister.SwizzleZ;
321 src.swz[3] = fsrc->SrcRegister.SwizzleW;
322 return src;
323 }
324
325 static INLINE struct nv40_sreg
326 tgsi_dst(struct nv40_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
327 struct nv40_sreg dst;
328
329 switch (fdst->DstRegister.File) {
330 case TGSI_FILE_OUTPUT:
331 dst = vpc->r_result[fdst->DstRegister.Index];
332 break;
333 case TGSI_FILE_TEMPORARY:
334 dst = vpc->r_temp[fdst->DstRegister.Index];
335 break;
336 case TGSI_FILE_ADDRESS:
337 dst = vpc->r_address[fdst->DstRegister.Index];
338 break;
339 default:
340 NOUVEAU_ERR("bad dst file\n");
341 break;
342 }
343
344 return dst;
345 }
346
347 static INLINE int
348 tgsi_mask(uint tgsi)
349 {
350 int mask = 0;
351
352 if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
353 if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
354 if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
355 if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
356 return mask;
357 }
358
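/* Handle TGSI extended swizzles.  Returns TRUE when the operand only
 * uses plain X/Y/Z/W selects with no per-component negate and can be
 * read directly.  Otherwise the 0/1 components and negations are built
 * in a temporary (MOV plus SFL/STR and a multiply by -1) and *src is
 * pointed at that temp.
 */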
359 static boolean
360 src_native_swz(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc,
361 struct nv40_sreg *src)
362 {
363 const struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
364 struct nv40_sreg tgsi = tgsi_src(vpc, fsrc);
365 uint mask = 0, zero_mask = 0, one_mask = 0, neg_mask = 0;
366 uint neg[4] = { fsrc->SrcRegisterExtSwz.NegateX,
367 fsrc->SrcRegisterExtSwz.NegateY,
368 fsrc->SrcRegisterExtSwz.NegateZ,
369 fsrc->SrcRegisterExtSwz.NegateW };
370 uint c;
371
372 for (c = 0; c < 4; c++) {
373 switch (tgsi_util_get_full_src_register_extswizzle(fsrc, c)) {
374 case TGSI_EXTSWIZZLE_X:
375 case TGSI_EXTSWIZZLE_Y:
376 case TGSI_EXTSWIZZLE_Z:
377 case TGSI_EXTSWIZZLE_W:
378 mask |= tgsi_mask(1 << c);
379 break;
380 case TGSI_EXTSWIZZLE_ZERO:
381 zero_mask |= tgsi_mask(1 << c);
382 tgsi.swz[c] = SWZ_X;
383 break;
384 case TGSI_EXTSWIZZLE_ONE:
385 one_mask |= tgsi_mask(1 << c);
386 tgsi.swz[c] = SWZ_X;
387 break;
388 default:
389 assert(0);
390 }
391
392 if (!tgsi.negate && neg[c])
393 neg_mask |= tgsi_mask(1 << c);
394 }
395
396 if (mask == MASK_ALL && !neg_mask)
397 return TRUE;
398
399 *src = temp(vpc);
400
401 if (mask)
402 arith(vpc, 0, OP_MOV, *src, mask, tgsi, none, none);
403
404 if (zero_mask)
405 arith(vpc, 0, OP_SFL, *src, zero_mask, *src, none, none);
406
407 if (one_mask)
408 arith(vpc, 0, OP_STR, *src, one_mask, *src, none, none);
409
410 if (neg_mask) {
411 struct nv40_sreg one = temp(vpc);
412 arith(vpc, 0, OP_STR, one, neg_mask, one, none, none);
413 arith(vpc, 0, OP_MUL, *src, neg_mask, *src, neg(one), none);
414 }
415
416 return FALSE;
417 }
418
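/* Emit code for a single TGSI instruction.  Source operands are
 * legalised first: since an instruction can only reference one input
 * attribute and one constant or immediate, any extra distinct operands
 * are copied into temporaries with a MOV before the opcode itself is
 * emitted.  Scalar opcodes (RCP, RSQ, EX2, LG2, ...) go to slot 1, and
 * POW/XPD are expanded into short instruction sequences.
 */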
419 static boolean
420 nv40_vertprog_parse_instruction(struct nv40_vpc *vpc,
421 const struct tgsi_full_instruction *finst)
422 {
423 struct nv40_sreg src[3], dst, tmp;
424 struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
425 int mask;
426 int ai = -1, ci = -1, ii = -1;
427 int i;
428
429 if (finst->Instruction.Opcode == TGSI_OPCODE_END)
430 return TRUE;
431
432 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
433 const struct tgsi_full_src_register *fsrc;
434
435 fsrc = &finst->FullSrcRegisters[i];
436 if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
437 src[i] = tgsi_src(vpc, fsrc);
438 }
439 }
440
441 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
442 const struct tgsi_full_src_register *fsrc;
443
444 fsrc = &finst->FullSrcRegisters[i];
445
446 switch (fsrc->SrcRegister.File) {
447 case TGSI_FILE_INPUT:
448 case TGSI_FILE_CONSTANT:
449 case TGSI_FILE_TEMPORARY:
450 if (!src_native_swz(vpc, fsrc, &src[i]))
451 continue;
452 break;
453 default:
454 break;
455 }
456
457 switch (fsrc->SrcRegister.File) {
458 case TGSI_FILE_INPUT:
459 if (ai == -1 || ai == fsrc->SrcRegister.Index) {
460 ai = fsrc->SrcRegister.Index;
461 src[i] = tgsi_src(vpc, fsrc);
462 } else {
463 src[i] = temp(vpc);
464 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
465 tgsi_src(vpc, fsrc), none, none);
466 }
467 break;
468 case TGSI_FILE_CONSTANT:
469 if ((ci == -1 && ii == -1) ||
470 ci == fsrc->SrcRegister.Index) {
471 ci = fsrc->SrcRegister.Index;
472 src[i] = tgsi_src(vpc, fsrc);
473 } else {
474 src[i] = temp(vpc);
475 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
476 tgsi_src(vpc, fsrc), none, none);
477 }
478 break;
479 case TGSI_FILE_IMMEDIATE:
480 if ((ci == -1 && ii == -1) ||
481 ii == fsrc->SrcRegister.Index) {
482 ii = fsrc->SrcRegister.Index;
483 src[i] = tgsi_src(vpc, fsrc);
484 } else {
485 src[i] = temp(vpc);
486 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
487 tgsi_src(vpc, fsrc), none, none);
488 }
489 break;
490 case TGSI_FILE_TEMPORARY:
491 /* handled above */
492 break;
493 default:
494 NOUVEAU_ERR("bad src file\n");
495 return FALSE;
496 }
497 }
498
499 dst = tgsi_dst(vpc, &finst->FullDstRegisters[0]);
500 mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
501
502 switch (finst->Instruction.Opcode) {
503 case TGSI_OPCODE_ABS:
504 arith(vpc, 0, OP_MOV, dst, mask, abs(src[0]), none, none);
505 break;
506 case TGSI_OPCODE_ADD:
507 arith(vpc, 0, OP_ADD, dst, mask, src[0], none, src[1]);
508 break;
509 case TGSI_OPCODE_ARL:
510 arith(vpc, 0, OP_ARL, dst, mask, src[0], none, none);
511 break;
512 case TGSI_OPCODE_DP3:
513 arith(vpc, 0, OP_DP3, dst, mask, src[0], src[1], none);
514 break;
515 case TGSI_OPCODE_DP4:
516 arith(vpc, 0, OP_DP4, dst, mask, src[0], src[1], none);
517 break;
518 case TGSI_OPCODE_DPH:
519 arith(vpc, 0, OP_DPH, dst, mask, src[0], src[1], none);
520 break;
521 case TGSI_OPCODE_DST:
522 arith(vpc, 0, OP_DST, dst, mask, src[0], src[1], none);
523 break;
524 case TGSI_OPCODE_EX2:
525 arith(vpc, 1, OP_EX2, dst, mask, none, none, src[0]);
526 break;
527 case TGSI_OPCODE_EXP:
528 arith(vpc, 1, OP_EXP, dst, mask, none, none, src[0]);
529 break;
530 case TGSI_OPCODE_FLR:
531 arith(vpc, 0, OP_FLR, dst, mask, src[0], none, none);
532 break;
533 case TGSI_OPCODE_FRC:
534 arith(vpc, 0, OP_FRC, dst, mask, src[0], none, none);
535 break;
536 case TGSI_OPCODE_LG2:
537 arith(vpc, 1, OP_LG2, dst, mask, none, none, src[0]);
538 break;
539 case TGSI_OPCODE_LIT:
540 arith(vpc, 1, OP_LIT, dst, mask, none, none, src[0]);
541 break;
542 case TGSI_OPCODE_LOG:
543 arith(vpc, 1, OP_LOG, dst, mask, none, none, src[0]);
544 break;
545 case TGSI_OPCODE_MAD:
546 arith(vpc, 0, OP_MAD, dst, mask, src[0], src[1], src[2]);
547 break;
548 case TGSI_OPCODE_MAX:
549 arith(vpc, 0, OP_MAX, dst, mask, src[0], src[1], none);
550 break;
551 case TGSI_OPCODE_MIN:
552 arith(vpc, 0, OP_MIN, dst, mask, src[0], src[1], none);
553 break;
554 case TGSI_OPCODE_MOV:
555 arith(vpc, 0, OP_MOV, dst, mask, src[0], none, none);
556 break;
557 case TGSI_OPCODE_MUL:
558 arith(vpc, 0, OP_MUL, dst, mask, src[0], src[1], none);
559 break;
560 case TGSI_OPCODE_POW:
561 tmp = temp(vpc);
562 arith(vpc, 1, OP_LG2, tmp, MASK_X, none, none,
563 swz(src[0], X, X, X, X));
564 arith(vpc, 0, OP_MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
565 swz(src[1], X, X, X, X), none);
566 arith(vpc, 1, OP_EX2, dst, mask, none, none,
567 swz(tmp, X, X, X, X));
568 break;
569 case TGSI_OPCODE_RCP:
570 arith(vpc, 1, OP_RCP, dst, mask, none, none, src[0]);
571 break;
572 case TGSI_OPCODE_RET:
573 break;
574 case TGSI_OPCODE_RSQ:
575 arith(vpc, 1, OP_RSQ, dst, mask, none, none, abs(src[0]));
576 break;
577 case TGSI_OPCODE_SGE:
578 arith(vpc, 0, OP_SGE, dst, mask, src[0], src[1], none);
579 break;
580 case TGSI_OPCODE_SLT:
581 arith(vpc, 0, OP_SLT, dst, mask, src[0], src[1], none);
582 break;
583 case TGSI_OPCODE_SUB:
584 arith(vpc, 0, OP_ADD, dst, mask, src[0], none, neg(src[1]));
585 break;
586 case TGSI_OPCODE_XPD:
587 tmp = temp(vpc);
588 arith(vpc, 0, OP_MUL, tmp, mask,
589 swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
590 arith(vpc, 0, OP_MAD, dst, (mask & ~MASK_W),
591 swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
592 neg(tmp));
593 break;
594 default:
595 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
596 return FALSE;
597 }
598
599 release_temps(vpc);
600 return TRUE;
601 }
602
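/* Map a TGSI output declaration onto a fixed nv40 result register:
 * position, the two colours and two back-face colours, fog, point size,
 * or one of eight texture coordinate slots for generic varyings.
 */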
603 static boolean
604 nv40_vertprog_parse_decl_output(struct nv40_vpc *vpc,
605 const struct tgsi_full_declaration *fdec)
606 {
607 unsigned idx = fdec->DeclarationRange.First;
608 int hw;
609
610 switch (fdec->Semantic.SemanticName) {
611 case TGSI_SEMANTIC_POSITION:
612 hw = NV40_VP_INST_DEST_POS;
613 vpc->hpos_idx = idx;
614 break;
615 case TGSI_SEMANTIC_COLOR:
616 if (fdec->Semantic.SemanticIndex == 0) {
617 hw = NV40_VP_INST_DEST_COL0;
618 } else
619 if (fdec->Semantic.SemanticIndex == 1) {
620 hw = NV40_VP_INST_DEST_COL1;
621 } else {
622 NOUVEAU_ERR("bad colour semantic index\n");
623 return FALSE;
624 }
625 break;
626 case TGSI_SEMANTIC_BCOLOR:
627 if (fdec->Semantic.SemanticIndex == 0) {
628 hw = NV40_VP_INST_DEST_BFC0;
629 } else
630 if (fdec->Semantic.SemanticIndex == 1) {
631 hw = NV40_VP_INST_DEST_BFC1;
632 } else {
633 NOUVEAU_ERR("bad bcolour semantic index\n");
634 return FALSE;
635 }
636 break;
637 case TGSI_SEMANTIC_FOG:
638 hw = NV40_VP_INST_DEST_FOGC;
639 break;
640 case TGSI_SEMANTIC_PSIZE:
641 hw = NV40_VP_INST_DEST_PSZ;
642 break;
643 case TGSI_SEMANTIC_GENERIC:
644 if (fdec->Semantic.SemanticIndex <= 7) {
645 hw = NV40_VP_INST_DEST_TC(fdec->Semantic.SemanticIndex);
646 } else {
647 NOUVEAU_ERR("bad generic semantic index\n");
648 return FALSE;
649 }
650 break;
651 default:
652 NOUVEAU_ERR("bad output semantic\n");
653 return FALSE;
654 }
655
656 vpc->r_result[idx] = nv40_sr(NV40SR_OUTPUT, hw);
657 return TRUE;
658 }
659
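/* Pre-pass over the TGSI tokens: count immediates, record output
 * declarations and find the highest temporary and address register so
 * hardware temps can be reserved up front.  Address registers are
 * discovered by scanning instruction destinations because gallium does
 * not emit declarations for them (see the #if 0 / #if 1 blocks below).
 */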
660 static boolean
661 nv40_vertprog_prepare(struct nv40_vpc *vpc)
662 {
663 struct tgsi_parse_context p;
664 int high_temp = -1, high_addr = -1, nr_imm = 0, i;
665
666 tgsi_parse_init(&p, vpc->vp->pipe.tokens);
667 while (!tgsi_parse_end_of_tokens(&p)) {
668 const union tgsi_full_token *tok = &p.FullToken;
669
670 tgsi_parse_token(&p);
671 switch(tok->Token.Type) {
672 case TGSI_TOKEN_TYPE_IMMEDIATE:
673 nr_imm++;
674 break;
675 case TGSI_TOKEN_TYPE_DECLARATION:
676 {
677 const struct tgsi_full_declaration *fdec;
678
679 fdec = &p.FullToken.FullDeclaration;
680 switch (fdec->Declaration.File) {
681 case TGSI_FILE_TEMPORARY:
682 if (fdec->DeclarationRange.Last > high_temp) {
683 high_temp =
684 fdec->DeclarationRange.Last;
685 }
686 break;
687 #if 0 /* this would be nice... except gallium doesn't track it */
688 case TGSI_FILE_ADDRESS:
689 if (fdec->DeclarationRange.Last > high_addr) {
690 high_addr =
691 fdec->DeclarationRange.Last;
692 }
693 break;
694 #endif
695 case TGSI_FILE_OUTPUT:
696 if (!nv40_vertprog_parse_decl_output(vpc, fdec))
697 return FALSE;
698 break;
699 default:
700 break;
701 }
702 }
703 break;
704 #if 1 /* yay, parse instructions looking for address regs instead */
705 case TGSI_TOKEN_TYPE_INSTRUCTION:
706 {
707 const struct tgsi_full_instruction *finst;
708 const struct tgsi_full_dst_register *fdst;
709
710 finst = &p.FullToken.FullInstruction;
711 fdst = &finst->FullDstRegisters[0];
712
713 if (fdst->DstRegister.File == TGSI_FILE_ADDRESS) {
714 if (fdst->DstRegister.Index > high_addr)
715 high_addr = fdst->DstRegister.Index;
716 }
717
718 }
719 break;
720 #endif
721 default:
722 break;
723 }
724 }
725 tgsi_parse_free(&p);
726
727 if (nr_imm) {
728 vpc->imm = CALLOC(nr_imm, sizeof(struct nv40_sreg));
729 assert(vpc->imm);
730 }
731
732 if (++high_temp) {
733 vpc->r_temp = CALLOC(high_temp, sizeof(struct nv40_sreg));
734 for (i = 0; i < high_temp; i++)
735 vpc->r_temp[i] = temp(vpc);
736 }
737
738 if (++high_addr) {
739 vpc->r_address = CALLOC(high_addr, sizeof(struct nv40_sreg));
740 for (i = 0; i < high_addr; i++)
741 vpc->r_address[i] = temp(vpc);
742 }
743
744 vpc->r_temps_discard = 0;
745 return TRUE;
746 }
747
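/* Translate the whole TGSI vertex program into nv40 bytecode.  When user
 * clip planes are enabled, HPOS is redirected to a temporary during the
 * main pass; the real position write and one DP4 per enabled clip plane
 * are appended afterwards, and the final instruction is tagged with
 * NV40_VP_INST_LAST.
 */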
748 static void
749 nv40_vertprog_translate(struct nv40_context *nv40,
750 struct nv40_vertex_program *vp)
751 {
752 struct tgsi_parse_context parse;
753 struct nv40_vpc *vpc = NULL;
754 struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
755 int i;
756
757 vpc = CALLOC(1, sizeof(struct nv40_vpc));
758 if (!vpc)
759 return;
760 vpc->vp = vp;
761
762 if (!nv40_vertprog_prepare(vpc)) {
763 FREE(vpc);
764 return;
765 }
766
767 /* Redirect post-transform vertex position to a temp if user clip
768  * planes are enabled. We need to append code to the vtxprog
769 * to handle clip planes later.
770 */
771 if (vp->ucp.nr) {
772 vpc->r_result[vpc->hpos_idx] = temp(vpc);
773 vpc->r_temps_discard = 0;
774 }
775
776 tgsi_parse_init(&parse, vp->pipe.tokens);
777
778 while (!tgsi_parse_end_of_tokens(&parse)) {
779 tgsi_parse_token(&parse);
780
781 switch (parse.FullToken.Token.Type) {
782 case TGSI_TOKEN_TYPE_IMMEDIATE:
783 {
784 const struct tgsi_full_immediate *imm;
785
786 imm = &parse.FullToken.FullImmediate;
787 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
788 assert(imm->Immediate.NrTokens == 4 + 1);
789 vpc->imm[vpc->nr_imm++] =
790 constant(vpc, -1,
791 imm->u[0].Float,
792 imm->u[1].Float,
793 imm->u[2].Float,
794 imm->u[3].Float);
795 }
796 break;
797 case TGSI_TOKEN_TYPE_INSTRUCTION:
798 {
799 const struct tgsi_full_instruction *finst;
800 finst = &parse.FullToken.FullInstruction;
801 if (!nv40_vertprog_parse_instruction(vpc, finst))
802 goto out_err;
803 }
804 break;
805 default:
806 break;
807 }
808 }
809
810 /* Write out HPOS if it was redirected to a temp earlier */
811 if (vpc->r_result[vpc->hpos_idx].type != NV40SR_OUTPUT) {
812 struct nv40_sreg hpos = nv40_sr(NV40SR_OUTPUT,
813 NV40_VP_INST_DEST_POS);
814 struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
815
816 arith(vpc, 0, OP_MOV, hpos, MASK_ALL, htmp, none, none);
817 }
818
819 /* Insert code to handle user clip planes */
820 for (i = 0; i < vp->ucp.nr; i++) {
821 struct nv40_sreg cdst = nv40_sr(NV40SR_OUTPUT,
822 NV40_VP_INST_DEST_CLIP(i));
823 struct nv40_sreg ceqn = constant(vpc, -1,
824 nv40->clip.ucp[i][0],
825 nv40->clip.ucp[i][1],
826 nv40->clip.ucp[i][2],
827 nv40->clip.ucp[i][3]);
828 struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
829 unsigned mask;
830
831 switch (i) {
832 case 0: case 3: mask = MASK_Y; break;
833 case 1: case 4: mask = MASK_Z; break;
834 case 2: case 5: mask = MASK_W; break;
835 default:
836 NOUVEAU_ERR("invalid clip dist #%d\n", i);
837 goto out_err;
838 }
839
840 arith(vpc, 0, OP_DP4, cdst, mask, htmp, ceqn, none);
841 }
842
843 vp->insns[vp->nr_insns - 1].data[3] |= NV40_VP_INST_LAST;
844 vp->translated = TRUE;
845 out_err:
846 tgsi_parse_free(&parse);
847 if (vpc->r_temp)
848 FREE(vpc->r_temp);
849 if (vpc->r_address)
850 FREE(vpc->r_address);
851 if (vpc->imm)
852 FREE(vpc->imm);
853 FREE(vpc);
854 }
855
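/* Validate the vertex program state: translate the shader if needed,
 * allocate (or reclaim, by evicting other programs) space in the shared
 * exec and const heaps, patch constant source indices if the data
 * segment moved, upload constants that changed, and re-upload the
 * instruction words when the code itself moved or was rebuilt.
 */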
856 static boolean
857 nv40_vertprog_validate(struct nv40_context *nv40)
858 {
859 struct pipe_screen *pscreen = nv40->pipe.screen;
860 struct nouveau_grobj *curie = nv40->screen->curie;
861 struct nv40_vertex_program *vp;
862 struct pipe_buffer *constbuf;
863 boolean upload_code = FALSE, upload_data = FALSE;
864 int i;
865
866 if (nv40->render_mode == HW) {
867 vp = nv40->vertprog;
868 constbuf = nv40->constbuf[PIPE_SHADER_VERTEX];
869
870 if ((nv40->dirty & NV40_NEW_UCP) ||
871 memcmp(&nv40->clip, &vp->ucp, sizeof(vp->ucp))) {
872 nv40_vertprog_destroy(nv40, vp);
873 memcpy(&vp->ucp, &nv40->clip, sizeof(vp->ucp));
874 }
875 } else {
876 vp = nv40->swtnl.vertprog;
877 constbuf = NULL;
878 }
879
880 /* Translate TGSI shader into hw bytecode */
881 if (vp->translated)
882 goto check_gpu_resources;
883
884 nv40->fallback_swtnl &= ~NV40_NEW_VERTPROG;
885 nv40_vertprog_translate(nv40, vp);
886 if (!vp->translated) {
887 nv40->fallback_swtnl |= NV40_NEW_VERTPROG;
888 return FALSE;
889 }
890
891 check_gpu_resources:
892 /* Allocate hw vtxprog exec slots */
893 if (!vp->exec) {
894 struct nouveau_resource *heap = nv40->screen->vp_exec_heap;
895 struct nouveau_stateobj *so;
896 uint vplen = vp->nr_insns;
897
898 if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
899 while (heap->next && heap->size < vplen) {
900 struct nv40_vertex_program *evict;
901
902 evict = heap->next->priv;
903 nouveau_resource_free(&evict->exec);
904 }
905
906 if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
907 assert(0);
908 }
909
910 so = so_new(7, 0);
911 so_method(so, curie, NV40TCL_VP_START_FROM_ID, 1);
912 so_data (so, vp->exec->start);
913 so_method(so, curie, NV40TCL_VP_ATTRIB_EN, 2);
914 so_data (so, vp->ir);
915 so_data (so, vp->or);
916 so_method(so, curie, NV40TCL_CLIP_PLANE_ENABLE, 1);
917 so_data (so, vp->clip_ctrl);
918 so_ref(so, &vp->so);
919 so_ref(NULL, &so);
920
921 upload_code = TRUE;
922 }
923
924 /* Allocate hw vtxprog const slots */
925 if (vp->nr_consts && !vp->data) {
926 struct nouveau_resource *heap = nv40->screen->vp_data_heap;
927
928 if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
929 while (heap->next && heap->size < vp->nr_consts) {
930 struct nv40_vertex_program *evict;
931
932 evict = heap->next->priv;
933 nouveau_resource_free(&evict->data);
934 }
935
936 if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
937 assert(0);
938 }
939
940 /*XXX: handle this some day */
941 assert(vp->data->start >= vp->data_start_min);
942
943 upload_data = TRUE;
944 if (vp->data_start != vp->data->start)
945 upload_code = TRUE;
946 }
947
948 /* If exec or data segments moved we need to patch the program to
949  * fix up offsets and register IDs.
950 */
951 if (vp->exec_start != vp->exec->start) {
952 for (i = 0; i < vp->nr_insns; i++) {
953 struct nv40_vertex_program_exec *vpi = &vp->insns[i];
954
955 if (vpi->has_branch_offset) {
956 assert(0);
957 }
958 }
959
960 vp->exec_start = vp->exec->start;
961 }
962
963 if (vp->nr_consts && vp->data_start != vp->data->start) {
964 for (i = 0; i < vp->nr_insns; i++) {
965 struct nv40_vertex_program_exec *vpi = &vp->insns[i];
966
967 if (vpi->const_index >= 0) {
968 vpi->data[1] &= ~NV40_VP_INST_CONST_SRC_MASK;
969 vpi->data[1] |=
970 (vpi->const_index + vp->data->start) <<
971 NV40_VP_INST_CONST_SRC_SHIFT;
972
973 }
974 }
975
976 vp->data_start = vp->data->start;
977 }
978
979 /* Update + Upload constant values */
980 if (vp->nr_consts) {
981 float *map = NULL;
982
983 if (constbuf) {
984 map = pipe_buffer_map(pscreen, constbuf,
985 PIPE_BUFFER_USAGE_CPU_READ);
986 }
987
988 for (i = 0; i < vp->nr_consts; i++) {
989 struct nv40_vertex_program_data *vpd = &vp->consts[i];
990
991 if (vpd->index >= 0) {
992 if (!upload_data &&
993 !memcmp(vpd->value, &map[vpd->index * 4],
994 4 * sizeof(float)))
995 continue;
996 memcpy(vpd->value, &map[vpd->index * 4],
997 4 * sizeof(float));
998 }
999
1000 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_CONST_ID, 5);
1001 OUT_RING (i + vp->data->start);
1002 OUT_RINGp ((uint32_t *)vpd->value, 4);
1003 }
1004
1005 if (constbuf)
1006 pscreen->buffer_unmap(pscreen, constbuf);
1007 }
1008
1009 /* Upload vtxprog */
1010 if (upload_code) {
1011 #if 0
1012 for (i = 0; i < vp->nr_insns; i++) {
1013 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[0]);
1014 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[1]);
1015 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[2]);
1016 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[3]);
1017 }
1018 #endif
1019 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_FROM_ID, 1);
1020 OUT_RING (vp->exec->start);
1021 for (i = 0; i < vp->nr_insns; i++) {
1022 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_INST(0), 4);
1023 OUT_RINGp (vp->insns[i].data, 4);
1024 }
1025 }
1026
1027 if (vp->so != nv40->state.hw[NV40_STATE_VERTPROG]) {
1028 so_ref(vp->so, &nv40->state.hw[NV40_STATE_VERTPROG]);
1029 return TRUE;
1030 }
1031
1032 return FALSE;
1033 }
1034
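/* Drop the compiled program and release its exec/data heap allocations
 * and state object so the next validation retranslates it from the TGSI
 * tokens.
 */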
1035 void
1036 nv40_vertprog_destroy(struct nv40_context *nv40, struct nv40_vertex_program *vp)
1037 {
1038 vp->translated = FALSE;
1039
1040 if (vp->nr_insns) {
1041 FREE(vp->insns);
1042 vp->insns = NULL;
1043 vp->nr_insns = 0;
1044 }
1045
1046 if (vp->nr_consts) {
1047 FREE(vp->consts);
1048 vp->consts = NULL;
1049 vp->nr_consts = 0;
1050 }
1051
1052 nouveau_resource_free(&vp->exec);
1053 vp->exec_start = 0;
1054 nouveau_resource_free(&vp->data);
1055 vp->data_start = 0;
1056 vp->data_start_min = 0;
1057
1058 vp->ir = vp->or = vp->clip_ctrl = 0;
1059 so_ref(NULL, &vp->so);
1060 }
1061
1062 struct nv40_state_entry nv40_state_vertprog = {
1063 .validate = nv40_vertprog_validate,
1064 .dirty = {
1065 .pipe = NV40_NEW_VERTPROG | NV40_NEW_UCP,
1066 .hw = NV40_STATE_VERTPROG,
1067 }
1068 };
1069