[mesa.git] src/gallium/drivers/nv40/nv40_vertprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "pipe/p_inlines.h"
5
6 #include "pipe/p_shader_tokens.h"
7 #include "tgsi/tgsi_parse.h"
8 #include "tgsi/tgsi_util.h"
9
10 #include "nv40_context.h"
11 #include "nv40_state.h"
12
13 /* TODO (at least...):
14 * 1. Indexed consts + ARL
15 * 3. NV_vp11, NV_vp2, NV_vp3 features
16 * - extra arith opcodes
17 * - branching
18 * - texture sampling
19 * - indexed attribs
20 * - indexed results
21 * 4. bugs
22 */
23
24 #define SWZ_X 0
25 #define SWZ_Y 1
26 #define SWZ_Z 2
27 #define SWZ_W 3
28 #define MASK_X 8
29 #define MASK_Y 4
30 #define MASK_Z 2
31 #define MASK_W 1
32 #define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
33 #define DEF_SCALE 0
34 #define DEF_CTEST 0
35 #include "nv40_shader.h"
36
37 #define swz(s,x,y,z,w) nv40_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
38 #define neg(s) nv40_sr_neg((s))
39 #define abs(s) nv40_sr_abs((s))
40
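/* Pseudo destination for user clip distances: CLIP(n) encodes clip plane n
 * as an out-of-range result index.  emit_dst() later rewrites it to the
 * FOGC/PSZ result registers and sets the matching bit in vp->clip_ctrl.
 */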
41 #define NV40_VP_INST_DEST_CLIP(n) ((~0 - 6) + (n))
42
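/* Per-program translation context.  Tracks temporary register allocation
 * (r_temps is a bitmask of live temps, r_temps_discard the ones freed at
 * the end of each instruction), the nv40_sreg mappings for TGSI outputs,
 * temporaries, address registers and immediates, and the output index of
 * HPOS so it can be redirected when user clip planes are enabled.
 */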
43 struct nv40_vpc {
44 struct nv40_vertex_program *vp;
45
46 struct nv40_vertex_program_exec *vpi;
47
48 unsigned r_temps;
49 unsigned r_temps_discard;
50 struct nv40_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
51 struct nv40_sreg *r_address;
52 struct nv40_sreg *r_temp;
53
54 struct nv40_sreg *imm;
55 unsigned nr_imm;
56
57 unsigned hpos_idx;
58 };
59
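/* Allocate the lowest free hardware temporary.  Temps grabbed here are
 * also marked in r_temps_discard so release_temps() can return them to
 * the pool once the current TGSI instruction has been emitted; temps that
 * must stay live (TGSI temporaries, address regs) have that mask cleared
 * again in nv40_vertprog_prepare().
 */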
60 static struct nv40_sreg
61 temp(struct nv40_vpc *vpc)
62 {
63 int idx = ffs(~vpc->r_temps) - 1;
64
65 if (idx < 0) {
66 NOUVEAU_ERR("out of temps!!\n");
67 assert(0);
68 return nv40_sr(NV40SR_TEMP, 0);
69 }
70
71 vpc->r_temps |= (1 << idx);
72 vpc->r_temps_discard |= (1 << idx);
73 return nv40_sr(NV40SR_TEMP, idx);
74 }
75
76 static INLINE void
77 release_temps(struct nv40_vpc *vpc)
78 {
79 vpc->r_temps &= ~vpc->r_temps_discard;
80 vpc->r_temps_discard = 0;
81 }
82
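/* Return a constant slot for the vertex program.  For a TGSI constant
 * (pipe >= 0) an existing slot is reused if one already refers to that
 * index; immediates and internally generated constants pass pipe == -1
 * and always get a fresh slot with the given value baked in.
 */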
83 static struct nv40_sreg
84 constant(struct nv40_vpc *vpc, int pipe, float x, float y, float z, float w)
85 {
86 struct nv40_vertex_program *vp = vpc->vp;
87 struct nv40_vertex_program_data *vpd;
88 int idx;
89
90 if (pipe >= 0) {
91 for (idx = 0; idx < vp->nr_consts; idx++) {
92 if (vp->consts[idx].index == pipe)
93 return nv40_sr(NV40SR_CONST, idx);
94 }
95 }
96
97 idx = vp->nr_consts++;
98 vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
99 vpd = &vp->consts[idx];
100
101 vpd->index = pipe;
102 vpd->value[0] = x;
103 vpd->value[1] = y;
104 vpd->value[2] = z;
105 vpd->value[3] = w;
106 return nv40_sr(NV40SR_CONST, idx);
107 }
108
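/* Convenience wrapper around nv40_vp_arith().  "s" selects the execution
 * slot: 0 for the vector unit (NV40_VP_INST_VEC_*), 1 for the scalar unit
 * (NV40_VP_INST_SCA_*).  For example,
 *
 *   arith(vpc, 0, OP_MOV, dst, MASK_ALL, src, none, none);
 *
 * emits a full-width vector MOV, while scalar ops (RCP, LG2, EX2, ...)
 * are emitted with slot 1 and take their operand in the third source.
 */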
109 #define arith(cc,s,o,d,m,s0,s1,s2) \
110 nv40_vp_arith((cc), (s), NV40_VP_INST_##o, (d), (m), (s0), (s1), (s2))
111
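/* Pack one source operand into the 4-dword instruction.  Inputs record
 * their use in vp->ir (the attribute enable mask); constants record the
 * slot in vpi->const_index so the instruction can be relocated when the
 * constant segment is (re)allocated.  Only one constant per instruction
 * is supported, hence the assert.
 */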
112 static void
113 emit_src(struct nv40_vpc *vpc, uint32_t *hw, int pos, struct nv40_sreg src)
114 {
115 struct nv40_vertex_program *vp = vpc->vp;
116 uint32_t sr = 0;
117
118 switch (src.type) {
119 case NV40SR_TEMP:
120 sr |= (NV40_VP_SRC_REG_TYPE_TEMP << NV40_VP_SRC_REG_TYPE_SHIFT);
121 sr |= (src.index << NV40_VP_SRC_TEMP_SRC_SHIFT);
122 break;
123 case NV40SR_INPUT:
124 sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
125 NV40_VP_SRC_REG_TYPE_SHIFT);
126 vp->ir |= (1 << src.index);
127 hw[1] |= (src.index << NV40_VP_INST_INPUT_SRC_SHIFT);
128 break;
129 case NV40SR_CONST:
130 sr |= (NV40_VP_SRC_REG_TYPE_CONST <<
131 NV40_VP_SRC_REG_TYPE_SHIFT);
132 assert(vpc->vpi->const_index == -1 ||
133 vpc->vpi->const_index == src.index);
134 vpc->vpi->const_index = src.index;
135 break;
136 case NV40SR_NONE:
137 sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
138 NV40_VP_SRC_REG_TYPE_SHIFT);
139 break;
140 default:
141 assert(0);
142 }
143
144 if (src.negate)
145 sr |= NV40_VP_SRC_NEGATE;
146
147 if (src.abs)
148 hw[0] |= (1 << (21 + pos));
149
150 sr |= ((src.swz[0] << NV40_VP_SRC_SWZ_X_SHIFT) |
151 (src.swz[1] << NV40_VP_SRC_SWZ_Y_SHIFT) |
152 (src.swz[2] << NV40_VP_SRC_SWZ_Z_SHIFT) |
153 (src.swz[3] << NV40_VP_SRC_SWZ_W_SHIFT));
154
155 switch (pos) {
156 case 0:
157 hw[1] |= ((sr & NV40_VP_SRC0_HIGH_MASK) >>
158 NV40_VP_SRC0_HIGH_SHIFT) << NV40_VP_INST_SRC0H_SHIFT;
159 hw[2] |= (sr & NV40_VP_SRC0_LOW_MASK) <<
160 NV40_VP_INST_SRC0L_SHIFT;
161 break;
162 case 1:
163 hw[2] |= sr << NV40_VP_INST_SRC1_SHIFT;
164 break;
165 case 2:
166 hw[2] |= ((sr & NV40_VP_SRC2_HIGH_MASK) >>
167 NV40_VP_SRC2_HIGH_SHIFT) << NV40_VP_INST_SRC2H_SHIFT;
168 hw[3] |= (sr & NV40_VP_SRC2_LOW_MASK) <<
169 NV40_VP_INST_SRC2L_SHIFT;
170 break;
171 default:
172 assert(0);
173 }
174 }
175
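/* Pack the destination.  Writes to outputs set the corresponding bit in
 * vp->or (the result output mask); the CLIP(n) pseudo destinations are
 * turned into FOGC/PSZ component writes here and enable the matching
 * hardware clip plane via vp->clip_ctrl.
 */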
176 static void
177 emit_dst(struct nv40_vpc *vpc, uint32_t *hw, int slot, struct nv40_sreg dst)
178 {
179 struct nv40_vertex_program *vp = vpc->vp;
180
181 switch (dst.type) {
182 case NV40SR_TEMP:
183 hw[3] |= NV40_VP_INST_DEST_MASK;
184 if (slot == 0) {
185 hw[0] |= (dst.index <<
186 NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
187 } else {
188 hw[3] |= (dst.index <<
189 NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
190 }
191 break;
192 case NV40SR_OUTPUT:
193 switch (dst.index) {
194 case NV40_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
195 case NV40_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
196 case NV40_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
197 case NV40_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
198 case NV40_VP_INST_DEST_FOGC : vp->or |= (1 << 4); break;
199 case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
200 case NV40_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
201 case NV40_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
202 case NV40_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
203 case NV40_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
204 case NV40_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
205 case NV40_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
206 case NV40_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
207 case NV40_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
208 case NV40_VP_INST_DEST_CLIP(0):
209 vp->or |= (1 << 6);
210 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE0;
211 dst.index = NV40_VP_INST_DEST_FOGC;
212 break;
213 case NV40_VP_INST_DEST_CLIP(1):
214 vp->or |= (1 << 7);
215 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE1;
216 dst.index = NV40_VP_INST_DEST_FOGC;
217 break;
218 case NV40_VP_INST_DEST_CLIP(2):
219 vp->or |= (1 << 8);
220 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE2;
221 dst.index = NV40_VP_INST_DEST_FOGC;
222 break;
223 case NV40_VP_INST_DEST_CLIP(3):
224 vp->or |= (1 << 9);
225 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE3;
226 dst.index = NV40_VP_INST_DEST_PSZ;
227 break;
228 case NV40_VP_INST_DEST_CLIP(4):
229 vp->or |= (1 << 10);
230 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE4;
231 dst.index = NV40_VP_INST_DEST_PSZ;
232 break;
233 case NV40_VP_INST_DEST_CLIP(5):
234 vp->or |= (1 << 11);
235 vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE5;
236 dst.index = NV40_VP_INST_DEST_PSZ;
237 break;
238 default:
239 break;
240 }
241
242 hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
243 if (slot == 0) {
244 hw[0] |= NV40_VP_INST_VEC_RESULT;
245 hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
246 } else {
247 hw[3] |= NV40_VP_INST_SCA_RESULT;
248 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
249 }
250 break;
251 default:
252 assert(0);
253 }
254 }
255
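/* Append one instruction to the program.  Every instruction is emitted
 * with an always-true condition and identity condition swizzle, and the
 * destination of the unused slot is filled with its DEST_TEMP_MASK value,
 * which appears to be the "no destination" encoding, so the other unit
 * does not clobber a real register.
 */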
256 static void
257 nv40_vp_arith(struct nv40_vpc *vpc, int slot, int op,
258 struct nv40_sreg dst, int mask,
259 struct nv40_sreg s0, struct nv40_sreg s1,
260 struct nv40_sreg s2)
261 {
262 struct nv40_vertex_program *vp = vpc->vp;
263 uint32_t *hw;
264
265 vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
266 vpc->vpi = &vp->insns[vp->nr_insns - 1];
267 memset(vpc->vpi, 0, sizeof(*vpc->vpi));
268 vpc->vpi->const_index = -1;
269
270 hw = vpc->vpi->data;
271
272 hw[0] |= (NV40_VP_INST_COND_TR << NV40_VP_INST_COND_SHIFT);
273 hw[0] |= ((0 << NV40_VP_INST_COND_SWZ_X_SHIFT) |
274 (1 << NV40_VP_INST_COND_SWZ_Y_SHIFT) |
275 (2 << NV40_VP_INST_COND_SWZ_Z_SHIFT) |
276 (3 << NV40_VP_INST_COND_SWZ_W_SHIFT));
277
278 if (slot == 0) {
279 hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
280 hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
281 hw[3] |= (mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
282 } else {
283 hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
284 hw[0] |= (NV40_VP_INST_VEC_DEST_TEMP_MASK | (1 << 20));
285 hw[3] |= (mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
286 }
287
288 emit_dst(vpc, hw, slot, dst);
289 emit_src(vpc, hw, 0, s0);
290 emit_src(vpc, hw, 1, s1);
291 emit_src(vpc, hw, 2, s2);
292 }
293
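/* Translate TGSI register references into nv40_sreg form: sources pick up
 * swizzle/negate/absolute modifiers, destinations map straight to the
 * registers reserved in r_result/r_temp/r_address, and tgsi_mask()
 * converts a TGSI writemask into the MASK_* bits used above.
 */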
294 static INLINE struct nv40_sreg
295 tgsi_src(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
296 struct nv40_sreg src;
297
298 switch (fsrc->Register.File) {
299 case TGSI_FILE_INPUT:
300 src = nv40_sr(NV40SR_INPUT, fsrc->Register.Index);
301 break;
302 case TGSI_FILE_CONSTANT:
303 src = constant(vpc, fsrc->Register.Index, 0, 0, 0, 0);
304 break;
305 case TGSI_FILE_IMMEDIATE:
306 src = vpc->imm[fsrc->Register.Index];
307 break;
308 case TGSI_FILE_TEMPORARY:
309 src = vpc->r_temp[fsrc->Register.Index];
310 break;
311 default:
312 NOUVEAU_ERR("bad src file\n");
313 break;
314 }
315
316 src.abs = fsrc->Register.Absolute;
317 src.negate = fsrc->Register.Negate;
318 src.swz[0] = fsrc->Register.SwizzleX;
319 src.swz[1] = fsrc->Register.SwizzleY;
320 src.swz[2] = fsrc->Register.SwizzleZ;
321 src.swz[3] = fsrc->Register.SwizzleW;
322 return src;
323 }
324
325 static INLINE struct nv40_sreg
326 tgsi_dst(struct nv40_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
327 struct nv40_sreg dst;
328
329 switch (fdst->Register.File) {
330 case TGSI_FILE_OUTPUT:
331 dst = vpc->r_result[fdst->Register.Index];
332 break;
333 case TGSI_FILE_TEMPORARY:
334 dst = vpc->r_temp[fdst->Register.Index];
335 break;
336 case TGSI_FILE_ADDRESS:
337 dst = vpc->r_address[fdst->Register.Index];
338 break;
339 default:
340 NOUVEAU_ERR("bad dst file\n");
341 break;
342 }
343
344 return dst;
345 }
346
347 static INLINE int
348 tgsi_mask(uint tgsi)
349 {
350 int mask = 0;
351
352 if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
353 if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
354 if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
355 if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
356 return mask;
357 }
358
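/* Check whether a source swizzle can be consumed directly by the
 * hardware.  If any channel cannot, the swizzled value is copied into a
 * temporary with a partial MOV and the caller uses that instead.  With
 * plain X/Y/Z/W TGSI swizzles every channel is native, so in practice
 * this returns TRUE.
 */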
359 static boolean
360 src_native_swz(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc,
361 struct nv40_sreg *src)
362 {
363 const struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
364 struct nv40_sreg tgsi = tgsi_src(vpc, fsrc);
365 uint mask = 0;
366 uint c;
367
368 for (c = 0; c < 4; c++) {
369 switch (tgsi_util_get_full_src_register_swizzle(fsrc, c)) {
370 case TGSI_SWIZZLE_X:
371 case TGSI_SWIZZLE_Y:
372 case TGSI_SWIZZLE_Z:
373 case TGSI_SWIZZLE_W:
374 mask |= tgsi_mask(1 << c);
375 break;
376 default:
377 assert(0);
378 }
379 }
380
381 if (mask == MASK_ALL)
382 return TRUE;
383
384 *src = temp(vpc);
385
386 if (mask)
387 arith(vpc, 0, OP_MOV, *src, mask, tgsi, none, none);
388
389 return FALSE;
390 }
391
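/* Translate a single TGSI instruction.  The source loop enforces what the
 * emit path can express: only one distinct input attribute and one
 * constant *or* immediate may be referenced per instruction, so any
 * additional ones are first copied into temporaries with a MOV.
 */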
392 static boolean
393 nv40_vertprog_parse_instruction(struct nv40_vpc *vpc,
394 const struct tgsi_full_instruction *finst)
395 {
396 struct nv40_sreg src[3], dst, tmp;
397 struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
398 int mask;
399 int ai = -1, ci = -1, ii = -1;
400 int i;
401
402 if (finst->Instruction.Opcode == TGSI_OPCODE_END)
403 return TRUE;
404
405 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
406 const struct tgsi_full_src_register *fsrc;
407
408 fsrc = &finst->Src[i];
409 if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
410 src[i] = tgsi_src(vpc, fsrc);
411 }
412 }
413
414 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
415 const struct tgsi_full_src_register *fsrc;
416
417 fsrc = &finst->Src[i];
418
419 switch (fsrc->Register.File) {
420 case TGSI_FILE_INPUT:
421 case TGSI_FILE_CONSTANT:
422 case TGSI_FILE_TEMPORARY:
423 if (!src_native_swz(vpc, fsrc, &src[i]))
424 continue;
425 break;
426 default:
427 break;
428 }
429
430 switch (fsrc->Register.File) {
431 case TGSI_FILE_INPUT:
432 if (ai == -1 || ai == fsrc->Register.Index) {
433 ai = fsrc->Register.Index;
434 src[i] = tgsi_src(vpc, fsrc);
435 } else {
436 src[i] = temp(vpc);
437 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
438 tgsi_src(vpc, fsrc), none, none);
439 }
440 break;
441 case TGSI_FILE_CONSTANT:
442 if ((ci == -1 && ii == -1) ||
443 ci == fsrc->Register.Index) {
444 ci = fsrc->Register.Index;
445 src[i] = tgsi_src(vpc, fsrc);
446 } else {
447 src[i] = temp(vpc);
448 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
449 tgsi_src(vpc, fsrc), none, none);
450 }
451 break;
452 case TGSI_FILE_IMMEDIATE:
453 if ((ci == -1 && ii == -1) ||
454 ii == fsrc->Register.Index) {
455 ii = fsrc->Register.Index;
456 src[i] = tgsi_src(vpc, fsrc);
457 } else {
458 src[i] = temp(vpc);
459 arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
460 tgsi_src(vpc, fsrc), none, none);
461 }
462 break;
463 case TGSI_FILE_TEMPORARY:
464 /* handled above */
465 break;
466 default:
467 NOUVEAU_ERR("bad src file\n");
468 return FALSE;
469 }
470 }
471
472 dst = tgsi_dst(vpc, &finst->Dst[0]);
473 mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
474
475 switch (finst->Instruction.Opcode) {
476 case TGSI_OPCODE_ABS:
477 arith(vpc, 0, OP_MOV, dst, mask, abs(src[0]), none, none);
478 break;
479 case TGSI_OPCODE_ADD:
480 arith(vpc, 0, OP_ADD, dst, mask, src[0], none, src[1]);
481 break;
482 case TGSI_OPCODE_ARL:
483 arith(vpc, 0, OP_ARL, dst, mask, src[0], none, none);
484 break;
485 case TGSI_OPCODE_DP3:
486 arith(vpc, 0, OP_DP3, dst, mask, src[0], src[1], none);
487 break;
488 case TGSI_OPCODE_DP4:
489 arith(vpc, 0, OP_DP4, dst, mask, src[0], src[1], none);
490 break;
491 case TGSI_OPCODE_DPH:
492 arith(vpc, 0, OP_DPH, dst, mask, src[0], src[1], none);
493 break;
494 case TGSI_OPCODE_DST:
495 arith(vpc, 0, OP_DST, dst, mask, src[0], src[1], none);
496 break;
497 case TGSI_OPCODE_EX2:
498 arith(vpc, 1, OP_EX2, dst, mask, none, none, src[0]);
499 break;
500 case TGSI_OPCODE_EXP:
501 arith(vpc, 1, OP_EXP, dst, mask, none, none, src[0]);
502 break;
503 case TGSI_OPCODE_FLR:
504 arith(vpc, 0, OP_FLR, dst, mask, src[0], none, none);
505 break;
506 case TGSI_OPCODE_FRC:
507 arith(vpc, 0, OP_FRC, dst, mask, src[0], none, none);
508 break;
509 case TGSI_OPCODE_LG2:
510 arith(vpc, 1, OP_LG2, dst, mask, none, none, src[0]);
511 break;
512 case TGSI_OPCODE_LIT:
513 arith(vpc, 1, OP_LIT, dst, mask, none, none, src[0]);
514 break;
515 case TGSI_OPCODE_LOG:
516 arith(vpc, 1, OP_LOG, dst, mask, none, none, src[0]);
517 break;
518 case TGSI_OPCODE_MAD:
519 arith(vpc, 0, OP_MAD, dst, mask, src[0], src[1], src[2]);
520 break;
521 case TGSI_OPCODE_MAX:
522 arith(vpc, 0, OP_MAX, dst, mask, src[0], src[1], none);
523 break;
524 case TGSI_OPCODE_MIN:
525 arith(vpc, 0, OP_MIN, dst, mask, src[0], src[1], none);
526 break;
527 case TGSI_OPCODE_MOV:
528 arith(vpc, 0, OP_MOV, dst, mask, src[0], none, none);
529 break;
530 case TGSI_OPCODE_MUL:
531 arith(vpc, 0, OP_MUL, dst, mask, src[0], src[1], none);
532 break;
533 case TGSI_OPCODE_POW:
534 tmp = temp(vpc);
535 arith(vpc, 1, OP_LG2, tmp, MASK_X, none, none,
536 swz(src[0], X, X, X, X));
537 arith(vpc, 0, OP_MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
538 swz(src[1], X, X, X, X), none);
539 arith(vpc, 1, OP_EX2, dst, mask, none, none,
540 swz(tmp, X, X, X, X));
541 break;
542 case TGSI_OPCODE_RCP:
543 arith(vpc, 1, OP_RCP, dst, mask, none, none, src[0]);
544 break;
545 case TGSI_OPCODE_RET:
546 break;
547 case TGSI_OPCODE_RSQ:
548 arith(vpc, 1, OP_RSQ, dst, mask, none, none, abs(src[0]));
549 break;
550 case TGSI_OPCODE_SGE:
551 arith(vpc, 0, OP_SGE, dst, mask, src[0], src[1], none);
552 break;
553 case TGSI_OPCODE_SLT:
554 arith(vpc, 0, OP_SLT, dst, mask, src[0], src[1], none);
555 break;
556 case TGSI_OPCODE_SUB:
557 arith(vpc, 0, OP_ADD, dst, mask, src[0], none, neg(src[1]));
558 break;
559 case TGSI_OPCODE_XPD:
560 tmp = temp(vpc);
561 arith(vpc, 0, OP_MUL, tmp, mask,
562 swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
563 arith(vpc, 0, OP_MAD, dst, (mask & ~MASK_W),
564 swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
565 neg(tmp));
566 break;
567 default:
568 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
569 return FALSE;
570 }
571
572 release_temps(vpc);
573 return TRUE;
574 }
575
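/* Map a TGSI output declaration onto a hardware result register and
 * remember it in r_result[] for later writes.
 */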
576 static boolean
577 nv40_vertprog_parse_decl_output(struct nv40_vpc *vpc,
578 const struct tgsi_full_declaration *fdec)
579 {
580 unsigned idx = fdec->Range.First;
581 int hw;
582
583 switch (fdec->Semantic.Name) {
584 case TGSI_SEMANTIC_POSITION:
585 hw = NV40_VP_INST_DEST_POS;
586 vpc->hpos_idx = idx;
587 break;
588 case TGSI_SEMANTIC_COLOR:
589 if (fdec->Semantic.Index == 0) {
590 hw = NV40_VP_INST_DEST_COL0;
591 } else
592 if (fdec->Semantic.Index == 1) {
593 hw = NV40_VP_INST_DEST_COL1;
594 } else {
595 NOUVEAU_ERR("bad colour semantic index\n");
596 return FALSE;
597 }
598 break;
599 case TGSI_SEMANTIC_BCOLOR:
600 if (fdec->Semantic.Index == 0) {
601 hw = NV40_VP_INST_DEST_BFC0;
602 } else
603 if (fdec->Semantic.Index == 1) {
604 hw = NV40_VP_INST_DEST_BFC1;
605 } else {
606 NOUVEAU_ERR("bad bcolour semantic index\n");
607 return FALSE;
608 }
609 break;
610 case TGSI_SEMANTIC_FOG:
611 hw = NV40_VP_INST_DEST_FOGC;
612 break;
613 case TGSI_SEMANTIC_PSIZE:
614 hw = NV40_VP_INST_DEST_PSZ;
615 break;
616 case TGSI_SEMANTIC_GENERIC:
617 if (fdec->Semantic.Index <= 7) {
618 hw = NV40_VP_INST_DEST_TC(fdec->Semantic.Index);
619 } else {
620 NOUVEAU_ERR("bad generic semantic index\n");
621 return FALSE;
622 }
623 break;
624 default:
625 NOUVEAU_ERR("bad output semantic\n");
626 return FALSE;
627 }
628
629 vpc->r_result[idx] = nv40_sr(NV40SR_OUTPUT, hw);
630 return TRUE;
631 }
632
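/* First pass over the token stream: count immediates, record the highest
 * temporary index, find address registers by scanning instruction
 * destinations (gallium does not declare them), and resolve output
 * declarations.  The register arrays are then allocated up front so the
 * per-instruction pass can index them directly.
 */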
633 static boolean
634 nv40_vertprog_prepare(struct nv40_vpc *vpc)
635 {
636 struct tgsi_parse_context p;
637 int high_temp = -1, high_addr = -1, nr_imm = 0, i;
638
639 tgsi_parse_init(&p, vpc->vp->pipe.tokens);
640 while (!tgsi_parse_end_of_tokens(&p)) {
641 const union tgsi_full_token *tok = &p.FullToken;
642
643 tgsi_parse_token(&p);
644 switch(tok->Token.Type) {
645 case TGSI_TOKEN_TYPE_IMMEDIATE:
646 nr_imm++;
647 break;
648 case TGSI_TOKEN_TYPE_DECLARATION:
649 {
650 const struct tgsi_full_declaration *fdec;
651
652 fdec = &p.FullToken.FullDeclaration;
653 switch (fdec->Declaration.File) {
654 case TGSI_FILE_TEMPORARY:
655 if (fdec->Range.Last > high_temp) {
656 high_temp =
657 fdec->Range.Last;
658 }
659 break;
660 #if 0 /* this would be nice... except gallium doesn't track it */
661 case TGSI_FILE_ADDRESS:
662 if (fdec->Range.Last > high_addr) {
663 high_addr =
664 fdec->Range.Last;
665 }
666 break;
667 #endif
668 case TGSI_FILE_OUTPUT:
669 if (!nv40_vertprog_parse_decl_output(vpc, fdec))
670 return FALSE;
671 break;
672 default:
673 break;
674 }
675 }
676 break;
677 #if 1 /* yay, parse instructions looking for address regs instead */
678 case TGSI_TOKEN_TYPE_INSTRUCTION:
679 {
680 const struct tgsi_full_instruction *finst;
681 const struct tgsi_full_dst_register *fdst;
682
683 finst = &p.FullToken.FullInstruction;
684 fdst = &finst->Dst[0];
685
686 if (fdst->Register.File == TGSI_FILE_ADDRESS) {
687 if (fdst->Register.Index > high_addr)
688 high_addr = fdst->Register.Index;
689 }
690
691 }
692 break;
693 #endif
694 default:
695 break;
696 }
697 }
698 tgsi_parse_free(&p);
699
700 if (nr_imm) {
701 vpc->imm = CALLOC(nr_imm, sizeof(struct nv40_sreg));
702 assert(vpc->imm);
703 }
704
705 if (++high_temp) {
706 vpc->r_temp = CALLOC(high_temp, sizeof(struct nv40_sreg));
707 for (i = 0; i < high_temp; i++)
708 vpc->r_temp[i] = temp(vpc);
709 }
710
711 if (++high_addr) {
712 vpc->r_address = CALLOC(high_addr, sizeof(struct nv40_sreg));
713 for (i = 0; i < high_addr; i++)
714 vpc->r_address[i] = temp(vpc);
715 }
716
717 vpc->r_temps_discard = 0;
718 return TRUE;
719 }
720
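/* Translate the TGSI program into nv40 vertex program instructions.  If
 * user clip planes are active, HPOS is written to a temporary and copied
 * to the real position output at the end, then one DP4 per enabled plane
 * computes the clip distance against the plane equation uploaded as a
 * constant.  vp->translated is only set on success.
 */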
721 static void
722 nv40_vertprog_translate(struct nv40_context *nv40,
723 struct nv40_vertex_program *vp)
724 {
725 struct tgsi_parse_context parse;
726 struct nv40_vpc *vpc = NULL;
727 struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
728 int i;
729
730 vpc = CALLOC(1, sizeof(struct nv40_vpc));
731 if (!vpc)
732 return;
733 vpc->vp = vp;
734
735 if (!nv40_vertprog_prepare(vpc)) {
736 FREE(vpc);
737 return;
738 }
739
740 /* Redirect post-transform vertex position to a temp if user clip
741  * planes are enabled. We need to append code to the vtxprog
742 * to handle clip planes later.
743 */
744 if (vp->ucp.nr) {
745 vpc->r_result[vpc->hpos_idx] = temp(vpc);
746 vpc->r_temps_discard = 0;
747 }
748
749 tgsi_parse_init(&parse, vp->pipe.tokens);
750
751 while (!tgsi_parse_end_of_tokens(&parse)) {
752 tgsi_parse_token(&parse);
753
754 switch (parse.FullToken.Token.Type) {
755 case TGSI_TOKEN_TYPE_IMMEDIATE:
756 {
757 const struct tgsi_full_immediate *imm;
758
759 imm = &parse.FullToken.FullImmediate;
760 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
761 assert(imm->Immediate.NrTokens == 4 + 1);
762 vpc->imm[vpc->nr_imm++] =
763 constant(vpc, -1,
764 imm->u[0].Float,
765 imm->u[1].Float,
766 imm->u[2].Float,
767 imm->u[3].Float);
768 }
769 break;
770 case TGSI_TOKEN_TYPE_INSTRUCTION:
771 {
772 const struct tgsi_full_instruction *finst;
773 finst = &parse.FullToken.FullInstruction;
774 if (!nv40_vertprog_parse_instruction(vpc, finst))
775 goto out_err;
776 }
777 break;
778 default:
779 break;
780 }
781 }
782
783 /* Write out HPOS if it was redirected to a temp earlier */
784 if (vpc->r_result[vpc->hpos_idx].type != NV40SR_OUTPUT) {
785 struct nv40_sreg hpos = nv40_sr(NV40SR_OUTPUT,
786 NV40_VP_INST_DEST_POS);
787 struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
788
789 arith(vpc, 0, OP_MOV, hpos, MASK_ALL, htmp, none, none);
790 }
791
792 /* Insert code to handle user clip planes */
793 for (i = 0; i < vp->ucp.nr; i++) {
794 struct nv40_sreg cdst = nv40_sr(NV40SR_OUTPUT,
795 NV40_VP_INST_DEST_CLIP(i));
796 struct nv40_sreg ceqn = constant(vpc, -1,
797 nv40->clip.ucp[i][0],
798 nv40->clip.ucp[i][1],
799 nv40->clip.ucp[i][2],
800 nv40->clip.ucp[i][3]);
801 struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
802 unsigned mask;
803
804 switch (i) {
805 case 0: case 3: mask = MASK_Y; break;
806 case 1: case 4: mask = MASK_Z; break;
807 case 2: case 5: mask = MASK_W; break;
808 default:
809 NOUVEAU_ERR("invalid clip dist #%d\n", i);
810 goto out_err;
811 }
812
813 arith(vpc, 0, OP_DP4, cdst, mask, htmp, ceqn, none);
814 }
815
816 vp->insns[vp->nr_insns - 1].data[3] |= NV40_VP_INST_LAST;
817 vp->translated = TRUE;
818 out_err:
819 tgsi_parse_free(&parse);
820 if (vpc->r_temp)
821 FREE(vpc->r_temp);
822 if (vpc->r_address)
823 FREE(vpc->r_address);
824 if (vpc->imm)
825 FREE(vpc->imm);
826 FREE(vpc);
827 }
828
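/* Validate the vertex program against the current state: translate it if
 * necessary, allocate (and if needed evict other programs from) the
 * on-chip exec and constant heaps, patch constant source indices when the
 * data segment moves, upload changed constants and the instruction words,
 * and return TRUE when the bound hardware state object changed.
 */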
829 static boolean
830 nv40_vertprog_validate(struct nv40_context *nv40)
831 {
832 struct pipe_screen *pscreen = nv40->pipe.screen;
833 struct nouveau_grobj *curie = nv40->screen->curie;
834 struct nv40_vertex_program *vp;
835 struct pipe_buffer *constbuf;
836 boolean upload_code = FALSE, upload_data = FALSE;
837 int i;
838
839 if (nv40->render_mode == HW) {
840 vp = nv40->vertprog;
841 constbuf = nv40->constbuf[PIPE_SHADER_VERTEX];
842
843 if ((nv40->dirty & NV40_NEW_UCP) ||
844 memcmp(&nv40->clip, &vp->ucp, sizeof(vp->ucp))) {
845 nv40_vertprog_destroy(nv40, vp);
846 memcpy(&vp->ucp, &nv40->clip, sizeof(vp->ucp));
847 }
848 } else {
849 vp = nv40->swtnl.vertprog;
850 constbuf = NULL;
851 }
852
853 /* Translate TGSI shader into hw bytecode */
854 if (vp->translated)
855 goto check_gpu_resources;
856
857 nv40->fallback_swtnl &= ~NV40_NEW_VERTPROG;
858 nv40_vertprog_translate(nv40, vp);
859 if (!vp->translated) {
860 nv40->fallback_swtnl |= NV40_NEW_VERTPROG;
861 return FALSE;
862 }
863
864 check_gpu_resources:
865 /* Allocate hw vtxprog exec slots */
866 if (!vp->exec) {
867 struct nouveau_resource *heap = nv40->screen->vp_exec_heap;
868 struct nouveau_stateobj *so;
869 uint vplen = vp->nr_insns;
870
871 if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
872 while (heap->next && heap->size < vplen) {
873 struct nv40_vertex_program *evict;
874
875 evict = heap->next->priv;
876 nouveau_resource_free(&evict->exec);
877 }
878
879 if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
880 assert(0);
881 }
882
883 so = so_new(7, 0);
884 so_method(so, curie, NV40TCL_VP_START_FROM_ID, 1);
885 so_data (so, vp->exec->start);
886 so_method(so, curie, NV40TCL_VP_ATTRIB_EN, 2);
887 so_data (so, vp->ir);
888 so_data (so, vp->or);
889 so_method(so, curie, NV40TCL_CLIP_PLANE_ENABLE, 1);
890 so_data (so, vp->clip_ctrl);
891 so_ref(so, &vp->so);
892 so_ref(NULL, &so);
893
894 upload_code = TRUE;
895 }
896
897 /* Allocate hw vtxprog const slots */
898 if (vp->nr_consts && !vp->data) {
899 struct nouveau_resource *heap = nv40->screen->vp_data_heap;
900
901 if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
902 while (heap->next && heap->size < vp->nr_consts) {
903 struct nv40_vertex_program *evict;
904
905 evict = heap->next->priv;
906 nouveau_resource_free(&evict->data);
907 }
908
909 if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
910 assert(0);
911 }
912
913 /*XXX: handle this some day */
914 assert(vp->data->start >= vp->data_start_min);
915
916 upload_data = TRUE;
917 if (vp->data_start != vp->data->start)
918 upload_code = TRUE;
919 }
920
921 /* If exec or data segments moved we need to patch the program to
922  * fix up offsets and register IDs.
923 */
924 if (vp->exec_start != vp->exec->start) {
925 for (i = 0; i < vp->nr_insns; i++) {
926 struct nv40_vertex_program_exec *vpi = &vp->insns[i];
927
928 if (vpi->has_branch_offset) {
929 assert(0);
930 }
931 }
932
933 vp->exec_start = vp->exec->start;
934 }
935
936 if (vp->nr_consts && vp->data_start != vp->data->start) {
937 for (i = 0; i < vp->nr_insns; i++) {
938 struct nv40_vertex_program_exec *vpi = &vp->insns[i];
939
940 if (vpi->const_index >= 0) {
941 vpi->data[1] &= ~NV40_VP_INST_CONST_SRC_MASK;
942 vpi->data[1] |=
943 (vpi->const_index + vp->data->start) <<
944 NV40_VP_INST_CONST_SRC_SHIFT;
945
946 }
947 }
948
949 vp->data_start = vp->data->start;
950 }
951
952 /* Update + Upload constant values */
953 if (vp->nr_consts) {
954 float *map = NULL;
955
956 if (constbuf) {
957 map = pipe_buffer_map(pscreen, constbuf,
958 PIPE_BUFFER_USAGE_CPU_READ);
959 }
960
961 for (i = 0; i < vp->nr_consts; i++) {
962 struct nv40_vertex_program_data *vpd = &vp->consts[i];
963
964 if (vpd->index >= 0) {
965 if (!upload_data &&
966 !memcmp(vpd->value, &map[vpd->index * 4],
967 4 * sizeof(float)))
968 continue;
969 memcpy(vpd->value, &map[vpd->index * 4],
970 4 * sizeof(float));
971 }
972
973 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_CONST_ID, 5);
974 OUT_RING (i + vp->data->start);
975 OUT_RINGp ((uint32_t *)vpd->value, 4);
976 }
977
978 if (constbuf)
979 pscreen->buffer_unmap(pscreen, constbuf);
980 }
981
982 /* Upload vtxprog */
983 if (upload_code) {
984 #if 0
985 for (i = 0; i < vp->nr_insns; i++) {
986 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[0]);
987 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[1]);
988 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[2]);
989 NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[3]);
990 }
991 #endif
992 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_FROM_ID, 1);
993 OUT_RING (vp->exec->start);
994 for (i = 0; i < vp->nr_insns; i++) {
995 BEGIN_RING(curie, NV40TCL_VP_UPLOAD_INST(0), 4);
996 OUT_RINGp (vp->insns[i].data, 4);
997 }
998 }
999
1000 if (vp->so != nv40->state.hw[NV40_STATE_VERTPROG]) {
1001 so_ref(vp->so, &nv40->state.hw[NV40_STATE_VERTPROG]);
1002 return TRUE;
1003 }
1004
1005 return FALSE;
1006 }
1007
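/* Throw away the translated program and any hardware resources it holds
 * so the next validate retranslates and re-uploads it.
 */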
1008 void
1009 nv40_vertprog_destroy(struct nv40_context *nv40, struct nv40_vertex_program *vp)
1010 {
1011 vp->translated = FALSE;
1012
1013 if (vp->nr_insns) {
1014 FREE(vp->insns);
1015 vp->insns = NULL;
1016 vp->nr_insns = 0;
1017 }
1018
1019 if (vp->nr_consts) {
1020 FREE(vp->consts);
1021 vp->consts = NULL;
1022 vp->nr_consts = 0;
1023 }
1024
1025 nouveau_resource_free(&vp->exec);
1026 vp->exec_start = 0;
1027 nouveau_resource_free(&vp->data);
1028 vp->data_start = 0;
1029 vp->data_start_min = 0;
1030
1031 vp->ir = vp->or = vp->clip_ctrl = 0;
1032 so_ref(NULL, &vp->so);
1033 }
1034
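/* State table entry: revalidate whenever the bound vertex program or the
 * user clip planes change.
 */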
1035 struct nv40_state_entry nv40_state_vertprog = {
1036 .validate = nv40_vertprog_validate,
1037 .dirty = {
1038 .pipe = NV40_NEW_VERTPROG | NV40_NEW_UCP,
1039 .hw = NV40_STATE_VERTPROG,
1040 }
1041 };
1042