nvfx: clean up shader header
[mesa.git] / src / gallium / drivers / nvfx / nvfx_fragprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5
6 #include "pipe/p_shader_tokens.h"
7 #include "tgsi/tgsi_parse.h"
8 #include "tgsi/tgsi_util.h"
9
10 #include "nvfx_context.h"
11 #include "nvfx_shader.h"
12
13 #define MAX_CONSTS 128
14 #define MAX_IMM 32
/* Per-compilation state used while translating a TGSI fragment program
 * into NV30/NV40 fragment-program instructions. */
struct nvfx_fpc {
	struct nvfx_fragment_program *fp;	/* program being built */

	/* TGSI input index -> hw input source (NVFX_FP_OP_INPUT_SRC_*) */
	uint attrib_map[PIPE_MAX_SHADER_INPUTS];

	unsigned r_temps;		/* bitmask of allocated hw temps */
	unsigned r_temps_discard;	/* temps to free after current insn */
	struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS]; /* output regs */
	struct nvfx_sreg *r_temp;	/* TGSI temp index -> hw temp reg */

	int num_regs;			/* highest hw temp written + 1 */

	unsigned inst_offset;		/* word offset of current insn */
	unsigned have_const;		/* current insn has a const slot */

	/* Constants referenced by the program.  pipe == -1 means an
	 * immediate whose data is in vals[]; otherwise pipe is an index
	 * into the bound constant buffer, patched at validate time. */
	struct {
		int pipe;
		float vals[4];
	} consts[MAX_CONSTS];
	int nr_consts;

	/* TGSI immediates, pre-allocated as constants */
	struct nvfx_sreg imm[MAX_IMM];
	unsigned nr_imm;
};
39
40 static INLINE struct nvfx_sreg
41 temp(struct nvfx_fpc *fpc)
42 {
43 int idx = ffs(~fpc->r_temps) - 1;
44
45 if (idx < 0) {
46 NOUVEAU_ERR("out of temps!!\n");
47 assert(0);
48 return nvfx_sr(NVFXSR_TEMP, 0);
49 }
50
51 fpc->r_temps |= (1 << idx);
52 fpc->r_temps_discard |= (1 << idx);
53 return nvfx_sr(NVFXSR_TEMP, idx);
54 }
55
56 static INLINE void
57 release_temps(struct nvfx_fpc *fpc)
58 {
59 fpc->r_temps &= ~fpc->r_temps_discard;
60 fpc->r_temps_discard = 0;
61 }
62
63 static INLINE struct nvfx_sreg
64 constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
65 {
66 int idx;
67
68 if (fpc->nr_consts == MAX_CONSTS)
69 assert(0);
70 idx = fpc->nr_consts++;
71
72 fpc->consts[idx].pipe = pipe;
73 if (pipe == -1)
74 memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
75 return nvfx_sr(NVFXSR_CONST, idx);
76 }
77
/* Shorthand emitters: "o" is the bare opcode name, expanded to
 * NVFX_FP_OP_OPCODE_<o>. */
#define arith(cc,s,o,d,m,s0,s1,s2) \
	nvfx_fp_arith((cc), (s), NVFX_FP_OP_OPCODE_##o, \
			(d), (m), (s0), (s1), (s2))
/* NOTE: tex() accepts s1/s2 for symmetry with arith() but deliberately
 * passes "none" for both extra sources — texture ops only take one. */
#define tex(cc,s,o,u,d,m,s0,s1,s2) \
	nvfx_fp_tex((cc), (s), NVFX_FP_OP_OPCODE_##o, (u), \
			(d), (m), (s0), none, none)
84
85 static void
86 grow_insns(struct nvfx_fpc *fpc, int size)
87 {
88 struct nvfx_fragment_program *fp = fpc->fp;
89
90 fp->insn_len += size;
91 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
92 }
93
/* Encode source operand "src" into source slot "pos" (0..2) of the
 * 4-word instruction at fpc->inst_offset.  Referencing a constant
 * appends an extra 4-word data slot to the instruction on first use. */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.type) {
	case NVFXSR_INPUT:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		hw[0] |= (src.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		/* outputs are read back as (half-precision) temps */
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_CONST:
		/* one inline constant slot per instruction */
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		/* grow_insns() may have reallocated fp->insn */
		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.index].pipe >= 0) {
			/* constant-buffer value: remember where to patch
			 * it at validate time, zero the slot for now */
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			/* immediate: copy the data inline */
			memcpy(&fp->insn[fpc->inst_offset + 4],
			       fpc->consts[src.index].vals,
			       sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		/* unused source slot */
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	if (src.abs)
		hw[1] |= (1 << (29 + pos)); /* per-source abs flag in word 1 */

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	/* source descriptors live in words 1..3 */
	hw[pos + 1] |= sr;
}
157
/* Encode destination operand "dst" into word 0 of the instruction at
 * fpc->inst_offset. */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		/* track highest temp written, reported via FP_CONTROL */
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		if (dst.index == 1) {
			/* output 1 — presumably the depth output; sets
			 * extra FP_CONTROL bits (TODO: confirm meaning
			 * of 0xe against hw docs) */
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		/* no register write (e.g. KIL / condition-code only) */
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
185
/* Append one 4-word ALU instruction: opcode "op", optional saturate,
 * destination + write mask, condition-code state from dst, and up to
 * three sources. */
static void
nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op,
	      struct nvfx_sreg dst, int mask,
	      struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	/* reserve a fresh, zeroed 4-word slot */
	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	if (op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (dst.dst_scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	/* condition-code update/test state is carried on dst */
	if (dst.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (dst.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((dst.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (dst.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (dst.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (dst.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	emit_dst(fpc, dst);
	emit_src(fpc, 0, s0);
	emit_src(fpc, 1, s1);
	emit_src(fpc, 2, s2);
}
222
223 static void
224 nvfx_fp_tex(struct nvfx_fpc *fpc, int sat, int op, int unit,
225 struct nvfx_sreg dst, int mask,
226 struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
227 {
228 struct nvfx_fragment_program *fp = fpc->fp;
229
230 nvfx_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
231
232 fp->insn[fpc->inst_offset] |= (unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
233 fp->samplers |= (1 << unit);
234 }
235
236 static INLINE struct nvfx_sreg
237 tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
238 {
239 struct nvfx_sreg src;
240
241 switch (fsrc->Register.File) {
242 case TGSI_FILE_INPUT:
243 src = nvfx_sr(NVFXSR_INPUT,
244 fpc->attrib_map[fsrc->Register.Index]);
245 break;
246 case TGSI_FILE_CONSTANT:
247 src = constant(fpc, fsrc->Register.Index, NULL);
248 break;
249 case TGSI_FILE_IMMEDIATE:
250 assert(fsrc->Register.Index < fpc->nr_imm);
251 src = fpc->imm[fsrc->Register.Index];
252 break;
253 case TGSI_FILE_TEMPORARY:
254 src = fpc->r_temp[fsrc->Register.Index];
255 break;
256 /* NV40 fragprog result regs are just temps, so this is simple */
257 case TGSI_FILE_OUTPUT:
258 src = fpc->r_result[fsrc->Register.Index];
259 break;
260 default:
261 NOUVEAU_ERR("bad src file\n");
262 break;
263 }
264
265 src.abs = fsrc->Register.Absolute;
266 src.negate = fsrc->Register.Negate;
267 src.swz[0] = fsrc->Register.SwizzleX;
268 src.swz[1] = fsrc->Register.SwizzleY;
269 src.swz[2] = fsrc->Register.SwizzleZ;
270 src.swz[3] = fsrc->Register.SwizzleW;
271 return src;
272 }
273
274 static INLINE struct nvfx_sreg
275 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
276 switch (fdst->Register.File) {
277 case TGSI_FILE_OUTPUT:
278 return fpc->r_result[fdst->Register.Index];
279 case TGSI_FILE_TEMPORARY:
280 return fpc->r_temp[fdst->Register.Index];
281 case TGSI_FILE_NULL:
282 return nvfx_sr(NVFXSR_NONE, 0);
283 default:
284 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
285 return nvfx_sr(NVFXSR_NONE, 0);
286 }
287 }
288
289 static INLINE int
290 tgsi_mask(uint tgsi)
291 {
292 int mask = 0;
293
294 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
295 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
296 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
297 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
298 return mask;
299 }
300
/* Check whether the source's swizzle can be used natively; if not,
 * copy the swizzled components into a fresh temp and rewrite *src to
 * point at it, returning FALSE.
 *
 * NOTE(review): with plain TGSI swizzles (X/Y/Z/W only) every component
 * hits the mask, so this appears to always return TRUE now — the
 * fallback path presumably dates from extended-swizzle support; verify
 * before removing. */
static boolean
src_native_swz(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc,
	       struct nvfx_sreg *src)
{
	const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	struct nvfx_sreg tgsi = tgsi_src(fpc, fsrc);
	uint mask = 0;
	uint c;

	for (c = 0; c < 4; c++) {
		switch (tgsi_util_get_full_src_register_swizzle(fsrc, c)) {
		case TGSI_SWIZZLE_X:
		case TGSI_SWIZZLE_Y:
		case TGSI_SWIZZLE_Z:
		case TGSI_SWIZZLE_W:
			mask |= (1 << c);
			break;
		default:
			assert(0);
		}
	}

	if (mask == NVFX_FP_MASK_ALL)
		return TRUE;

	/* non-native swizzle: resolve it with a MOV into a temp */
	*src = temp(fpc);

	if (mask)
		arith(fpc, 0, MOV, *src, mask, tgsi, none, none);

	return FALSE;
}
333
/* Translate a single TGSI instruction into one or more hw instructions.
 * Returns FALSE on an untranslatable opcode or register file. */
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_instruction *finst)
{
	const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	struct nvfx_sreg src[3], dst, tmp;
	int mask, sat, unit;
	/* at most one distinct attrib (ai), const (ci) and immediate (ii)
	 * may be read per hw instruction; extras get copied to temps */
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	/* Resolve temporaries first: they may be overwritten by the temp
	 * copies generated for the other register files below. */
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(fpc, fsrc);
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		/* non-native swizzles are resolved into a temp copy */
		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
		case TGSI_FILE_CONSTANT:
		case TGSI_FILE_TEMPORARY:
			if (!src_native_swz(fpc, fsrc, &src[i]))
				continue;
			break;
		default:
			break;
		}

		/* enforce the one-attrib / one-const-or-immediate limit */
		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		case TGSI_FILE_SAMPLER:
			unit = fsrc->Register.Index;
			break;
		case TGSI_FILE_OUTPUT:
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	dst = tgsi_dst(fpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
		break;
	case TGSI_OPCODE_ADD:
		arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_CMP:
		/* set CC from src0, then write src2 where >= 0 and
		 * src1 where < 0 */
		tmp = nvfx_sr(NVFXSR_NONE, 0);
		tmp.cc_update = 1;
		arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
		dst.cc_test = NVFX_COND_GE;
		arith(fpc, sat, MOV, dst, mask, src[2], none, none);
		dst.cc_test = NVFX_COND_LT;
		arith(fpc, sat, MOV, dst, mask, src[1], none, none);
		break;
	case TGSI_OPCODE_COS:
		arith(fpc, sat, COS, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_DDX:
		/* hw DDX only produces X/Y; compute Z/W via a swizzled
		 * second pass when the mask needs them */
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDX, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DDY:
		/* same X/Y-only limitation as DDX */
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDY, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DP3:
		arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DP4:
		arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DPH:
		/* DPH = DP3(src0, src1) + src1.w */
		tmp = temp(fpc);
		arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[1], none);
		arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
		      swz(src[1], W, W, W, W), none);
		break;
	case TGSI_OPCODE_DST:
		arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_EX2:
		arith(fpc, sat, EX2, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FLR:
		arith(fpc, sat, FLR, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FRC:
		arith(fpc, sat, FRC, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_KILP:
		/* unconditional kill */
		arith(fpc, 0, KIL, none, 0, none, none, none);
		break;
	case TGSI_OPCODE_KIL:
		/* kill where any component of src0 < 0 */
		dst = nvfx_sr(NVFXSR_NONE, 0);
		dst.cc_update = 1;
		arith(fpc, 0, MOV, dst, NVFX_FP_MASK_ALL, src[0], none, none);
		dst.cc_update = 0; dst.cc_test = NVFX_COND_LT;
		arith(fpc, 0, KIL, dst, 0, none, none, none);
		break;
	case TGSI_OPCODE_LG2:
		arith(fpc, sat, LG2, dst, mask, src[0], none, none);
		break;
//	case TGSI_OPCODE_LIT:
	case TGSI_OPCODE_LRP:
		/* NV30 has native LRP; NV40 expands to two MADs */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, LRP_NV30, dst, mask, src[0], src[1], src[2]);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]);
			arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp);
		}
		break;
	case TGSI_OPCODE_MAD:
		arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
		break;
	case TGSI_OPCODE_MAX:
		arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MIN:
		arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MOV:
		arith(fpc, sat, MOV, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_MUL:
		arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_POW:
		/* NV40: pow(x,y) = 2^(y * log2(x)) */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, POW_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, tmp, NVFX_FP_MASK_X,
			      swz(src[0], X, X, X, X), none, none);
			arith(fpc, 0, MUL, tmp, NVFX_FP_MASK_X, swz(tmp, X, X, X, X),
			      swz(src[1], X, X, X, X), none);
			arith(fpc, sat, EX2, dst, mask,
			      swz(tmp, X, X, X, X), none, none);
		}
		break;
	case TGSI_OPCODE_RCP:
		arith(fpc, sat, RCP, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_RET:
		assert(0);
		break;
	case TGSI_OPCODE_RFL:
		/* reflection vector: 2*dot(n,i)/dot(n,n)*n - i */
		if(!nvfx->is_nv4x)
			arith(fpc, 0, RFL_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[0], none);
			arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_Y, src[0], src[1], none);
			arith(fpc, 0, DIV, scale(tmp, 2X), NVFX_FP_MASK_Z,
			      swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
			arith(fpc, sat, MAD, dst, mask,
			      swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]));
		}
		break;
	case TGSI_OPCODE_RSQ:
		/* NV40: rsq(x) = 2^(-0.5 * log2(|x|)) */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, scale(tmp, INV_2X), NVFX_FP_MASK_X,
			      abs(swz(src[0], X, X, X, X)), none, none);
			arith(fpc, sat, EX2, dst, mask,
			      neg(swz(tmp, X, X, X, X)), none, none);
		}
		break;
	case TGSI_OPCODE_SCS:
		/* avoid overwriting the source */
		if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
		{
			if (mask & NVFX_FP_MASK_X) {
				arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & NVFX_FP_MASK_Y) {
				arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		else
		{
			if (mask & NVFX_FP_MASK_Y) {
				arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & NVFX_FP_MASK_X) {
				arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		break;
	case TGSI_OPCODE_SEQ:
		arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SFL:
		arith(fpc, sat, SFL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGE:
		arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGT:
		arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SIN:
		arith(fpc, sat, SIN, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_SLE:
		arith(fpc, sat, SLE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SLT:
		arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SNE:
		arith(fpc, sat, SNE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_STR:
		arith(fpc, sat, STR, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SUB:
		arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
		break;
	case TGSI_OPCODE_TEX:
		tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXB:
		tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXP:
		tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_XPD:
		/* cross product via MUL + MAD with rotated swizzles */
		tmp = temp(fpc);
		arith(fpc, 0, MUL, tmp, mask,
		      swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
		arith(fpc, sat, MAD, dst, (mask & ~NVFX_FP_MASK_W),
		      swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
		      neg(tmp));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(fpc);
	return TRUE;
}
655
656 static boolean
657 nvfx_fragprog_parse_decl_attrib(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
658 const struct tgsi_full_declaration *fdec)
659 {
660 int hw;
661
662 switch (fdec->Semantic.Name) {
663 case TGSI_SEMANTIC_POSITION:
664 hw = NVFX_FP_OP_INPUT_SRC_POSITION;
665 break;
666 case TGSI_SEMANTIC_COLOR:
667 if (fdec->Semantic.Index == 0) {
668 hw = NVFX_FP_OP_INPUT_SRC_COL0;
669 } else
670 if (fdec->Semantic.Index == 1) {
671 hw = NVFX_FP_OP_INPUT_SRC_COL1;
672 } else {
673 NOUVEAU_ERR("bad colour semantic index\n");
674 return FALSE;
675 }
676 break;
677 case TGSI_SEMANTIC_FOG:
678 hw = NVFX_FP_OP_INPUT_SRC_FOGC;
679 break;
680 case TGSI_SEMANTIC_GENERIC:
681 if (fdec->Semantic.Index <= 7) {
682 hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.
683 Index);
684 } else {
685 NOUVEAU_ERR("bad generic semantic index\n");
686 return FALSE;
687 }
688 break;
689 default:
690 NOUVEAU_ERR("bad input semantic\n");
691 return FALSE;
692 }
693
694 fpc->attrib_map[fdec->Range.First] = hw;
695 return TRUE;
696 }
697
698 static boolean
699 nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
700 const struct tgsi_full_declaration *fdec)
701 {
702 unsigned idx = fdec->Range.First;
703 unsigned hw;
704
705 switch (fdec->Semantic.Name) {
706 case TGSI_SEMANTIC_POSITION:
707 hw = 1;
708 break;
709 case TGSI_SEMANTIC_COLOR:
710 hw = ~0;
711 switch (fdec->Semantic.Index) {
712 case 0: hw = 0; break;
713 case 1: hw = 2; break;
714 case 2: hw = 3; break;
715 case 3: hw = 4; break;
716 }
717 if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
718 NOUVEAU_ERR("bad rcol index\n");
719 return FALSE;
720 }
721 break;
722 default:
723 NOUVEAU_ERR("bad output semantic\n");
724 return FALSE;
725 }
726
727 fpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw);
728 fpc->r_temps |= (1 << hw);
729 return TRUE;
730 }
731
732 static boolean
733 nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
734 {
735 struct tgsi_parse_context p;
736 int high_temp = -1, i;
737
738 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
739 while (!tgsi_parse_end_of_tokens(&p)) {
740 const union tgsi_full_token *tok = &p.FullToken;
741
742 tgsi_parse_token(&p);
743 switch(tok->Token.Type) {
744 case TGSI_TOKEN_TYPE_DECLARATION:
745 {
746 const struct tgsi_full_declaration *fdec;
747 fdec = &p.FullToken.FullDeclaration;
748 switch (fdec->Declaration.File) {
749 case TGSI_FILE_INPUT:
750 if (!nvfx_fragprog_parse_decl_attrib(nvfx, fpc, fdec))
751 goto out_err;
752 break;
753 case TGSI_FILE_OUTPUT:
754 if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
755 goto out_err;
756 break;
757 case TGSI_FILE_TEMPORARY:
758 if (fdec->Range.Last > high_temp) {
759 high_temp =
760 fdec->Range.Last;
761 }
762 break;
763 default:
764 break;
765 }
766 }
767 break;
768 case TGSI_TOKEN_TYPE_IMMEDIATE:
769 {
770 struct tgsi_full_immediate *imm;
771 float vals[4];
772
773 imm = &p.FullToken.FullImmediate;
774 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
775 assert(fpc->nr_imm < MAX_IMM);
776
777 vals[0] = imm->u[0].Float;
778 vals[1] = imm->u[1].Float;
779 vals[2] = imm->u[2].Float;
780 vals[3] = imm->u[3].Float;
781 fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
782 }
783 break;
784 default:
785 break;
786 }
787 }
788 tgsi_parse_free(&p);
789
790 if (++high_temp) {
791 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg));
792 for (i = 0; i < high_temp; i++)
793 fpc->r_temp[i] = temp(fpc);
794 fpc->r_temps_discard = 0;
795 }
796
797 return TRUE;
798
799 out_err:
800 if (fpc->r_temp)
801 FREE(fpc->r_temp);
802 tgsi_parse_free(&p);
803 return FALSE;
804 }
805
/* Translate fp's TGSI tokens into hw instructions.  On success sets
 * fp->translated; on failure leaves it unset (caller falls back to
 * software rasterization). */
static void
nvfx_fragprog_translate(struct nvfx_context *nvfx,
			struct nvfx_fragment_program *fp)
{
	struct tgsi_parse_context parse;
	struct nvfx_fpc *fpc = NULL;

	fpc = CALLOC(1, sizeof(struct nvfx_fpc));
	if (!fpc)
		return;
	fpc->fp = fp;
	fpc->num_regs = 2;

	if (!nvfx_fragprog_prepare(nvfx, fpc)) {
		FREE(fpc);
		return;
	}

	tgsi_parse_init(&parse, fp->pipe.tokens);

	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;

			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	/* NV30 and NV40 encode the temp-register count differently */
	if(!nvfx->is_nv4x)
		fp->fp_control |= (fpc->num_regs-1)/2;
	else
		fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;

	/* Terminate final instruction */
	/* NOTE(review): assumes at least one instruction was emitted —
	 * an instruction-free program would leave fp->insn NULL here;
	 * verify that cannot reach this point. */
	fp->insn[fpc->inst_offset] |= 0x00000001;

	/* Append NOP + END instruction, may or may not be necessary. */
	fpc->inst_offset = fp->insn_len;
	grow_insns(fpc, 4);
	fp->insn[fpc->inst_offset + 0] = 0x00000001;
	fp->insn[fpc->inst_offset + 1] = 0x00000000;
	fp->insn[fpc->inst_offset + 2] = 0x00000000;
	fp->insn[fpc->inst_offset + 3] = 0x00000000;

	fp->translated = TRUE;
out_err:
	tgsi_parse_free(&parse);
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	FREE(fpc);
}
867
/* Copy the assembled instruction words into the program's GPU buffer,
 * byte-swapping as required on big-endian hosts. */
static void
nvfx_fragprog_upload(struct nvfx_context *nvfx,
		     struct nvfx_fragment_program *fp)
{
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	const uint32_t le = 1; /* runtime endianness probe */
	uint32_t *map;
	int i;

	/* NOTE(review): map is not checked for NULL — assumes the
	 * buffer map cannot fail here; confirm. */
	map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);

#if 0
	for (i = 0; i < fp->insn_len; i++) {
		fflush(stdout); fflush(stderr);
		NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
		fflush(stdout); fflush(stderr);
	}
#endif

	if ((*(const uint8_t *)&le)) {
		/* little-endian host: straight copy */
		for (i = 0; i < fp->insn_len; i++) {
			map[i] = fp->insn[i];
		}
	} else {
		/* Weird swapping for big-endian chips */
		for (i = 0; i < fp->insn_len; i++) {
			map[i] = ((fp->insn[i] & 0xffff) << 16) |
				 ((fp->insn[i] >> 16) & 0xffff);
		}
	}

	pipe_buffer_unmap(pscreen, fp->buffer);
}
901
/* Make the current fragment program ready for rendering: translate and
 * upload it on first use, then patch in any constant-buffer values that
 * changed.  Returns TRUE when the hw state object needs re-emitting. */
static boolean
nvfx_fragprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_fragment_program *fp = nvfx->fragprog;
	struct pipe_buffer *constbuf =
		nvfx->constbuf[PIPE_SHADER_FRAGMENT];
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	struct nouveau_stateobj *so;
	boolean new_consts = FALSE;
	int i;

	if (fp->translated)
		goto update_constants;

	/* first use: translate, falling back to swrast on failure */
	nvfx->fallback_swrast &= ~NVFX_NEW_FRAGPROG;
	nvfx_fragprog_translate(nvfx, fp);
	if (!fp->translated) {
		nvfx->fallback_swrast |= NVFX_NEW_FRAGPROG;
		return FALSE;
	}

	fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
	nvfx_fragprog_upload(nvfx, fp);

	/* build the state object that binds the program */
	so = so_new(4, 4, 1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_ACTIVE_PROGRAM, 1);
	so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
		  NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
		  NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
		  NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_CONTROL, 1);
	so_data  (so, fp->fp_control);
	if(!nvfx->is_nv4x) {
		so_method(so, nvfx->screen->eng3d, NV34TCL_FP_REG_CONTROL, 1);
		so_data  (so, (1<<16)|0x4);
		so_method(so, nvfx->screen->eng3d, NV34TCL_TX_UNITS_ENABLE, 1);
		so_data  (so, fp->samplers);
	}

	so_ref(so, &fp->so);
	so_ref(NULL, &so);

update_constants:
	if (fp->nr_consts) {
		float *map;

		/* patch constant-buffer values into the instruction
		 * stream; re-upload only if something changed */
		map = pipe_buffer_map(pscreen, constbuf,
				      PIPE_BUFFER_USAGE_CPU_READ);
		for (i = 0; i < fp->nr_consts; i++) {
			struct nvfx_fragment_program_data *fpd = &fp->consts[i];
			uint32_t *p = &fp->insn[fpd->offset];
			uint32_t *cb = (uint32_t *)&map[fpd->index * 4];

			if (!memcmp(p, cb, 4 * sizeof(float)))
				continue;
			memcpy(p, cb, 4 * sizeof(float));
			new_consts = TRUE;
		}
		pipe_buffer_unmap(pscreen, constbuf);

		if (new_consts)
			nvfx_fragprog_upload(nvfx, fp);
	}

	if (new_consts || fp->so != nvfx->state.hw[NVFX_STATE_FRAGPROG]) {
		so_ref(fp->so, &nvfx->state.hw[NVFX_STATE_FRAGPROG]);
		return TRUE;
	}

	return FALSE;
}
973
974 void
975 nvfx_fragprog_destroy(struct nvfx_context *nvfx,
976 struct nvfx_fragment_program *fp)
977 {
978 if (fp->buffer)
979 pipe_buffer_reference(&fp->buffer, NULL);
980
981 if (fp->so)
982 so_ref(NULL, &fp->so);
983
984 if (fp->insn_len)
985 FREE(fp->insn);
986 }
987
/* State-tracker entry: revalidate the fragment program whenever the
 * NVFX_NEW_FRAGPROG dirty bit is set. */
struct nvfx_state_entry nvfx_state_fragprog = {
	.validate = nvfx_fragprog_validate,
	.dirty = {
		.pipe = NVFX_NEW_FRAGPROG,
		.hw = NVFX_STATE_FRAGPROG
	}
};