[mesa.git] src/gallium/drivers/nvfx/nvfx_fragprog.c
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"

#include "nvfx_context.h"

#define SWZ_X 0
#define SWZ_Y 1
#define SWZ_Z 2
#define SWZ_W 3
#define MASK_X 1
#define MASK_Y 2
#define MASK_Z 4
#define MASK_W 8
#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
#define DEF_SCALE NVFX_FP_OP_DST_SCALE_1X
#define DEF_CTEST NVFX_FP_OP_COND_TR
#include "nvfx_shader.h"

#define swz(s,x,y,z,w) nvfx_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
#define neg(s) nvfx_sr_neg((s))
#define abs(s) nvfx_sr_abs((s))
#define scale(s,v) nvfx_sr_scale((s), NVFX_FP_OP_DST_SCALE_##v)

#define MAX_CONSTS 128
#define MAX_IMM 32
struct nvfx_fpc {
	struct nvfx_fragment_program *fp;

	uint attrib_map[PIPE_MAX_SHADER_INPUTS];

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_sreg *r_temp;

	int num_regs;

	unsigned inst_offset;
	unsigned have_const;

	struct {
		int pipe;
		float vals[4];
	} consts[MAX_CONSTS];
	int nr_consts;

	struct nvfx_sreg imm[MAX_IMM];
	unsigned nr_imm;
};

static INLINE struct nvfx_sreg
temp(struct nvfx_fpc *fpc)
{
	int idx = ffs(~fpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		assert(0);
		return nvfx_sr(NVFXSR_TEMP, 0);
	}

	fpc->r_temps |= (1 << idx);
	fpc->r_temps_discard |= (1 << idx);
	return nvfx_sr(NVFXSR_TEMP, idx);
}

static INLINE void
release_temps(struct nvfx_fpc *fpc)
{
	fpc->r_temps &= ~fpc->r_temps_discard;
	fpc->r_temps_discard = 0;
}

static INLINE struct nvfx_sreg
constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
{
	int idx;

	if (fpc->nr_consts == MAX_CONSTS)
		assert(0);
	idx = fpc->nr_consts++;

	fpc->consts[idx].pipe = pipe;
	if (pipe == -1)
		memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
	return nvfx_sr(NVFXSR_CONST, idx);
}

#define arith(cc,s,o,d,m,s0,s1,s2) \
	nvfx_fp_arith((cc), (s), NVFX_FP_OP_OPCODE_##o, \
		      (d), (m), (s0), (s1), (s2))
#define tex(cc,s,o,u,d,m,s0,s1,s2) \
	nvfx_fp_tex((cc), (s), NVFX_FP_OP_OPCODE_##o, (u), \
		    (d), (m), (s0), none, none)

static void
grow_insns(struct nvfx_fpc *fpc, int size)
{
	struct nvfx_fragment_program *fp = fpc->fp;

	fp->insn_len += size;
	fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
}

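/*
 * Pack one source operand into the current instruction. Each instruction
 * is four 32-bit words; sources 0-2 live in words 1-3. Constant sources
 * append an extra four words after the instruction and, for constants
 * backed by the pipe constant buffer, record a patch location in
 * fp->consts so the value can be filled in at validate time.
 */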
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.type) {
	case NVFXSR_INPUT:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		hw[0] |= (src.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_CONST:
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.index].pipe >= 0) {
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			memcpy(&fp->insn[fpc->inst_offset + 4],
			       fpc->consts[src.index].vals,
			       sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	if (src.abs)
		hw[1] |= (1 << (29 + pos));

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	hw[pos + 1] |= sr;
}

static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		if (dst.index == 1) {
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}

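/*
 * Emit one four-word arithmetic instruction: opcode, write mask,
 * saturation, condition-code update/test and the destination scale go
 * into the fixed fields, then the destination and up to three sources
 * are packed by emit_dst()/emit_src().
 */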
static void
nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op,
	      struct nvfx_sreg dst, int mask,
	      struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	if (op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (dst.dst_scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	if (dst.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (dst.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((dst.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (dst.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (dst.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (dst.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	emit_dst(fpc, dst);
	emit_src(fpc, 0, s0);
	emit_src(fpc, 1, s1);
	emit_src(fpc, 2, s2);
}

static void
nvfx_fp_tex(struct nvfx_fpc *fpc, int sat, int op, int unit,
	    struct nvfx_sreg dst, int mask,
	    struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
{
	struct nvfx_fragment_program *fp = fpc->fp;

	nvfx_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);

	fp->insn[fpc->inst_offset] |= (unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
	fp->samplers |= (1 << unit);
}

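/*
 * Translate TGSI register references into nvfx_sreg values: inputs go
 * through attrib_map, constants and immediates through the fpc constant
 * pool, and temporaries/outputs through the registers allocated in
 * nvfx_fragprog_prepare().
 */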
static INLINE struct nvfx_sreg
tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
{
	struct nvfx_sreg src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src = nvfx_sr(NVFXSR_INPUT,
			      fpc->attrib_map[fsrc->Register.Index]);
		break;
	case TGSI_FILE_CONSTANT:
		src = constant(fpc, fsrc->Register.Index, NULL);
		break;
	case TGSI_FILE_IMMEDIATE:
		assert(fsrc->Register.Index < fpc->nr_imm);
		src = fpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src = fpc->r_temp[fsrc->Register.Index];
		break;
	/* NV40 fragprog result regs are just temps, so this is simple */
	case TGSI_FILE_OUTPUT:
		src = fpc->r_result[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	return src;
}

static INLINE struct nvfx_sreg
tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
	switch (fdst->Register.File) {
	case TGSI_FILE_OUTPUT:
		return fpc->r_result[fdst->Register.Index];
	case TGSI_FILE_TEMPORARY:
		return fpc->r_temp[fdst->Register.Index];
	case TGSI_FILE_NULL:
		return nvfx_sr(NVFXSR_NONE, 0);
	default:
		NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
		return nvfx_sr(NVFXSR_NONE, 0);
	}
}

static INLINE int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
	return mask;
}

static boolean
src_native_swz(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc,
	       struct nvfx_sreg *src)
{
	const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	struct nvfx_sreg tgsi = tgsi_src(fpc, fsrc);
	uint mask = 0;
	uint c;

	for (c = 0; c < 4; c++) {
		switch (tgsi_util_get_full_src_register_swizzle(fsrc, c)) {
		case TGSI_SWIZZLE_X:
		case TGSI_SWIZZLE_Y:
		case TGSI_SWIZZLE_Z:
		case TGSI_SWIZZLE_W:
			mask |= (1 << c);
			break;
		default:
			assert(0);
		}
	}

	if (mask == MASK_ALL)
		return TRUE;

	*src = temp(fpc);

	if (mask)
		arith(fpc, 0, MOV, *src, mask, tgsi, none, none);

	return FALSE;
}

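/*
 * Translate a single TGSI instruction. The hardware source slots can only
 * address one distinct input and one distinct constant/immediate per
 * instruction, so extra operands are first copied into temporaries; the
 * switch below then maps each TGSI opcode onto one or more native
 * arithmetic or texture instructions.
 */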
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_instruction *finst)
{
	const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	struct nvfx_sreg src[3], dst, tmp;
	int mask, sat, unit;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(fpc, fsrc);
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
		case TGSI_FILE_CONSTANT:
		case TGSI_FILE_TEMPORARY:
			if (!src_native_swz(fpc, fsrc, &src[i]))
				continue;
			break;
		default:
			break;
		}

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		case TGSI_FILE_SAMPLER:
			unit = fsrc->Register.Index;
			break;
		case TGSI_FILE_OUTPUT:
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	dst = tgsi_dst(fpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
		break;
	case TGSI_OPCODE_ADD:
		arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_CMP:
		tmp = nvfx_sr(NVFXSR_NONE, 0);
		tmp.cc_update = 1;
		arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
		dst.cc_test = NVFX_VP_INST_COND_GE;
		arith(fpc, sat, MOV, dst, mask, src[2], none, none);
		dst.cc_test = NVFX_VP_INST_COND_LT;
		arith(fpc, sat, MOV, dst, mask, src[1], none, none);
		break;
	case TGSI_OPCODE_COS:
		arith(fpc, sat, COS, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_DDX:
		if (mask & (MASK_Z | MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDX, tmp, MASK_X | MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, MASK_Z | MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDX, tmp, MASK_X | MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDX, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DDY:
		if (mask & (MASK_Z | MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDY, tmp, MASK_X | MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, MASK_Z | MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDY, tmp, MASK_X | MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDY, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DP3:
		arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DP4:
		arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DPH:
		tmp = temp(fpc);
		arith(fpc, 0, DP3, tmp, MASK_X, src[0], src[1], none);
		arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
		      swz(src[1], W, W, W, W), none);
		break;
	case TGSI_OPCODE_DST:
		arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_EX2:
		arith(fpc, sat, EX2, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FLR:
		arith(fpc, sat, FLR, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FRC:
		arith(fpc, sat, FRC, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_KILP:
		arith(fpc, 0, KIL, none, 0, none, none, none);
		break;
	case TGSI_OPCODE_KIL:
		dst = nvfx_sr(NVFXSR_NONE, 0);
		dst.cc_update = 1;
		arith(fpc, 0, MOV, dst, MASK_ALL, src[0], none, none);
		dst.cc_update = 0; dst.cc_test = NVFX_FP_OP_COND_LT;
		arith(fpc, 0, KIL, dst, 0, none, none, none);
		break;
	case TGSI_OPCODE_LG2:
		arith(fpc, sat, LG2, dst, mask, src[0], none, none);
		break;
//	case TGSI_OPCODE_LIT:
	case TGSI_OPCODE_LRP:
		if(!nvfx->is_nv4x)
			arith(fpc, sat, LRP_NV30, dst, mask, src[0], src[1], src[2]);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]);
			arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp);
		}
		break;
	case TGSI_OPCODE_MAD:
		arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
		break;
	case TGSI_OPCODE_MAX:
		arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MIN:
		arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MOV:
		arith(fpc, sat, MOV, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_MUL:
		arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_POW:
		if(!nvfx->is_nv4x)
			arith(fpc, sat, POW_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, tmp, MASK_X,
			      swz(src[0], X, X, X, X), none, none);
			arith(fpc, 0, MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
			      swz(src[1], X, X, X, X), none);
			arith(fpc, sat, EX2, dst, mask,
			      swz(tmp, X, X, X, X), none, none);
		}
		break;
	case TGSI_OPCODE_RCP:
		arith(fpc, sat, RCP, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_RET:
		assert(0);
		break;
	case TGSI_OPCODE_RFL:
		if(!nvfx->is_nv4x)
			arith(fpc, 0, RFL_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, DP3, tmp, MASK_X, src[0], src[0], none);
			arith(fpc, 0, DP3, tmp, MASK_Y, src[0], src[1], none);
			arith(fpc, 0, DIV, scale(tmp, 2X), MASK_Z,
			      swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
			arith(fpc, sat, MAD, dst, mask,
			      swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]));
		}
		break;
	case TGSI_OPCODE_RSQ:
		if(!nvfx->is_nv4x)
			arith(fpc, sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, scale(tmp, INV_2X), MASK_X,
			      abs(swz(src[0], X, X, X, X)), none, none);
			arith(fpc, sat, EX2, dst, mask,
			      neg(swz(tmp, X, X, X, X)), none, none);
		}
		break;
	case TGSI_OPCODE_SCS:
		/* avoid overwriting the source */
		if(src[0].swz[SWZ_X] != SWZ_X)
		{
			if (mask & MASK_X) {
				arith(fpc, sat, COS, dst, MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & MASK_Y) {
				arith(fpc, sat, SIN, dst, MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		else
		{
			if (mask & MASK_Y) {
				arith(fpc, sat, SIN, dst, MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & MASK_X) {
				arith(fpc, sat, COS, dst, MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		break;
	case TGSI_OPCODE_SEQ:
		arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SFL:
		arith(fpc, sat, SFL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGE:
		arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGT:
		arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SIN:
		arith(fpc, sat, SIN, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_SLE:
		arith(fpc, sat, SLE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SLT:
		arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SNE:
		arith(fpc, sat, SNE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_STR:
		arith(fpc, sat, STR, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SUB:
		arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
		break;
	case TGSI_OPCODE_TEX:
		tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXB:
		tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXP:
		tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_XPD:
		tmp = temp(fpc);
		arith(fpc, 0, MUL, tmp, mask,
		      swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
		arith(fpc, sat, MAD, dst, (mask & ~MASK_W),
		      swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
		      neg(tmp));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(fpc);
	return TRUE;
}

static boolean
nvfx_fragprog_parse_decl_attrib(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_declaration *fdec)
{
	int hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_FP_OP_INPUT_SRC_POSITION;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_FP_OP_INPUT_SRC_COL0;
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_FP_OP_INPUT_SRC_COL1;
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_FP_OP_INPUT_SRC_FOGC;
		break;
	case TGSI_SEMANTIC_GENERIC:
		if (fdec->Semantic.Index <= 7) {
			hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.Index);
		} else {
			NOUVEAU_ERR("bad generic semantic index\n");
			return FALSE;
		}
		break;
	default:
		NOUVEAU_ERR("bad input semantic\n");
		return FALSE;
	}

	fpc->attrib_map[fdec->Range.First] = hw;
	return TRUE;
}

static boolean
nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	unsigned hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = 1;
		break;
	case TGSI_SEMANTIC_COLOR:
		hw = ~0;
		switch (fdec->Semantic.Index) {
		case 0: hw = 0; break;
		case 1: hw = 2; break;
		case 2: hw = 3; break;
		case 3: hw = 4; break;
		}
		if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
			NOUVEAU_ERR("bad rcol index\n");
			return FALSE;
		}
		break;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	fpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw);
	fpc->r_temps |= (1 << hw);
	return TRUE;
}

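/*
 * First pass over the TGSI tokens: record the input attribute mapping,
 * reserve output registers, note how many temporaries are used and turn
 * immediates into entries in the constant pool, before any code is
 * emitted.
 */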
static boolean
nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
{
	struct tgsi_parse_context p;
	int high_temp = -1, i;

	tgsi_parse_init(&p, fpc->fp->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;
			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_INPUT:
				if (!nvfx_fragprog_parse_decl_attrib(nvfx, fpc, fdec))
					goto out_err;
				break;
			case TGSI_FILE_OUTPUT:
				if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
					goto out_err;
				break;
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp) {
					high_temp = fdec->Range.Last;
				}
				break;
			default:
				break;
			}
		}
			break;
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			struct tgsi_full_immediate *imm;
			float vals[4];

			imm = &p.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(fpc->nr_imm < MAX_IMM);

			vals[0] = imm->u[0].Float;
			vals[1] = imm->u[1].Float;
			vals[2] = imm->u[2].Float;
			vals[3] = imm->u[3].Float;
			fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (++high_temp) {
		fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg));
		for (i = 0; i < high_temp; i++)
			fpc->r_temp[i] = temp(fpc);
		fpc->r_temps_discard = 0;
	}

	return TRUE;

out_err:
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	tgsi_parse_free(&p);
	return FALSE;
}

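/* Translate the whole program: prepare, emit each instruction, set the
 * temp count in fp_control and terminate the instruction stream. */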
static void
nvfx_fragprog_translate(struct nvfx_context *nvfx,
			struct nvfx_fragment_program *fp)
{
	struct tgsi_parse_context parse;
	struct nvfx_fpc *fpc = NULL;

	fpc = CALLOC(1, sizeof(struct nvfx_fpc));
	if (!fpc)
		return;
	fpc->fp = fp;
	fpc->num_regs = 2;

	if (!nvfx_fragprog_prepare(nvfx, fpc)) {
		FREE(fpc);
		return;
	}

	tgsi_parse_init(&parse, fp->pipe.tokens);

	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;

			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	if(!nvfx->is_nv4x)
		fp->fp_control |= (fpc->num_regs-1)/2;
	else
		fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;

	/* Terminate final instruction */
	fp->insn[fpc->inst_offset] |= 0x00000001;

	/* Append NOP + END instruction, may or may not be necessary. */
	fpc->inst_offset = fp->insn_len;
	grow_insns(fpc, 4);
	fp->insn[fpc->inst_offset + 0] = 0x00000001;
	fp->insn[fpc->inst_offset + 1] = 0x00000000;
	fp->insn[fpc->inst_offset + 2] = 0x00000000;
	fp->insn[fpc->inst_offset + 3] = 0x00000000;

	fp->translated = TRUE;
out_err:
	tgsi_parse_free(&parse);
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	FREE(fpc);
}

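/*
 * Copy the generated program into the hardware buffer, swapping the
 * 16-bit halves of each word on big-endian hosts; the buffer itself is
 * accessed through pipe_buffer_map()/pipe_buffer_unmap().
 */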
static void
nvfx_fragprog_upload(struct nvfx_context *nvfx,
		     struct nvfx_fragment_program *fp)
{
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	const uint32_t le = 1;
	uint32_t *map;
	int i;

	map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);

#if 0
	for (i = 0; i < fp->insn_len; i++) {
		fflush(stdout); fflush(stderr);
		NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
		fflush(stdout); fflush(stderr);
	}
#endif

	if ((*(const uint8_t *)&le)) {
		for (i = 0; i < fp->insn_len; i++) {
			map[i] = fp->insn[i];
		}
	} else {
		/* Weird swapping for big-endian chips */
		for (i = 0; i < fp->insn_len; i++) {
			map[i] = ((fp->insn[i] & 0xffff) << 16) |
				 ((fp->insn[i] >> 16) & 0xffff);
		}
	}

	pipe_buffer_unmap(pscreen, fp->buffer);
}

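/*
 * Validate state: translate the program on first use, upload it, build
 * the state object that binds it, and re-patch/re-upload the microcode
 * whenever values in the fragment constant buffer change.
 */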
static boolean
nvfx_fragprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_fragment_program *fp = nvfx->fragprog;
	struct pipe_buffer *constbuf =
		nvfx->constbuf[PIPE_SHADER_FRAGMENT];
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	struct nouveau_stateobj *so;
	boolean new_consts = FALSE;
	int i;

	if (fp->translated)
		goto update_constants;

	nvfx->fallback_swrast &= ~NVFX_NEW_FRAGPROG;
	nvfx_fragprog_translate(nvfx, fp);
	if (!fp->translated) {
		nvfx->fallback_swrast |= NVFX_NEW_FRAGPROG;
		return FALSE;
	}

	fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
	nvfx_fragprog_upload(nvfx, fp);

	so = so_new(4, 4, 1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_ACTIVE_PROGRAM, 1);
	so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
		  NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
		  NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
		  NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_CONTROL, 1);
	so_data  (so, fp->fp_control);
	if(!nvfx->is_nv4x) {
		so_method(so, nvfx->screen->eng3d, NV34TCL_FP_REG_CONTROL, 1);
		so_data  (so, (1<<16)|0x4);
		so_method(so, nvfx->screen->eng3d, NV34TCL_TX_UNITS_ENABLE, 1);
		so_data  (so, fp->samplers);
	}

	so_ref(so, &fp->so);
	so_ref(NULL, &so);

update_constants:
	if (fp->nr_consts) {
		float *map;

		map = pipe_buffer_map(pscreen, constbuf,
				      PIPE_BUFFER_USAGE_CPU_READ);
		for (i = 0; i < fp->nr_consts; i++) {
			struct nvfx_fragment_program_data *fpd = &fp->consts[i];
			uint32_t *p = &fp->insn[fpd->offset];
			uint32_t *cb = (uint32_t *)&map[fpd->index * 4];

			if (!memcmp(p, cb, 4 * sizeof(float)))
				continue;
			memcpy(p, cb, 4 * sizeof(float));
			new_consts = TRUE;
		}
		pipe_buffer_unmap(pscreen, constbuf);

		if (new_consts)
			nvfx_fragprog_upload(nvfx, fp);
	}

	if (new_consts || fp->so != nvfx->state.hw[NVFX_STATE_FRAGPROG]) {
		so_ref(fp->so, &nvfx->state.hw[NVFX_STATE_FRAGPROG]);
		return TRUE;
	}

	return FALSE;
}

void
nvfx_fragprog_destroy(struct nvfx_context *nvfx,
		      struct nvfx_fragment_program *fp)
{
	if (fp->buffer)
		pipe_buffer_reference(&fp->buffer, NULL);

	if (fp->so)
		so_ref(NULL, &fp->so);

	if (fp->insn_len)
		FREE(fp->insn);
}

struct nvfx_state_entry nvfx_state_fragprog = {
	.validate = nvfx_fragprog_validate,
	.dirty = {
		.pipe = NVFX_NEW_FRAGPROG,
		.hw = NVFX_STATE_FRAGPROG
	}
};