Merge branch '7.8'
[mesa.git] / src / gallium / drivers / nvfx / nvfx_fragprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5
6 #include "pipe/p_shader_tokens.h"
7 #include "tgsi/tgsi_parse.h"
8 #include "tgsi/tgsi_util.h"
9
10 #include "nvfx_context.h"
11 #include "nvfx_shader.h"
12
#define MAX_CONSTS 128
#define MAX_IMM 32
/* Per-translation scratch state used while compiling one TGSI fragment
 * program into NV30/NV40 hardware instructions. */
struct nvfx_fpc {
	struct nvfx_fragment_program *fp;

	/* TGSI input index -> hardware input slot (NVFX_FP_OP_INPUT_SRC_*),
	 * filled in by nvfx_fragprog_parse_decl_attrib(). */
	uint attrib_map[PIPE_MAX_SHADER_INPUTS];

	/* Bitmask of hardware temp registers currently allocated. */
	unsigned r_temps;
	/* Subset of r_temps released by release_temps() after each
	 * TGSI instruction (per-instruction scratch temps). */
	unsigned r_temps_discard;
	/* TGSI output index -> hardware result register. */
	struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
	/* TGSI temporary index -> hardware temp register. */
	struct nvfx_sreg *r_temp;

	/* Number of hw temp regs used so far (max written index + 1,
	 * tracked in emit_dst()). */
	int num_regs;

	/* Dword offset of the instruction currently being emitted. */
	unsigned inst_offset;
	/* Set once the current instruction's 4-dword inline-constant slot
	 * has been allocated (see emit_src()). */
	unsigned have_const;

	/* Constant pool: pipe >= 0 references that index in the pipe
	 * constant buffer; pipe == -1 is an immediate with data in vals[]. */
	struct {
		int pipe;
		float vals[4];
	} consts[MAX_CONSTS];
	int nr_consts;

	/* TGSI immediates, pre-registered as inline constants. */
	struct nvfx_sreg imm[MAX_IMM];
	unsigned nr_imm;
};
39
40 static INLINE struct nvfx_sreg
41 temp(struct nvfx_fpc *fpc)
42 {
43 int idx = ffs(~fpc->r_temps) - 1;
44
45 if (idx < 0) {
46 NOUVEAU_ERR("out of temps!!\n");
47 assert(0);
48 return nvfx_sr(NVFXSR_TEMP, 0);
49 }
50
51 fpc->r_temps |= (1 << idx);
52 fpc->r_temps_discard |= (1 << idx);
53 return nvfx_sr(NVFXSR_TEMP, idx);
54 }
55
56 static INLINE void
57 release_temps(struct nvfx_fpc *fpc)
58 {
59 fpc->r_temps &= ~fpc->r_temps_discard;
60 fpc->r_temps_discard = 0;
61 }
62
63 static INLINE struct nvfx_sreg
64 constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
65 {
66 int idx;
67
68 if (fpc->nr_consts == MAX_CONSTS)
69 assert(0);
70 idx = fpc->nr_consts++;
71
72 fpc->consts[idx].pipe = pipe;
73 if (pipe == -1)
74 memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
75 return nvfx_sr(NVFXSR_CONST, idx);
76 }
77
/* Emit an ALU instruction; o is the bare opcode suffix (MOV, ADD, ...). */
#define arith(cc,s,o,d,m,s0,s1,s2) \
	nvfx_fp_arith((cc), (s), NVFX_FP_OP_OPCODE_##o, \
			(d), (m), (s0), (s1), (s2))
/* Emit a texture instruction on sampler unit u.
 * NOTE(review): the s1/s2 arguments are ignored and "none" is always
 * passed for the second and third sources; all current call sites pass
 * none anyway, but the parameters are misleading. */
#define tex(cc,s,o,u,d,m,s0,s1,s2) \
	nvfx_fp_tex((cc), (s), NVFX_FP_OP_OPCODE_##o, (u), \
		    (d), (m), (s0), none, none)
84
85 static void
86 grow_insns(struct nvfx_fpc *fpc, int size)
87 {
88 struct nvfx_fragment_program *fp = fpc->fp;
89
90 fp->insn_len += size;
91 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
92 }
93
/* Encode source operand `src` into source slot `pos` (0..2) of the
 * instruction at fpc->inst_offset. */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.type) {
	case NVFXSR_INPUT:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		/* The attribute index lives in word 0, shared by all srcs;
		 * only one distinct input per instruction is possible. */
		hw[0] |= (src.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_CONST:
		/* Constants ride in a 4-dword slot appended to the
		 * instruction; allocate it on first constant use. */
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		/* grow_insns() may have realloc'd fp->insn - refetch hw. */
		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.index].pipe >= 0) {
			/* Pipe constant: record offset/index so validate()
			 * can patch the slot from the constant buffer. */
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			/* Immediate: copy the values straight in. */
			memcpy(&fp->insn[fpc->inst_offset + 4],
			       fpc->consts[src.index].vals,
			       sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	/* Absolute-value flags sit in word 1, one bit per source slot. */
	if (src.abs)
		hw[1] |= (1 << (29 + pos));

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	hw[pos + 1] |= sr;
}
157
/* Encode destination operand `dst` into the instruction at
 * fpc->inst_offset and update the temp-register high-water mark. */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		/* Track highest temp written; reported via fp_control. */
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		if (dst.index == 1) {
			/* NOTE(review): output index 1 appears to be the
			 * depth result; 0xe in fp_control - confirm against
			 * the hardware documentation. */
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		/* No destination register: bit 30 marks the write dead. */
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
185
/* Append one hardware ALU instruction.
 * sat saturates the result, op is a NVFX_FP_OP_OPCODE_* value, mask is
 * the write mask; unused sources must be "none". */
static void
nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op,
	      struct nvfx_sreg dst, int mask,
	      struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	/* Reserve a fresh 4-dword slot; emit_src() may append 4 more
	 * dwords for an inline constant, tracked through have_const. */
	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	if (op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (dst.dst_scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	/* Condition-code update/test state is carried on dst. */
	if (dst.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (dst.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((dst.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (dst.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (dst.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (dst.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	emit_dst(fpc, dst);
	emit_src(fpc, 0, s0);
	emit_src(fpc, 1, s1);
	emit_src(fpc, 2, s2);
}
222
223 static void
224 nvfx_fp_tex(struct nvfx_fpc *fpc, int sat, int op, int unit,
225 struct nvfx_sreg dst, int mask,
226 struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
227 {
228 struct nvfx_fragment_program *fp = fpc->fp;
229
230 nvfx_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
231
232 fp->insn[fpc->inst_offset] |= (unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
233 fp->samplers |= (1 << unit);
234 }
235
236 static INLINE struct nvfx_sreg
237 tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
238 {
239 struct nvfx_sreg src;
240
241 switch (fsrc->Register.File) {
242 case TGSI_FILE_INPUT:
243 src = nvfx_sr(NVFXSR_INPUT,
244 fpc->attrib_map[fsrc->Register.Index]);
245 break;
246 case TGSI_FILE_CONSTANT:
247 src = constant(fpc, fsrc->Register.Index, NULL);
248 break;
249 case TGSI_FILE_IMMEDIATE:
250 assert(fsrc->Register.Index < fpc->nr_imm);
251 src = fpc->imm[fsrc->Register.Index];
252 break;
253 case TGSI_FILE_TEMPORARY:
254 src = fpc->r_temp[fsrc->Register.Index];
255 break;
256 /* NV40 fragprog result regs are just temps, so this is simple */
257 case TGSI_FILE_OUTPUT:
258 src = fpc->r_result[fsrc->Register.Index];
259 break;
260 default:
261 NOUVEAU_ERR("bad src file\n");
262 break;
263 }
264
265 src.abs = fsrc->Register.Absolute;
266 src.negate = fsrc->Register.Negate;
267 src.swz[0] = fsrc->Register.SwizzleX;
268 src.swz[1] = fsrc->Register.SwizzleY;
269 src.swz[2] = fsrc->Register.SwizzleZ;
270 src.swz[3] = fsrc->Register.SwizzleW;
271 return src;
272 }
273
274 static INLINE struct nvfx_sreg
275 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
276 switch (fdst->Register.File) {
277 case TGSI_FILE_OUTPUT:
278 return fpc->r_result[fdst->Register.Index];
279 case TGSI_FILE_TEMPORARY:
280 return fpc->r_temp[fdst->Register.Index];
281 case TGSI_FILE_NULL:
282 return nvfx_sr(NVFXSR_NONE, 0);
283 default:
284 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
285 return nvfx_sr(NVFXSR_NONE, 0);
286 }
287 }
288
289 static INLINE int
290 tgsi_mask(uint tgsi)
291 {
292 int mask = 0;
293
294 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
295 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
296 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
297 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
298 return mask;
299 }
300
/* Translate a single TGSI instruction into one or more hardware
 * instructions.  Returns FALSE for unsupported opcodes or register
 * files. */
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_instruction *finst)
{
	const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	struct nvfx_sreg src[3], dst, tmp;
	/* NOTE(review): `unit` is only assigned when a SAMPLER operand is
	 * present; the TEX/TXB/TXP cases below rely on that - confirm the
	 * state tracker always supplies one. */
	int mask, sat, unit;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	/* Pass 1: TGSI temporaries map straight onto hw temps. */
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(fpc, fsrc);
		}
	}

	/* Pass 2: the hardware reads at most one distinct attribute and
	 * one distinct constant/immediate per instruction; ai/ci/ii track
	 * the one that gets the direct slot, extras are copied to temps. */
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			/* Immediates share the constant slot with
			 * constants, hence the combined ci/ii check. */
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = temp(fpc);
				arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
				      tgsi_src(fpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		case TGSI_FILE_SAMPLER:
			unit = fsrc->Register.Index;
			break;
		case TGSI_FILE_OUTPUT:
			/* NOTE(review): src[i] is left unset here -
			 * presumably outputs never appear as actual data
			 * operands; confirm. */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	dst = tgsi_dst(fpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
		break;
	case TGSI_OPCODE_ADD:
		arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_CMP:
		/* Write condition codes from src0, then predicate two MOVs
		 * on >= 0 and < 0 respectively. */
		tmp = nvfx_sr(NVFXSR_NONE, 0);
		tmp.cc_update = 1;
		arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
		dst.cc_test = NVFX_COND_GE;
		arith(fpc, sat, MOV, dst, mask, src[2], none, none);
		dst.cc_test = NVFX_COND_LT;
		arith(fpc, sat, MOV, dst, mask, src[1], none, none);
		break;
	case TGSI_OPCODE_COS:
		arith(fpc, sat, COS, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_DDX:
		/* Hardware derivative only covers x/y; route z/w through
		 * the x/y lanes of a temp when they are written. */
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDX, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DDY:
		/* Same z/w splitting as DDX above. */
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = temp(fpc);
			arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
			      swz(src[0], Z, W, Z, W), none, none);
			arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
			      swz(tmp, X, Y, X, Y), none, none);
			arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
			      none, none);
			arith(fpc, 0, MOV, dst, mask, tmp, none, none);
		} else {
			arith(fpc, sat, DDY, dst, mask, src[0], none, none);
		}
		break;
	case TGSI_OPCODE_DP3:
		arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DP4:
		arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DPH:
		/* DPH(a,b) = DP3(a,b) + b.w */
		tmp = temp(fpc);
		arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[1], none);
		arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
		      swz(src[1], W, W, W, W), none);
		break;
	case TGSI_OPCODE_DST:
		arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_EX2:
		arith(fpc, sat, EX2, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FLR:
		arith(fpc, sat, FLR, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FRC:
		arith(fpc, sat, FRC, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_KILP:
		/* Unconditional kill. */
		arith(fpc, 0, KIL, none, 0, none, none, none);
		break;
	case TGSI_OPCODE_KIL:
		/* Conditional kill: set CC from src0, kill where < 0. */
		dst = nvfx_sr(NVFXSR_NONE, 0);
		dst.cc_update = 1;
		arith(fpc, 0, MOV, dst, NVFX_FP_MASK_ALL, src[0], none, none);
		dst.cc_update = 0; dst.cc_test = NVFX_COND_LT;
		arith(fpc, 0, KIL, dst, 0, none, none, none);
		break;
	case TGSI_OPCODE_LG2:
		arith(fpc, sat, LG2, dst, mask, src[0], none, none);
		break;
//	case TGSI_OPCODE_LIT:
	case TGSI_OPCODE_LRP:
		/* nv30 has a native LRP; nv40 builds it from two MADs:
		 * lrp(a,b,c) = a*b + (1-a)*c */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, LRP_NV30, dst, mask, src[0], src[1], src[2]);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]);
			arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp);
		}
		break;
	case TGSI_OPCODE_MAD:
		arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
		break;
	case TGSI_OPCODE_MAX:
		arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MIN:
		arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MOV:
		arith(fpc, sat, MOV, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_MUL:
		arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_POW:
		/* nv40: pow(x,y) = 2^(y * log2(x)) */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, POW_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, tmp, NVFX_FP_MASK_X,
			      swz(src[0], X, X, X, X), none, none);
			arith(fpc, 0, MUL, tmp, NVFX_FP_MASK_X, swz(tmp, X, X, X, X),
			      swz(src[1], X, X, X, X), none);
			arith(fpc, sat, EX2, dst, mask,
			      swz(tmp, X, X, X, X), none, none);
		}
		break;
	case TGSI_OPCODE_RCP:
		arith(fpc, sat, RCP, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_RET:
		assert(0);
		break;
	case TGSI_OPCODE_RFL:
		/* reflect: 2*dot(n,i)/dot(n,n) * n - i */
		if(!nvfx->is_nv4x)
			arith(fpc, 0, RFL_NV30, dst, mask, src[0], src[1], none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[0], none);
			arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_Y, src[0], src[1], none);
			arith(fpc, 0, DIV, scale(tmp, 2X), NVFX_FP_MASK_Z,
			      swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
			arith(fpc, sat, MAD, dst, mask,
			      swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]));
		}
		break;
	case TGSI_OPCODE_RSQ:
		/* nv40: rsq(x) = 2^(-0.5 * log2(|x|)) */
		if(!nvfx->is_nv4x)
			arith(fpc, sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none);
		else {
			tmp = temp(fpc);
			arith(fpc, 0, LG2, scale(tmp, INV_2X), NVFX_FP_MASK_X,
			      abs(swz(src[0], X, X, X, X)), none, none);
			arith(fpc, sat, EX2, dst, mask,
			      neg(swz(tmp, X, X, X, X)), none, none);
		}
		break;
	case TGSI_OPCODE_SCS:
		/* avoid overwriting the source */
		if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
		{
			if (mask & NVFX_FP_MASK_X) {
				arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & NVFX_FP_MASK_Y) {
				arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		else
		{
			if (mask & NVFX_FP_MASK_Y) {
				arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
				      swz(src[0], X, X, X, X), none, none);
			}
			if (mask & NVFX_FP_MASK_X) {
				arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
				      swz(src[0], X, X, X, X), none, none);
			}
		}
		break;
	case TGSI_OPCODE_SEQ:
		arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SFL:
		arith(fpc, sat, SFL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGE:
		arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGT:
		arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SIN:
		arith(fpc, sat, SIN, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_SLE:
		arith(fpc, sat, SLE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SLT:
		arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SNE:
		arith(fpc, sat, SNE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_STR:
		arith(fpc, sat, STR, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SUB:
		arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
		break;
	case TGSI_OPCODE_TEX:
		tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXB:
		tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_TXP:
		tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_XPD:
		/* cross product via two swizzled MULs (MUL + MAD). */
		tmp = temp(fpc);
		arith(fpc, 0, MUL, tmp, mask,
		      swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
		arith(fpc, sat, MAD, dst, (mask & ~NVFX_FP_MASK_W),
		      swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
		      neg(tmp));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(fpc);
	return TRUE;
}
611
612 static boolean
613 nvfx_fragprog_parse_decl_attrib(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
614 const struct tgsi_full_declaration *fdec)
615 {
616 int hw;
617
618 switch (fdec->Semantic.Name) {
619 case TGSI_SEMANTIC_POSITION:
620 hw = NVFX_FP_OP_INPUT_SRC_POSITION;
621 break;
622 case TGSI_SEMANTIC_COLOR:
623 if (fdec->Semantic.Index == 0) {
624 hw = NVFX_FP_OP_INPUT_SRC_COL0;
625 } else
626 if (fdec->Semantic.Index == 1) {
627 hw = NVFX_FP_OP_INPUT_SRC_COL1;
628 } else {
629 NOUVEAU_ERR("bad colour semantic index\n");
630 return FALSE;
631 }
632 break;
633 case TGSI_SEMANTIC_FOG:
634 hw = NVFX_FP_OP_INPUT_SRC_FOGC;
635 break;
636 case TGSI_SEMANTIC_GENERIC:
637 if (fdec->Semantic.Index <= 7) {
638 hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.
639 Index);
640 } else {
641 NOUVEAU_ERR("bad generic semantic index\n");
642 return FALSE;
643 }
644 break;
645 default:
646 NOUVEAU_ERR("bad input semantic\n");
647 return FALSE;
648 }
649
650 fpc->attrib_map[fdec->Range.First] = hw;
651 return TRUE;
652 }
653
654 static boolean
655 nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
656 const struct tgsi_full_declaration *fdec)
657 {
658 unsigned idx = fdec->Range.First;
659 unsigned hw;
660
661 switch (fdec->Semantic.Name) {
662 case TGSI_SEMANTIC_POSITION:
663 hw = 1;
664 break;
665 case TGSI_SEMANTIC_COLOR:
666 hw = ~0;
667 switch (fdec->Semantic.Index) {
668 case 0: hw = 0; break;
669 case 1: hw = 2; break;
670 case 2: hw = 3; break;
671 case 3: hw = 4; break;
672 }
673 if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
674 NOUVEAU_ERR("bad rcol index\n");
675 return FALSE;
676 }
677 break;
678 default:
679 NOUVEAU_ERR("bad output semantic\n");
680 return FALSE;
681 }
682
683 fpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw);
684 fpc->r_temps |= (1 << hw);
685 return TRUE;
686 }
687
688 static boolean
689 nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
690 {
691 struct tgsi_parse_context p;
692 int high_temp = -1, i;
693
694 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
695 while (!tgsi_parse_end_of_tokens(&p)) {
696 const union tgsi_full_token *tok = &p.FullToken;
697
698 tgsi_parse_token(&p);
699 switch(tok->Token.Type) {
700 case TGSI_TOKEN_TYPE_DECLARATION:
701 {
702 const struct tgsi_full_declaration *fdec;
703 fdec = &p.FullToken.FullDeclaration;
704 switch (fdec->Declaration.File) {
705 case TGSI_FILE_INPUT:
706 if (!nvfx_fragprog_parse_decl_attrib(nvfx, fpc, fdec))
707 goto out_err;
708 break;
709 case TGSI_FILE_OUTPUT:
710 if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
711 goto out_err;
712 break;
713 case TGSI_FILE_TEMPORARY:
714 if (fdec->Range.Last > high_temp) {
715 high_temp =
716 fdec->Range.Last;
717 }
718 break;
719 default:
720 break;
721 }
722 }
723 break;
724 case TGSI_TOKEN_TYPE_IMMEDIATE:
725 {
726 struct tgsi_full_immediate *imm;
727 float vals[4];
728
729 imm = &p.FullToken.FullImmediate;
730 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
731 assert(fpc->nr_imm < MAX_IMM);
732
733 vals[0] = imm->u[0].Float;
734 vals[1] = imm->u[1].Float;
735 vals[2] = imm->u[2].Float;
736 vals[3] = imm->u[3].Float;
737 fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
738 }
739 break;
740 default:
741 break;
742 }
743 }
744 tgsi_parse_free(&p);
745
746 if (++high_temp) {
747 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg));
748 for (i = 0; i < high_temp; i++)
749 fpc->r_temp[i] = temp(fpc);
750 fpc->r_temps_discard = 0;
751 }
752
753 return TRUE;
754
755 out_err:
756 if (fpc->r_temp)
757 FREE(fpc->r_temp);
758 tgsi_parse_free(&p);
759 return FALSE;
760 }
761
/* Compile fp->pipe.tokens into hardware instructions in fp->insn.
 * On success sets fp->translated; on failure it is left FALSE. */
static void
nvfx_fragprog_translate(struct nvfx_context *nvfx,
			struct nvfx_fragment_program *fp)
{
	struct tgsi_parse_context parse;
	struct nvfx_fpc *fpc = NULL;

	fpc = CALLOC(1, sizeof(struct nvfx_fpc));
	if (!fpc)
		return;
	fpc->fp = fp;
	fpc->num_regs = 2;

	if (!nvfx_fragprog_prepare(nvfx, fpc)) {
		FREE(fpc);
		return;
	}

	tgsi_parse_init(&parse, fp->pipe.tokens);

	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;

			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	/* Encode the temp-register count into fp_control; the layout
	 * differs between the nv30 and nv40 families. */
	if(!nvfx->is_nv4x)
		fp->fp_control |= (fpc->num_regs-1)/2;
	else
		fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;

	/* Terminate final instruction */
	if(fp->insn)
		fp->insn[fpc->inst_offset] |= 0x00000001;

	/* Append NOP + END instruction, may or may not be necessary. */
	fpc->inst_offset = fp->insn_len;
	grow_insns(fpc, 4);
	fp->insn[fpc->inst_offset + 0] = 0x00000001;
	fp->insn[fpc->inst_offset + 1] = 0x00000000;
	fp->insn[fpc->inst_offset + 2] = 0x00000000;
	fp->insn[fpc->inst_offset + 3] = 0x00000000;

	fp->translated = TRUE;
out_err:
	/* Success falls through here as well: shared cleanup. */
	tgsi_parse_free(&parse);
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	FREE(fpc);
}
824
825 static void
826 nvfx_fragprog_upload(struct nvfx_context *nvfx,
827 struct nvfx_fragment_program *fp)
828 {
829 struct pipe_screen *pscreen = nvfx->pipe.screen;
830 const uint32_t le = 1;
831 uint32_t *map;
832 int i;
833
834 map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
835
836 #if 0
837 for (i = 0; i < fp->insn_len; i++) {
838 fflush(stdout); fflush(stderr);
839 NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
840 fflush(stdout); fflush(stderr);
841 }
842 #endif
843
844 if ((*(const uint8_t *)&le)) {
845 for (i = 0; i < fp->insn_len; i++) {
846 map[i] = fp->insn[i];
847 }
848 } else {
849 /* Weird swapping for big-endian chips */
850 for (i = 0; i < fp->insn_len; i++) {
851 map[i] = ((fp->insn[i] & 0xffff) << 16) |
852 ((fp->insn[i] >> 16) & 0xffff);
853 }
854 }
855
856 pipe_buffer_unmap(pscreen, fp->buffer);
857 }
858
/* Validate the bound fragment program: translate/upload on first use,
 * then re-patch and re-upload when referenced pipe constants change.
 * Returns TRUE when the hardware state object was (re)bound. */
static boolean
nvfx_fragprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_fragment_program *fp = nvfx->fragprog;
	struct pipe_buffer *constbuf =
		nvfx->constbuf[PIPE_SHADER_FRAGMENT];
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	struct nouveau_stateobj *so;
	boolean new_consts = FALSE;
	int i;

	if (fp->translated)
		goto update_constants;

	/* First use: translate, upload and build the state object. */
	nvfx->fallback_swrast &= ~NVFX_NEW_FRAGPROG;
	nvfx_fragprog_translate(nvfx, fp);
	if (!fp->translated) {
		/* Translation failed - request software rasterization. */
		nvfx->fallback_swrast |= NVFX_NEW_FRAGPROG;
		return FALSE;
	}

	fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
	nvfx_fragprog_upload(nvfx, fp);

	so = so_new(4, 4, 1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_ACTIVE_PROGRAM, 1);
	so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
		      NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
		      NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
		      NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
	so_method(so, nvfx->screen->eng3d, NV34TCL_FP_CONTROL, 1);
	so_data  (so, fp->fp_control);
	if(!nvfx->is_nv4x) {
		so_method(so, nvfx->screen->eng3d, NV34TCL_FP_REG_CONTROL, 1);
		so_data  (so, (1<<16)|0x4);
		so_method(so, nvfx->screen->eng3d, NV34TCL_TX_UNITS_ENABLE, 1);
		so_data  (so, fp->samplers);
	}

	so_ref(so, &fp->so);
	so_ref(NULL, &so);

update_constants:
	if (fp->nr_consts) {
		float *map;

		/* NOTE(review): pipe_buffer_map() result is unchecked; a
		 * failed map would be dereferenced below - confirm the
		 * driver guarantees this mapping succeeds. */
		map = pipe_buffer_map(pscreen, constbuf,
				      PIPE_BUFFER_USAGE_CPU_READ);
		for (i = 0; i < fp->nr_consts; i++) {
			struct nvfx_fragment_program_data *fpd = &fp->consts[i];
			uint32_t *p = &fp->insn[fpd->offset];
			uint32_t *cb = (uint32_t *)&map[fpd->index * 4];

			/* Only mark dirty when a constant really changed. */
			if (!memcmp(p, cb, 4 * sizeof(float)))
				continue;
			memcpy(p, cb, 4 * sizeof(float));
			new_consts = TRUE;
		}
		pipe_buffer_unmap(pscreen, constbuf);

		if (new_consts)
			nvfx_fragprog_upload(nvfx, fp);
	}

	if (new_consts || fp->so != nvfx->state.hw[NVFX_STATE_FRAGPROG]) {
		so_ref(fp->so, &nvfx->state.hw[NVFX_STATE_FRAGPROG]);
		return TRUE;
	}

	return FALSE;
}
930
931 void
932 nvfx_fragprog_destroy(struct nvfx_context *nvfx,
933 struct nvfx_fragment_program *fp)
934 {
935 if (fp->buffer)
936 pipe_buffer_reference(&fp->buffer, NULL);
937
938 if (fp->so)
939 so_ref(NULL, &fp->so);
940
941 if (fp->insn_len)
942 FREE(fp->insn);
943 }
944
/* State-table entry: revalidate the fragment program whenever
 * NVFX_NEW_FRAGPROG is dirty. */
struct nvfx_state_entry nvfx_state_fragprog = {
	.validate = nvfx_fragprog_validate,
	.dirty = {
		.pipe = NVFX_NEW_FRAGPROG,
		.hw = NVFX_STATE_FRAGPROG
	}
};