nv30: plug some memory leaks on screen destroy and shader compile
[mesa.git] / src / gallium / drivers / nouveau / nv30 / nvfx_fragprog.c
1 #include <float.h>
2 #include "pipe/p_context.h"
3 #include "pipe/p_defines.h"
4 #include "pipe/p_state.h"
5 #include "util/u_dynarray.h"
6 #include "util/u_linkage.h"
7 #include "util/u_inlines.h"
8 #include "util/u_debug.h"
9
10 #include "pipe/p_shader_tokens.h"
11 #include "tgsi/tgsi_parse.h"
12 #include "tgsi/tgsi_util.h"
13 #include "tgsi/tgsi_dump.h"
14 #include "tgsi/tgsi_ureg.h"
15
16 #include "nouveau_debug.h"
17 #include "nv_object.xml.h"
18 #include "nv30/nv30-40_3d.xml.h"
19 #include "nv30/nvfx_shader.h"
20 #include "nv30/nv30_state.h"
21
/* Per-compile state used while translating a TGSI fragment program into
 * NV30/NV40 fragment program machine code. */
struct nvfx_fpc {
   struct nv30_fragprog *fp;            /* program being assembled */

   unsigned max_temps;                  /* hardware temp register budget */
   unsigned long long r_temps;          /* bitmask of currently allocated temps */
   unsigned long long r_temps_discard;  /* temps to release after the current instruction */
   struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS]; /* TGSI output index -> hw reg */
   struct nvfx_reg r_input[PIPE_MAX_SHADER_INPUTS];   /* TGSI input index -> hw reg */
   struct nvfx_reg *r_temp;             /* TGSI temporary index -> hw reg */

   int num_regs;                        /* highest temp register index used + 1 */

   unsigned inst_offset;                /* dword offset of the instruction being emitted */
   unsigned have_const;                 /* current insn already grew its 4-dword const slot */
   unsigned is_nv4x;                    /* nonzero on nv4x: enables control flow opcodes */

   struct util_dynarray imm_data;       /* packed vec4 float immediates */

   struct nvfx_reg* r_imm;              /* TGSI immediate index -> reg into imm_data */
   unsigned nr_imm;                     /* number of entries in r_imm */

   struct util_dynarray if_stack;       /* insn offsets of open IFs awaiting patching */
   //struct util_dynarray loop_stack;
   struct util_dynarray label_relocs;   /* TGSI label fixups resolved after parsing */
};
47
48 static INLINE struct nvfx_reg
49 temp(struct nvfx_fpc *fpc)
50 {
51 int idx = __builtin_ctzll(~fpc->r_temps);
52
53 if (idx >= fpc->max_temps) {
54 NOUVEAU_ERR("out of temps!!\n");
55 assert(0);
56 return nvfx_reg(NVFXSR_TEMP, 0);
57 }
58
59 fpc->r_temps |= (1ULL << idx);
60 fpc->r_temps_discard |= (1ULL << idx);
61 return nvfx_reg(NVFXSR_TEMP, idx);
62 }
63
64 static INLINE void
65 release_temps(struct nvfx_fpc *fpc)
66 {
67 fpc->r_temps &= ~fpc->r_temps_discard;
68 fpc->r_temps_discard = 0ULL;
69 }
70
71 static inline struct nvfx_reg
72 nvfx_fp_imm(struct nvfx_fpc *fpc, float a, float b, float c, float d)
73 {
74 float v[4] = {a, b, c, d};
75 int idx = fpc->imm_data.size >> 4;
76
77 memcpy(util_dynarray_grow(&fpc->imm_data, sizeof(float) * 4), v, 4 * sizeof(float));
78 return nvfx_reg(NVFXSR_IMM, idx);
79 }
80
/* Extend the emitted instruction buffer by "size" dwords.
 * NOTE(review): the realloc result is unchecked and directly overwrites
 * fp->insn; on OOM the old buffer leaks and a later emit dereferences
 * NULL. With a void return there is no recovery path here — confirm
 * whether OOM is treated as fatal by the callers. */
static void
grow_insns(struct nvfx_fpc *fpc, int size)
{
   struct nv30_fragprog *fp = fpc->fp;

   fp->insn_len += size;
   fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
}
89
/* Encode source operand "src" into source slot "pos" (0..2) of the
 * instruction currently at fpc->inst_offset.  Immediates and constants
 * are materialized as an extra 4-dword slot appended to the instruction
 * (at most one per instruction, shared via fpc->have_const). */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw = &fp->insn[fpc->inst_offset];
   uint32_t sr = 0;

   switch (src.reg.type) {
   case NVFXSR_INPUT:
      sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
      hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
      break;
   case NVFXSR_OUTPUT:
      sr |= NVFX_FP_REG_SRC_HALF;
      /* fall-through */
   case NVFXSR_TEMP:
      sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
      sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
      break;
   case NVFXSR_IMM:
      if (!fpc->have_const) {
         grow_insns(fpc, 4);
         /* the buffer may have been moved by realloc */
         hw = &fp->insn[fpc->inst_offset];
         fpc->have_const = 1;
      }

      /* copy the vec4 immediate into the instruction's const slot */
      memcpy(&fp->insn[fpc->inst_offset + 4],
             (float*)fpc->imm_data.data + src.reg.index * 4,
             sizeof(uint32_t) * 4);

      sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
      break;
   case NVFXSR_CONST:
      if (!fpc->have_const) {
         grow_insns(fpc, 4);
         hw = &fp->insn[fpc->inst_offset];
         fpc->have_const = 1;
      }

      {
         struct nv30_fragprog_data *fpd;

         /* record offset/index so the constant's value can be patched
          * into the program at upload time */
         fp->consts = realloc(fp->consts, ++fp->nr_consts *
                              sizeof(*fpd));
         fpd = &fp->consts[fp->nr_consts - 1];
         fpd->offset = fpc->inst_offset + 4;
         fpd->index = src.reg.index;
         memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
      }

      sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
      break;
   case NVFXSR_NONE:
      sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
      break;
   default:
      assert(0);
   }

   if (src.negate)
      sr |= NVFX_FP_REG_NEGATE;

   /* absolute-value flags live in dword 1, one bit per source slot */
   if (src.abs)
      hw[1] |= (1 << (29 + pos));

   sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
          (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
          (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
          (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

   hw[pos + 1] |= sr;
}
162
/* Encode destination register "dst" into the instruction currently at
 * fpc->inst_offset, and track the highest temp index used in
 * fpc->num_regs (the hardware needs the register count). */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw = &fp->insn[fpc->inst_offset];

   switch (dst.type) {
   case NVFXSR_OUTPUT:
      if (dst.index == 1)
         /* output 1 is depth; flag it in fp_control */
         fp->fp_control |= 0x0000000e;
      else {
         hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
         dst.index <<= 1;
      }
      /* fall-through */
   case NVFXSR_TEMP:
      if (fpc->num_regs < (dst.index + 1))
         fpc->num_regs = dst.index + 1;
      break;
   case NVFXSR_NONE:
      /* no destination: set the "no output" bit */
      hw[0] |= (1 << 30);
      break;
   default:
      assert(0);
   }

   hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
191
/* Append one 4-dword instruction built from "insn" to the program,
 * encoding opcode, writemask, scale, saturation, condition-code state,
 * texture unit, destination and the three source operands. */
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw;

   fpc->inst_offset = fp->insn_len;
   fpc->have_const = 0;
   grow_insns(fpc, 4);
   hw = &fp->insn[fpc->inst_offset];
   memset(hw, 0, sizeof(uint32_t) * 4);

   if (insn.op == NVFX_FP_OP_OPCODE_KIL)
      fp->fp_control |= NV30_3D_FP_CONTROL_USES_KIL;
   hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
   hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
   hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

   if (insn.sat)
      hw[0] |= NVFX_FP_OP_OUT_SAT;

   if (insn.cc_update)
      hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
   hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
   hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
             (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
             (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
             (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

   /* unit < 0 means "not a texture instruction" */
   if(insn.unit >= 0)
   {
      hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
   }

   emit_dst(fpc, insn.dst);
   emit_src(fpc, 0, insn.src[0]);
   emit_src(fpc, 1, insn.src[1]);
   emit_src(fpc, 2, insn.src[2]);
}
231
/* Build an ALU instruction: saturate, opcode suffix, dst, mask, 3 srcs
 * (texture unit forced to -1 = none). */
#define arith(s,o,d,m,s0,s1,s2) \
   nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
   (d), (m), (s0), (s1), (s2))

/* Build a texture instruction; only s0 is used, s1/s2 are ignored and
 * replaced with "none" (which must be in scope at the expansion site). */
#define tex(s,o,u,d,m,s0,s1,s2) \
   nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
   (d), (m), (s0), none, none)
239
/* IF src.x != 0, as TGSI specifies.  Emits a condition-code-updating MOV
 * followed by an IF branch instruction whose else/endif offsets are left
 * zero here and patched later by the ELSE/ENDIF handlers (the insn
 * offset is pushed on if_stack for that purpose). */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
   const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
   uint32_t *hw;
   insn.cc_update = 1;
   nvfx_fp_emit(fpc, insn);

   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
      NV40_FP_OP_OUT_NONE |
      (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* Use .xxxx swizzle so that we check only src[0].x */
   hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
      (0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
      (0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
      (0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
      (NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
   hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
   hw[3] = 0; /* | endif_offset */
   util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
267
/* Unconditional CAL to the subroutine at TGSI label "target"; the call
 * offset is resolved later via label_relocs. */
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
   /* condition TRUE with identity swizzle: always taken */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
      (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
   hw[3] = 0;
   reloc.target = target;
   reloc.location = fpc->inst_offset + 2;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
288
/* Unconditional RET from the current subroutine. */
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
   /* condition TRUE with identity swizzle: always taken */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
      (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
   hw[3] = 0;
}
304
/* REP loop with a fixed iteration count; the end-of-loop offset is
 * resolved later through label_relocs using TGSI label "target".
 * All three hardware count fields are set to the same value. */
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
      NV40_FP_OP_OUT_NONE |
      (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* condition TRUE with identity swizzle: always taken */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
      (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
      (count << NV40_FP_OP_REP_COUNT1_SHIFT) |
      (count << NV40_FP_OP_REP_COUNT2_SHIFT) |
      (count << NV40_FP_OP_REP_COUNT3_SHIFT);
   hw[3] = 0; /* | end_offset */
   reloc.target = target;
   reloc.location = fpc->inst_offset + 3;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
   //util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
330
/* warning: this only works forward, and probably only if not inside any IF.
 * Implements an unconditional branch as an IF whose condition is always
 * false (COND_FL), pointing both else and endif offsets at "target". */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
      NV40_FP_OP_OUT_NONE |
      (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* NOTE(review): this shifts the identity swizzle by SWZ_X_SHIFT while
    * the sibling branch helpers use SWZ_ALL_SHIFT — confirm which is
    * intended (this path is currently unreachable, see the assert(0)
    * before its only caller). */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
      (NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
   hw[3] = 0; /* | endif_offset */
   reloc.target = target;
   reloc.location = fpc->inst_offset + 2;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
   reloc.target = target;
   reloc.location = fpc->inst_offset + 3;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
356
/* Unconditional BRK out of the innermost REP loop. */
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
      NV40_FP_OP_OUT_NONE;
   /* condition TRUE: always taken */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
      (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
   hw[3] = 0;
}
373
/* Translate a TGSI source register reference into an nvfx_src, mapping
 * the register file through the tables built during declaration parsing
 * and copying over swizzle/negate/abs modifiers. */
static INLINE struct nvfx_src
tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
{
   struct nvfx_src src;

   switch (fsrc->Register.File) {
   case TGSI_FILE_INPUT:
      src.reg = fpc->r_input[fsrc->Register.Index];
      break;
   case TGSI_FILE_CONSTANT:
      src.reg = nvfx_reg(NVFXSR_CONST, fsrc->Register.Index);
      break;
   case TGSI_FILE_IMMEDIATE:
      assert(fsrc->Register.Index < fpc->nr_imm);
      src.reg = fpc->r_imm[fsrc->Register.Index];
      break;
   case TGSI_FILE_TEMPORARY:
      src.reg = fpc->r_temp[fsrc->Register.Index];
      break;
   /* NV40 fragprog result regs are just temps, so this is simple */
   case TGSI_FILE_OUTPUT:
      src.reg = fpc->r_result[fsrc->Register.Index];
      break;
   default:
      NOUVEAU_ERR("bad src file\n");
      src.reg.index = 0;
      src.reg.type = 0;
      break;
   }

   src.abs = fsrc->Register.Absolute;
   src.negate = fsrc->Register.Negate;
   src.swz[0] = fsrc->Register.SwizzleX;
   src.swz[1] = fsrc->Register.SwizzleY;
   src.swz[2] = fsrc->Register.SwizzleZ;
   src.swz[3] = fsrc->Register.SwizzleW;
   /* fragment programs have no address-register indirection here */
   src.indirect = 0;
   src.indirect_reg = 0;
   src.indirect_swz = 0;
   return src;
}
415
416 static INLINE struct nvfx_reg
417 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
418 switch (fdst->Register.File) {
419 case TGSI_FILE_OUTPUT:
420 return fpc->r_result[fdst->Register.Index];
421 case TGSI_FILE_TEMPORARY:
422 return fpc->r_temp[fdst->Register.Index];
423 case TGSI_FILE_NULL:
424 return nvfx_reg(NVFXSR_NONE, 0);
425 default:
426 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
427 return nvfx_reg(NVFXSR_NONE, 0);
428 }
429 }
430
431 static INLINE int
432 tgsi_mask(uint tgsi)
433 {
434 int mask = 0;
435
436 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
437 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
438 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
439 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
440 return mask;
441 }
442
/* Translate one TGSI instruction into one or more hardware instructions.
 * Returns FALSE on untranslatable input.  The hardware can read at most
 * one distinct INPUT and one distinct CONSTANT/IMMEDIATE per instruction,
 * so extra operands are first copied into temps (tracked via ai/ci/ii).
 * All scratch temps are released at the end of every instruction. */
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc,
                                const struct tgsi_full_instruction *finst)
{
   const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn;
   struct nvfx_src src[3], tmp;
   struct nvfx_reg dst;
   int mask, sat, unit = 0;
   int ai = -1, ci = -1, ii = -1; /* input/const/immediate index already in use */
   int i;

   if (finst->Instruction.Opcode == TGSI_OPCODE_END)
      return TRUE;

   /* temporaries never need copying; resolve them first */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];
      if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
         src[i] = tgsi_src(fpc, fsrc);
      }
   }

   /* resolve the remaining operands, copying to temps where the
    * one-input / one-const hardware limit would be exceeded */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];

      switch (fsrc->Register.File) {
      case TGSI_FILE_INPUT:
         if (fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG && (0
             || fsrc->Register.SwizzleX == PIPE_SWIZZLE_ALPHA
             || fsrc->Register.SwizzleY == PIPE_SWIZZLE_ALPHA
             || fsrc->Register.SwizzleZ == PIPE_SWIZZLE_ALPHA
             || fsrc->Register.SwizzleW == PIPE_SWIZZLE_ALPHA
             )) {
            /* hardware puts 0 in fogcoord.w, but GL/Gallium want 1 there */
            struct nvfx_src addend = nvfx_src(nvfx_fp_imm(fpc, 0, 0, 0, 1));
            addend.swz[0] = fsrc->Register.SwizzleX;
            addend.swz[1] = fsrc->Register.SwizzleY;
            addend.swz[2] = fsrc->Register.SwizzleZ;
            addend.swz[3] = fsrc->Register.SwizzleW;
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, ADD, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), addend, none));
         } else if (ai == -1 || ai == fsrc->Register.Index) {
            ai = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            /* second distinct input: go through a temp */
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_CONSTANT:
         if ((ci == -1 && ii == -1) ||
             ci == fsrc->Register.Index) {
            ci = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_IMMEDIATE:
         /* immediates share the single const slot with constants */
         if ((ci == -1 && ii == -1) ||
             ii == fsrc->Register.Index) {
            ii = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_TEMPORARY:
         /* handled above */
         break;
      case TGSI_FILE_SAMPLER:
         unit = fsrc->Register.Index;
         break;
      case TGSI_FILE_OUTPUT:
         break;
      default:
         NOUVEAU_ERR("bad src file\n");
         return FALSE;
      }
   }

   dst = tgsi_dst(fpc, &finst->Dst[0]);
   mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
   sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);

   switch (finst->Instruction.Opcode) {
   case TGSI_OPCODE_ABS:
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
      break;
   case TGSI_OPCODE_ADD:
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_CEIL:
      /* ceil(x) = -floor(-x) */
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, neg(src[0]), none, none));
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, neg(tmp), none, none));
      break;
   case TGSI_OPCODE_CMP:
      /* set CC from src0, then select src2 (>= 0) or src1 (< 0) */
      insn = arith(0, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(sat, MOV, dst, mask, src[2], none, none);
      insn.cc_test = NVFX_COND_GE;
      nvfx_fp_emit(fpc, insn);

      insn = arith(sat, MOV, dst, mask, src[1], none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_COS:
      nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_DDX:
      /* hw DDX only covers .xy; route .zw through a temp when needed */
      if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
         nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
      } else {
         nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
      }
      break;
   case TGSI_OPCODE_DDY:
      if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
         nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
      } else {
         nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
      }
      break;
   case TGSI_OPCODE_DP2:
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none));
      nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
      break;
   case TGSI_OPCODE_DP3:
      nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DP4:
      nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DPH:
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
      break;
   case TGSI_OPCODE_DST:
      nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_EX2:
      nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FLR:
      nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FRC:
      nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_KILL:
      nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_KILL_IF:
      /* kill where any component of src0 is negative */
      insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(0, KIL, none.reg, 0, none, none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_LG2:
      nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_LIT:
      if (!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, LIT_NV30, dst, mask, src[0], none, none));
      else {
         /* we use FLT_MIN, so that log2 never gives -infinity, and thus multiplication by
          * specular 0 always gives 0, so that ex2 gives 1, to satisfy the 0^0 = 1 requirement
          *
          * NOTE: if we start using half precision, we might need an fp16 FLT_MIN here instead
          */
         struct nvfx_src maxs = nvfx_src(nvfx_fp_imm(fpc, 0, FLT_MIN, 0, 0));
         tmp = nvfx_src(temp(fpc));
         if (ci >= 0 || ii >= 0) {
            /* const slot already taken: stage the immediate in a temp */
            nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, maxs, none, none));
            maxs = tmp;
         }
         nvfx_fp_emit(fpc, arith(0, MAX, tmp.reg, NVFX_FP_MASK_Y | NVFX_FP_MASK_W, swz(src[0], X, X, X, Y), swz(maxs, X, X, Y, Y), none));
         nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), swz(src[0], W, W, W, W), none));
         nvfx_fp_emit(fpc, arith(sat, LITEX2_NV40, dst, mask, swz(tmp, Y, Y, W, W), none, none));
      }
      break;
   case TGSI_OPCODE_LRP:
      if (!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
      else {
         /* lrp = s0*s1 + (1-s0)*s2, built from two MADs */
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
         nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
      }
      break;
   case TGSI_OPCODE_MAD:
      nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
      break;
   case TGSI_OPCODE_MAX:
      nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MIN:
      nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MOV:
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_MUL:
      nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_NOP:
      break;
   case TGSI_OPCODE_POW:
      if (!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
      else {
         /* pow(a, b) = 2^(b * log2(a)) */
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
         nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
         nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
      }
      break;
   case TGSI_OPCODE_RCP:
      nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_RFL:
      if (!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none));
      else {
         /* rfl = 2*dot(a,b)/dot(a,a) * a - b */
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none));
         nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none));
         insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
         insn.scale = NVFX_FP_OP_DST_SCALE_2X;
         nvfx_fp_emit(fpc, insn);
         nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])));
      }
      break;
   case TGSI_OPCODE_RSQ:
      if (!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
      else {
         /* rsq(x) = 2^(-0.5 * log2(|x|)) */
         tmp = nvfx_src(temp(fpc));
         insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
         insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
         nvfx_fp_emit(fpc, insn);
         nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
      }
      break;
   case TGSI_OPCODE_SCS:
      /* avoid overwriting the source */
      if (src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
      {
         if (mask & NVFX_FP_MASK_X)
            nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
         if (mask & NVFX_FP_MASK_Y)
            nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
      }
      else
      {
         if (mask & NVFX_FP_MASK_Y)
            nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
         if (mask & NVFX_FP_MASK_X)
            nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
      }
      break;
   case TGSI_OPCODE_SEQ:
      nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SFL:
      nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGE:
      nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGT:
      nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SIN:
      nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_SLE:
      nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SLT:
      nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SNE:
      nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SSG:
   {
      /* sign: copy src, then force 1 where > 0 and -1 where < 0 */
      struct nvfx_src minones = swz(nvfx_src(nvfx_fp_imm(fpc, -1, -1, -1, -1)), X, X, X, X);

      insn = arith(sat, MOV, dst, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(0, STR, dst, mask, none, none, none);
      insn.cc_test = NVFX_COND_GT;
      nvfx_fp_emit(fpc, insn);

      if (!sat) {
         insn = arith(0, MOV, dst, mask, minones, none, none);
         insn.cc_test = NVFX_COND_LT;
         nvfx_fp_emit(fpc, insn);
      }
      break;
   }
   case TGSI_OPCODE_STR:
      nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SUB:
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
      break;
   case TGSI_OPCODE_TEX:
      nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TRUNC:
      /* trunc = floor(|x|) with the sign of x restored via CC */
      tmp = nvfx_src(temp(fpc));
      insn = arith(0, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none));
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none));

      insn = arith(sat, MOV, dst, mask, neg(tmp), none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_TXB:
      nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TXL:
      if (fpc->is_nv4x)
         nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none));
      else /* unsupported on nv30, use TEX and hope they like it */
         nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TXP:
      nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_XPD:
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
      nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
      break;

   case TGSI_OPCODE_IF:
      // MOVRC0 R31 (TR0.xyzw), R<src>:
      // IF (NE.xxxx) ELSE <else> END <end>
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_if(fpc, src[0]);
      break;

   case TGSI_OPCODE_ELSE:
   {
      uint32_t *hw;
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      assert(util_dynarray_contains(&fpc->if_stack, unsigned));
      /* patch the open IF's else_offset to jump here */
      hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
      hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
      break;
   }

   case TGSI_OPCODE_ENDIF:
   {
      uint32_t *hw;
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      assert(util_dynarray_contains(&fpc->if_stack, unsigned));
      /* close the IF: fill in else (if no ELSE was seen) and endif */
      hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
      if (!hw[2])
         hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
      hw[3] = fpc->fp->insn_len;
      break;
   }

   case TGSI_OPCODE_BRA:
      /* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */
      /* no state tracker uses this, so don't implement this for now */
      assert(0);
      nv40_fp_bra(fpc, finst->Label.Label);
      break;

   case TGSI_OPCODE_BGNSUB:
   case TGSI_OPCODE_ENDSUB:
      /* nothing to do here */
      break;

   case TGSI_OPCODE_CAL:
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_cal(fpc, finst->Label.Label);
      break;

   case TGSI_OPCODE_RET:
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_ret(fpc);
      break;

   case TGSI_OPCODE_BGNLOOP:
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      /* TODO: we should support using two nested REPs to allow a > 255 iteration count */
      nv40_fp_rep(fpc, 255, finst->Label.Label);
      break;

   case TGSI_OPCODE_ENDLOOP:
      break;

   case TGSI_OPCODE_BRK:
      if (!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_brk(fpc);
      break;

   case TGSI_OPCODE_CONT:
   {
      static int warned = 0;
      if (!warned) {
         NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
         warned = 1;
      }
      break;
   }

   default:
      NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
      return FALSE;
   }

out:
   release_temps(fpc);
   return TRUE;
nv3x_cflow:
   {
      static int warned = 0;
      if (!warned) {
         NOUVEAU_ERR(
               "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
               "If rendering is incorrect, try to disable GLSL support in the application.\n");
         warned = 1;
      }
   }
   goto out;
}
913
/* Map one TGSI input declaration to a hardware input slot in
 * fpc->r_input.  GENERIC/PCOORD inputs are deferred: they get the
 * remaining texcoord slots later via nvfx_fragprog_assign_generic(). */
static boolean
nvfx_fragprog_parse_decl_input(struct nvfx_fpc *fpc,
                               const struct tgsi_full_declaration *fdec)
{
   unsigned idx = fdec->Range.First;
   unsigned hw;

   switch (fdec->Semantic.Name) {
   case TGSI_SEMANTIC_POSITION:
      hw = NVFX_FP_OP_INPUT_SRC_POSITION;
      break;
   case TGSI_SEMANTIC_COLOR:
      hw = NVFX_FP_OP_INPUT_SRC_COL0 + fdec->Semantic.Index;
      break;
   case TGSI_SEMANTIC_FOG:
      hw = NVFX_FP_OP_INPUT_SRC_FOGC;
      break;
   case TGSI_SEMANTIC_FACE:
      hw = NV40_FP_OP_INPUT_SRC_FACING;
      break;
   case TGSI_SEMANTIC_TEXCOORD:
      assert(fdec->Semantic.Index < 8);
      fpc->fp->texcoord[fdec->Semantic.Index] = fdec->Semantic.Index;
      fpc->fp->texcoords |= (1 << fdec->Semantic.Index);
      fpc->fp->vp_or |= (0x00004000 << fdec->Semantic.Index);
      hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.Index);
      break;
   case TGSI_SEMANTIC_GENERIC:
   case TGSI_SEMANTIC_PCOORD:
      /* will be assigned to remaining TC slots later */
      return TRUE;
   default:
      assert(0);
      return FALSE;
   }

   fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
   return TRUE;
}
953
/* Second declaration pass: give each GENERIC/PCOORD input one of the
 * texcoord slots that is still free (marked 0xffff).  Returns FALSE only
 * when no free slot remains; other semantics are no-ops here. */
static boolean
nvfx_fragprog_assign_generic(struct nvfx_fpc *fpc,
                             const struct tgsi_full_declaration *fdec)
{
   unsigned num_texcoords = fpc->is_nv4x ? 10 : 8; /* nv4x has 2 extra TCs */
   unsigned idx = fdec->Range.First;
   unsigned hw;

   switch (fdec->Semantic.Name) {
   case TGSI_SEMANTIC_GENERIC:
   case TGSI_SEMANTIC_PCOORD:
      for (hw = 0; hw < num_texcoords; hw++) {
         if (fpc->fp->texcoord[hw] == 0xffff) {
            /* slots 0-7 and 8-9 use different vp_or bit ranges */
            if (hw <= 7) {
               fpc->fp->texcoords |= (0x1 << hw);
               fpc->fp->vp_or |= (0x00004000 << hw);
            } else {
               fpc->fp->vp_or |= (0x00001000 << (hw - 8));
            }
            if (fdec->Semantic.Name == TGSI_SEMANTIC_PCOORD) {
               /* 0xfffe marks a point-sprite coordinate slot */
               fpc->fp->texcoord[hw] = 0xfffe;
               fpc->fp->point_sprite_control |= (0x00000100 << hw);
            } else {
               fpc->fp->texcoord[hw] = fdec->Semantic.Index + 8;
            }
            /* reuse "hw" for the final encoded input slot */
            hw = NVFX_FP_OP_INPUT_SRC_TC(hw);
            fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
            return TRUE;
         }
      }
      return FALSE;
   default:
      return TRUE;
   }
}
989
990 static boolean
991 nvfx_fragprog_parse_decl_output(struct nvfx_fpc *fpc,
992 const struct tgsi_full_declaration *fdec)
993 {
994 unsigned idx = fdec->Range.First;
995 unsigned hw;
996
997 switch (fdec->Semantic.Name) {
998 case TGSI_SEMANTIC_POSITION:
999 hw = 1;
1000 break;
1001 case TGSI_SEMANTIC_COLOR:
1002 hw = ~0;
1003 switch (fdec->Semantic.Index) {
1004 case 0: hw = 0; break;
1005 case 1: hw = 2; break;
1006 case 2: hw = 3; break;
1007 case 3: hw = 4; break;
1008 }
1009 if(hw > ((fpc->is_nv4x) ? 4 : 2)) {
1010 NOUVEAU_ERR("bad rcol index\n");
1011 return FALSE;
1012 }
1013 break;
1014 default:
1015 NOUVEAU_ERR("bad output semantic\n");
1016 return FALSE;
1017 }
1018
1019 fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
1020 fpc->r_temps |= (1ULL << hw);
1021 return TRUE;
1022 }
1023
1024 static boolean
1025 nvfx_fragprog_prepare(struct nvfx_fpc *fpc)
1026 {
1027 struct tgsi_parse_context p;
1028 int high_temp = -1, i;
1029
1030 fpc->r_imm = CALLOC(fpc->fp->info.immediate_count, sizeof(struct nvfx_reg));
1031
1032 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
1033 while (!tgsi_parse_end_of_tokens(&p)) {
1034 const union tgsi_full_token *tok = &p.FullToken;
1035
1036 tgsi_parse_token(&p);
1037 switch(tok->Token.Type) {
1038 case TGSI_TOKEN_TYPE_DECLARATION:
1039 {
1040 const struct tgsi_full_declaration *fdec;
1041 fdec = &p.FullToken.FullDeclaration;
1042 switch (fdec->Declaration.File) {
1043 case TGSI_FILE_INPUT:
1044 if (!nvfx_fragprog_parse_decl_input(fpc, fdec))
1045 goto out_err;
1046 break;
1047 case TGSI_FILE_OUTPUT:
1048 if (!nvfx_fragprog_parse_decl_output(fpc, fdec))
1049 goto out_err;
1050 break;
1051 case TGSI_FILE_TEMPORARY:
1052 if (fdec->Range.Last > high_temp) {
1053 high_temp =
1054 fdec->Range.Last;
1055 }
1056 break;
1057 default:
1058 break;
1059 }
1060 }
1061 break;
1062 case TGSI_TOKEN_TYPE_IMMEDIATE:
1063 {
1064 struct tgsi_full_immediate *imm;
1065
1066 imm = &p.FullToken.FullImmediate;
1067 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
1068 assert(fpc->nr_imm < fpc->fp->info.immediate_count);
1069
1070 fpc->r_imm[fpc->nr_imm++] = nvfx_fp_imm(fpc, imm->u[0].Float, imm->u[1].Float, imm->u[2].Float, imm->u[3].Float);
1071 break;
1072 }
1073 default:
1074 break;
1075 }
1076 }
1077 tgsi_parse_free(&p);
1078
1079 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
1080 while (!tgsi_parse_end_of_tokens(&p)) {
1081 const struct tgsi_full_declaration *fdec;
1082 tgsi_parse_token(&p);
1083 switch(p.FullToken.Token.Type) {
1084 case TGSI_TOKEN_TYPE_DECLARATION:
1085 fdec = &p.FullToken.FullDeclaration;
1086 switch (fdec->Declaration.File) {
1087 case TGSI_FILE_INPUT:
1088 if (!nvfx_fragprog_assign_generic(fpc, fdec))
1089 goto out_err;
1090 break;
1091 default:
1092 break;
1093 }
1094 break;
1095 default:
1096 break;
1097 }
1098 }
1099 tgsi_parse_free(&p);
1100
1101 if (++high_temp) {
1102 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
1103 for (i = 0; i < high_temp; i++)
1104 fpc->r_temp[i] = temp(fpc);
1105 fpc->r_temps_discard = 0ULL;
1106 }
1107
1108 return TRUE;
1109
1110 out_err:
1111 FREE(fpc->r_temp);
1112 fpc->r_temp = NULL;
1113
1114 tgsi_parse_free(&p);
1115 return FALSE;
1116 }
1117
/* NVFX_DUMP_FP=1 in the environment enables dumping of translated fragment programs. */
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
1119
1120 void
1121 _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp)
1122 {
1123 struct tgsi_parse_context parse;
1124 struct nvfx_fpc *fpc = NULL;
1125 struct util_dynarray insns;
1126
1127 fp->translated = FALSE;
1128 fp->point_sprite_control = 0;
1129 fp->vp_or = 0;
1130
1131 fpc = CALLOC_STRUCT(nvfx_fpc);
1132 if (!fpc)
1133 goto out_err;
1134
1135 fpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
1136 fpc->max_temps = fpc->is_nv4x ? 48 : 32;
1137 fpc->fp = fp;
1138 fpc->num_regs = 2;
1139 memset(fp->texcoord, 0xff, sizeof(fp->texcoord));
1140
1141 for (unsigned i = 0; i < fp->info.num_properties; ++i) {
1142 switch (fp->info.properties[i].name) {
1143 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1144 if (fp->info.properties[i].data[0])
1145 fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_ORIGIN_INVERTED;
1146 break;
1147 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1148 if (fp->info.properties[i].data[0])
1149 fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_CENTER_INTEGER;
1150 break;
1151 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1152 if (fp->info.properties[i].data[0])
1153 fp->rt_enable |= NV30_3D_RT_ENABLE_MRT;
1154 break;
1155 default:
1156 break;
1157 }
1158 }
1159
1160 if (!nvfx_fragprog_prepare(fpc))
1161 goto out_err;
1162
1163 tgsi_parse_init(&parse, fp->pipe.tokens);
1164 util_dynarray_init(&insns);
1165
1166 while (!tgsi_parse_end_of_tokens(&parse)) {
1167 tgsi_parse_token(&parse);
1168
1169 switch (parse.FullToken.Token.Type) {
1170 case TGSI_TOKEN_TYPE_INSTRUCTION:
1171 {
1172 const struct tgsi_full_instruction *finst;
1173
1174 util_dynarray_append(&insns, unsigned, fp->insn_len);
1175 finst = &parse.FullToken.FullInstruction;
1176 if (!nvfx_fragprog_parse_instruction(fpc, finst))
1177 goto out_err;
1178 }
1179 break;
1180 default:
1181 break;
1182 }
1183 }
1184 util_dynarray_append(&insns, unsigned, fp->insn_len);
1185
1186 for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
1187 {
1188 struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i);
1189 fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
1190 }
1191 util_dynarray_fini(&insns);
1192
1193 if(!fpc->is_nv4x)
1194 fp->fp_control |= (fpc->num_regs-1)/2;
1195 else
1196 fp->fp_control |= fpc->num_regs << NV40_3D_FP_CONTROL_TEMP_COUNT__SHIFT;
1197
1198 /* Terminate final instruction */
1199 if(fp->insn)
1200 fp->insn[fpc->inst_offset] |= 0x00000001;
1201
1202 /* Append NOP + END instruction for branches to the end of the program */
1203 fpc->inst_offset = fp->insn_len;
1204 grow_insns(fpc, 4);
1205 fp->insn[fpc->inst_offset + 0] = 0x00000001;
1206 fp->insn[fpc->inst_offset + 1] = 0x00000000;
1207 fp->insn[fpc->inst_offset + 2] = 0x00000000;
1208 fp->insn[fpc->inst_offset + 3] = 0x00000000;
1209
1210 if(debug_get_option_nvfx_dump_fp())
1211 {
1212 debug_printf("\n");
1213 tgsi_dump(fp->pipe.tokens, 0);
1214
1215 debug_printf("\n%s fragment program:\n", fpc->is_nv4x ? "nv4x" : "nv3x");
1216 for (unsigned i = 0; i < fp->insn_len; i += 4)
1217 debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
1218 debug_printf("\n");
1219 }
1220
1221 fp->translated = TRUE;
1222
1223 out:
1224 tgsi_parse_free(&parse);
1225 if(fpc)
1226 {
1227 FREE(fpc->r_temp);
1228 FREE(fpc->r_imm);
1229 util_dynarray_fini(&fpc->if_stack);
1230 util_dynarray_fini(&fpc->label_relocs);
1231 util_dynarray_fini(&fpc->imm_data);
1232 //util_dynarray_fini(&fpc->loop_stack);
1233 FREE(fpc);
1234 }
1235
1236 return;
1237
1238 out_err:
1239 _debug_printf("Error: failed to compile this fragment program:\n");
1240 tgsi_dump(fp->pipe.tokens, 0);
1241 goto out;
1242 }
1243
/* Copy fragment-program words into the upload buffer. On big-endian
 * hosts the 16-bit halves of each 32-bit word are swapped to match the
 * layout the hardware expects; little-endian hosts copy verbatim. */
static inline void
nvfx_fp_memcpy(void* dst, const void* src, size_t len)
{
#ifndef PIPE_ARCH_BIG_ENDIAN
   memcpy(dst, src, len);
#else
   size_t off;
   for (off = 0; off < len; off += 4) {
      uint32_t word;
      /* memcpy instead of pointer casts avoids alignment/aliasing issues. */
      memcpy(&word, (const char *)src + off, sizeof(word));
      word = (word << 16) | (word >> 16);
      memcpy((char *)dst + off, &word, sizeof(word));
   }
#endif
}