dab42e17cfebb1e7ecf8e4e2a05d73927bf9b953
[mesa.git] / src / gallium / drivers / nouveau / nv30 / nvfx_fragprog.c
1 #include <float.h>
2 #include "pipe/p_context.h"
3 #include "pipe/p_defines.h"
4 #include "pipe/p_state.h"
5 #include "util/u_dynarray.h"
6 #include "util/u_inlines.h"
7 #include "util/u_debug.h"
8
9 #include "pipe/p_shader_tokens.h"
10 #include "tgsi/tgsi_parse.h"
11 #include "tgsi/tgsi_util.h"
12 #include "tgsi/tgsi_dump.h"
13 #include "tgsi/tgsi_ureg.h"
14
15 #include "nouveau_debug.h"
16 #include "nv_object.xml.h"
17 #include "nv30/nv30-40_3d.xml.h"
18 #include "nv30/nvfx_shader.h"
19 #include "nv30/nv30_state.h"
20
/* Per-compilation state for translating a TGSI fragment program into
 * nv30/nv40 fragment program machine code. */
struct nvfx_fpc {
   struct nv30_fragprog *fp;   /* program being assembled (insns, consts, flags) */

   unsigned max_temps;                  /* hardware temp register limit */
   unsigned long long r_temps;          /* bitmask of allocated hw temps */
   unsigned long long r_temps_discard;  /* temps to release after the current instruction */
   struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];  /* TGSI output -> hw reg */
   struct nvfx_reg r_input[PIPE_MAX_SHADER_INPUTS];    /* TGSI input -> hw input slot */
   struct nvfx_reg *r_temp;             /* TGSI temporary -> hw temp */

   int num_regs;        /* highest hw register index written + 1 */

   unsigned inst_offset;  /* word offset of the instruction being emitted */
   unsigned have_const;   /* set once the current insn allocated its const slot */
   unsigned is_nv4x;      /* nonzero when targeting nv4x (enables branching etc.) */

   struct util_dynarray imm_data;  /* packed vec4 immediates (16 bytes each) */

   struct nvfx_reg* r_imm;  /* TGSI immediate index -> NVFXSR_IMM reg */
   unsigned nr_imm;         /* number of entries in r_imm */

   struct util_dynarray if_stack;      /* offsets of open IF insns awaiting patch */
   //struct util_dynarray loop_stack;
   struct util_dynarray label_relocs;  /* branch targets fixed up after emission */
};
46
47 static inline struct nvfx_reg
48 temp(struct nvfx_fpc *fpc)
49 {
50 int idx = __builtin_ctzll(~fpc->r_temps);
51
52 if (idx >= fpc->max_temps) {
53 NOUVEAU_ERR("out of temps!!\n");
54 return nvfx_reg(NVFXSR_TEMP, 0);
55 }
56
57 fpc->r_temps |= (1ULL << idx);
58 fpc->r_temps_discard |= (1ULL << idx);
59 return nvfx_reg(NVFXSR_TEMP, idx);
60 }
61
62 static inline void
63 release_temps(struct nvfx_fpc *fpc)
64 {
65 fpc->r_temps &= ~fpc->r_temps_discard;
66 fpc->r_temps_discard = 0ULL;
67 }
68
69 static inline struct nvfx_reg
70 nvfx_fp_imm(struct nvfx_fpc *fpc, float a, float b, float c, float d)
71 {
72 float v[4] = {a, b, c, d};
73 int idx = fpc->imm_data.size >> 4;
74
75 memcpy(util_dynarray_grow(&fpc->imm_data, sizeof(float) * 4), v, 4 * sizeof(float));
76 return nvfx_reg(NVFXSR_IMM, idx);
77 }
78
79 static void
80 grow_insns(struct nvfx_fpc *fpc, int size)
81 {
82 struct nv30_fragprog *fp = fpc->fp;
83
84 fp->insn_len += size;
85 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
86 }
87
/* Encode one source operand into the current instruction.
 *
 * pos selects which of the three hardware source slots (0..2) to fill;
 * slot N's descriptor lives in word N+1 of the 4-word instruction.
 * IMM and CONST sources additionally allocate the 4-word constant slot
 * that immediately follows the instruction (at inst_offset + 4); the
 * have_const flag ensures it is allocated only once per instruction.
 */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw = &fp->insn[fpc->inst_offset];
   uint32_t sr = 0;

   switch (src.reg.type) {
   case NVFXSR_INPUT:
      sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
      /* the input selector lives in word 0, not in the source word */
      hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
      break;
   case NVFXSR_OUTPUT:
      /* outputs are read back as half-register temps */
      sr |= NVFX_FP_REG_SRC_HALF;
      /* fall-through */
   case NVFXSR_TEMP:
      sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
      sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
      break;
   case NVFXSR_IMM:
      if (!fpc->have_const) {
         grow_insns(fpc, 4);
         /* grow_insns may move the buffer; re-fetch the pointer */
         hw = &fp->insn[fpc->inst_offset];
         fpc->have_const = 1;
      }

      /* copy the vec4 immediate into the inline constant slot */
      memcpy(&fp->insn[fpc->inst_offset + 4],
             (float*)fpc->imm_data.data + src.reg.index * 4,
             sizeof(uint32_t) * 4);

      sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
      break;
   case NVFXSR_CONST:
      if (!fpc->have_const) {
         grow_insns(fpc, 4);
         hw = &fp->insn[fpc->inst_offset];
         fpc->have_const = 1;
      }

      {
         struct nv30_fragprog_data *fpd;

         /* record where this user constant must be patched in at
          * upload time; the inline slot is zeroed for now */
         fp->consts = realloc(fp->consts, ++fp->nr_consts *
                              sizeof(*fpd));
         fpd = &fp->consts[fp->nr_consts - 1];
         fpd->offset = fpc->inst_offset + 4;
         fpd->index = src.reg.index;
         memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
      }

      sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
      break;
   case NVFXSR_NONE:
      sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
      break;
   default:
      assert(0);
   }

   if (src.negate)
      sr |= NVFX_FP_REG_NEGATE;

   /* per-source absolute-value flags sit in word 1, bits 29..31 */
   if (src.abs)
      hw[1] |= (1 << (29 + pos));

   sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
          (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
          (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
          (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

   hw[pos + 1] |= sr;
}
160
/* Encode the destination operand into word 0 of the current instruction. */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw = &fp->insn[fpc->inst_offset];

   switch (dst.type) {
   case NVFXSR_OUTPUT:
      if (dst.index == 1)
         /* output reg 1: set extra fp_control bits — presumably
          * depth-output related; exact meaning not visible here */
         fp->fp_control |= 0x0000000e;
      else {
         /* color outputs use half-register addressing */
         hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
         dst.index <<= 1;
      }
      /* fall-through */
   case NVFXSR_TEMP:
      /* track the highest register written for the hw register count */
      if (fpc->num_regs < (dst.index + 1))
         fpc->num_regs = dst.index + 1;
      break;
   case NVFXSR_NONE:
      /* no register write (bit 30 — presumably the OUT_NONE flag) */
      hw[0] |= (1 << 30);
      break;
   default:
      assert(0);
   }

   hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
189
/* Append one 4-word hardware instruction built from 'insn'.
 *
 * Allocates (and zeroes) the instruction slot, fills in opcode, write
 * mask, scale, saturation, condition-code fields and texture unit, then
 * delegates operand encoding to emit_dst()/emit_src().  Sources that
 * need an inline constant extend the slot by another 4 words.
 */
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
   struct nv30_fragprog *fp = fpc->fp;
   uint32_t *hw;

   fpc->inst_offset = fp->insn_len;
   fpc->have_const = 0;
   grow_insns(fpc, 4);
   hw = &fp->insn[fpc->inst_offset];
   memset(hw, 0, sizeof(uint32_t) * 4);

   /* the rasterizer must be told when a program can discard fragments */
   if (insn.op == NVFX_FP_OP_OPCODE_KIL)
      fp->fp_control |= NV30_3D_FP_CONTROL_USES_KIL;
   hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
   hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
   hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

   if (insn.sat)
      hw[0] |= NVFX_FP_OP_OUT_SAT;

   if (insn.cc_update)
      hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
   hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
   hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
             (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
             (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
             (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

   /* unit is -1 for non-texture instructions */
   if(insn.unit >= 0)
   {
      hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
   }

   emit_dst(fpc, insn.dst);
   emit_src(fpc, 0, insn.src[0]);
   emit_src(fpc, 1, insn.src[1]);
   emit_src(fpc, 2, insn.src[2]);
}
229
/* Build an ALU instruction (texture unit -1 = none). */
#define arith(s,o,d,m,s0,s1,s2) \
   nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
             (d), (m), (s0), (s1), (s2))

/* Build a texture instruction for unit 'u'.
 * NOTE: the s1/s2 parameters are accepted but deliberately ignored —
 * the expansion always passes 'none', which must be in scope at the
 * call site. */
#define tex(s,o,u,d,m,s0,s1,s2) \
   nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
             (d), (m), (s0), none, none)
237
/* IF src.x != 0, as TGSI specifies.
 *
 * Emits a CC-updating MOV of src followed by an IF branch instruction
 * whose else/endif offsets are left zero; they are patched in later by
 * the ELSE/ENDIF opcode handlers via if_stack.
 */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
   const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
   uint32_t *hw;
   /* dummy MOV whose only effect is loading the condition code */
   insn.cc_update = 1;
   nvfx_fp_emit(fpc, insn);

   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
           NV40_FP_OP_OUT_NONE |
           (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* Use .xxxx swizzle so that we check only src[0].x */
   hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
           (0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
           (0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
           (0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
           (NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
   hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
   hw[3] = 0; /* | endif_offset */
   /* remember this insn so ELSE/ENDIF can patch the offsets above */
   util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
265
/* Emit an unconditional subroutine CAL to TGSI label 'target'.
 * The call offset is unknown at this point, so a relocation is queued
 * in label_relocs and fixed up after the whole program is emitted. */
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
   /* condition is always-true (TR) with identity swizzle */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
           (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
   hw[3] = 0;
   reloc.target = target;
   reloc.location = fpc->inst_offset + 2;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
286
/* Emit an unconditional RET, returning from a subroutine entered
 * with nv40_fp_cal(). */
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
   /* condition is always-true (TR) with identity swizzle */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
           (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
   hw[3] = 0;
}
302
/* Emit a REP (hardware repeat-loop) instruction iterating 'count'
 * times.  The loop-end offset is resolved later through label_relocs,
 * targeting TGSI label 'target'. */
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
           NV40_FP_OP_OUT_NONE |
           (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* condition is always-true (TR) with identity swizzle */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
           (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   /* the same count is written to all three count fields */
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
           (count << NV40_FP_OP_REP_COUNT1_SHIFT) |
           (count << NV40_FP_OP_REP_COUNT2_SHIFT) |
           (count << NV40_FP_OP_REP_COUNT3_SHIFT);
   hw[3] = 0; /* | end_offset */
   reloc.target = target;
   reloc.location = fpc->inst_offset + 3;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
   //util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
328
#if 0
/* documentation only */
/* warning: this only works forward, and probably only if not inside any IF */
/* Unconditional forward branch, implemented as an IF whose condition is
 * always false (FL): execution falls into the "else" side at 'target'.
 * Both else_offset and endif_offset relocate to the same label. */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
   struct nvfx_relocation reloc;
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
   hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
           NV40_FP_OP_OUT_NONE |
           (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
   /* condition is always-false (FL) so the "then" side never runs */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
           (NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
   hw[3] = 0; /* | endif_offset */
   reloc.target = target;
   reloc.location = fpc->inst_offset + 2;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
   reloc.target = target;
   reloc.location = fpc->inst_offset + 3;
   util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
#endif
357
/* Emit an unconditional BRK, leaving the innermost REP loop. */
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
   uint32_t *hw;
   fpc->inst_offset = fpc->fp->insn_len;
   grow_insns(fpc, 4);
   hw = &fpc->fp->insn[fpc->inst_offset];
   hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
           NV40_FP_OP_OUT_NONE;
   /* condition is always-true (TR) with identity swizzle */
   hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
           (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
   hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
   hw[3] = 0;
}
374
375 static inline struct nvfx_src
376 tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
377 {
378 struct nvfx_src src;
379
380 switch (fsrc->Register.File) {
381 case TGSI_FILE_INPUT:
382 src.reg = fpc->r_input[fsrc->Register.Index];
383 break;
384 case TGSI_FILE_CONSTANT:
385 src.reg = nvfx_reg(NVFXSR_CONST, fsrc->Register.Index);
386 break;
387 case TGSI_FILE_IMMEDIATE:
388 assert(fsrc->Register.Index < fpc->nr_imm);
389 src.reg = fpc->r_imm[fsrc->Register.Index];
390 break;
391 case TGSI_FILE_TEMPORARY:
392 src.reg = fpc->r_temp[fsrc->Register.Index];
393 break;
394 /* NV40 fragprog result regs are just temps, so this is simple */
395 case TGSI_FILE_OUTPUT:
396 src.reg = fpc->r_result[fsrc->Register.Index];
397 break;
398 default:
399 NOUVEAU_ERR("bad src file\n");
400 src.reg.index = 0;
401 src.reg.type = 0;
402 break;
403 }
404
405 src.abs = fsrc->Register.Absolute;
406 src.negate = fsrc->Register.Negate;
407 src.swz[0] = fsrc->Register.SwizzleX;
408 src.swz[1] = fsrc->Register.SwizzleY;
409 src.swz[2] = fsrc->Register.SwizzleZ;
410 src.swz[3] = fsrc->Register.SwizzleW;
411 src.indirect = 0;
412 src.indirect_reg = 0;
413 src.indirect_swz = 0;
414 return src;
415 }
416
417 static inline struct nvfx_reg
418 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
419 switch (fdst->Register.File) {
420 case TGSI_FILE_OUTPUT:
421 return fpc->r_result[fdst->Register.Index];
422 case TGSI_FILE_TEMPORARY:
423 return fpc->r_temp[fdst->Register.Index];
424 case TGSI_FILE_NULL:
425 return nvfx_reg(NVFXSR_NONE, 0);
426 default:
427 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
428 return nvfx_reg(NVFXSR_NONE, 0);
429 }
430 }
431
432 static inline int
433 tgsi_mask(uint tgsi)
434 {
435 int mask = 0;
436
437 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
438 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
439 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
440 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
441 return mask;
442 }
443
/* Translate a single TGSI instruction into one or more hardware
 * instructions.
 *
 * First resolves the source operands, copying extras through temps
 * where the hardware cannot read them directly, then expands the TGSI
 * opcode into the hw opcode(s).  Control-flow opcodes are hw-assisted
 * on nv4x only; on nv3x they are ignored with a one-time warning.
 *
 * Returns true on success (including ignored opcodes), false on an
 * unknown opcode or bad operand file.
 */
static bool
nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc,
                const struct tgsi_full_instruction *finst)
{
   const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn;
   struct nvfx_src src[3], tmp;
   struct nvfx_reg dst;
   int mask, sat, unit = 0;
   int ai = -1, ci = -1, ii = -1;   /* attrib/const/immediate index already claimed by this insn */
   int i;

   if (finst->Instruction.Opcode == TGSI_OPCODE_END)
      return true;

   /* temporaries map 1:1 to hw temps, so resolve them up front */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];
      if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
         src[i] = tgsi_src(fpc, fsrc);
      }
   }

   /* resolve the remaining sources; an instruction can apparently use
    * only one distinct input and one distinct const/immediate, so any
    * additional ones are first copied into temps */
   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];

      switch (fsrc->Register.File) {
      case TGSI_FILE_INPUT:
         if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG && (0
            || fsrc->Register.SwizzleX == PIPE_SWIZZLE_W
            || fsrc->Register.SwizzleY == PIPE_SWIZZLE_W
            || fsrc->Register.SwizzleZ == PIPE_SWIZZLE_W
            || fsrc->Register.SwizzleW == PIPE_SWIZZLE_W
            )) {
            /* hardware puts 0 in fogcoord.w, but GL/Gallium want 1 there */
            struct nvfx_src addend = nvfx_src(nvfx_fp_imm(fpc, 0, 0, 0, 1));
            addend.swz[0] = fsrc->Register.SwizzleX;
            addend.swz[1] = fsrc->Register.SwizzleY;
            addend.swz[2] = fsrc->Register.SwizzleZ;
            addend.swz[3] = fsrc->Register.SwizzleW;
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, ADD, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), addend, none));
         } else if (ai == -1 || ai == fsrc->Register.Index) {
            ai = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            /* a different input is already used: go through a temp */
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_CONSTANT:
         if ((ci == -1 && ii == -1) ||
             ci == fsrc->Register.Index) {
            ci = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_IMMEDIATE:
         if ((ci == -1 && ii == -1) ||
             ii == fsrc->Register.Index) {
            ii = fsrc->Register.Index;
            src[i] = tgsi_src(fpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_TEMPORARY:
         /* handled above */
         break;
      case TGSI_FILE_SAMPLER:
         unit = fsrc->Register.Index;
         break;
      case TGSI_FILE_OUTPUT:
         break;
      default:
         NOUVEAU_ERR("bad src file\n");
         return false;
      }
   }

   dst = tgsi_dst(fpc, &finst->Dst[0]);
   mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
   sat = finst->Instruction.Saturate;

   switch (finst->Instruction.Opcode) {
   case TGSI_OPCODE_ABS:
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
      break;
   case TGSI_OPCODE_ADD:
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_CEIL:
      /* ceil(x) = -floor(-x) */
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, neg(src[0]), none, none));
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, neg(tmp), none, none));
      break;
   case TGSI_OPCODE_CMP:
      /* dst = src0 < 0 ? src1 : src2, via the condition code */
      insn = arith(0, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(sat, MOV, dst, mask, src[2], none, none);
      insn.cc_test = NVFX_COND_GE;
      nvfx_fp_emit(fpc, insn);

      insn = arith(sat, MOV, dst, mask, src[1], none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_COS:
      nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_DDX:
      /* hw derivative writes only .xy; route .zw through a swizzled temp */
      if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
         nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
      } else {
         nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
      }
      break;
   case TGSI_OPCODE_DDY:
      if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
         nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
         nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
      } else {
         nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
      }
      break;
   case TGSI_OPCODE_DP2:
      /* no hw DP2: multiply then add .x + .y */
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none));
      nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
      break;
   case TGSI_OPCODE_DP3:
      nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DP4:
      nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DPH:
      /* dph = dp3(src0, src1) + src1.w */
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
      break;
   case TGSI_OPCODE_DST:
      nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_EX2:
      nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FLR:
      nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FRC:
      nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_KILL:
      /* unconditional fragment kill */
      nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_KILL_IF:
      /* kill where any enabled component of src[0] is negative */
      insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(0, KIL, none.reg, 0, none, none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_LG2:
      nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_LIT:
      if(!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, LIT_NV30, dst, mask, src[0], none, none));
      else {
         /* we use FLT_MIN, so that log2 never gives -infinity, and thus multiplication by
          * specular 0 always gives 0, so that ex2 gives 1, to satisfy the 0^0 = 1 requirement
          *
          * NOTE: if we start using half precision, we might need an fp16 FLT_MIN here instead
          */
         struct nvfx_src maxs = nvfx_src(nvfx_fp_imm(fpc, 0, FLT_MIN, 0, 0));
         tmp = nvfx_src(temp(fpc));
         if (ci>= 0 || ii >= 0) {
            /* the const/imm slot is taken by src[0]: stage maxs in a temp */
            nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, maxs, none, none));
            maxs = tmp;
         }
         nvfx_fp_emit(fpc, arith(0, MAX, tmp.reg, NVFX_FP_MASK_Y | NVFX_FP_MASK_W, swz(src[0], X, X, X, Y), swz(maxs, X, X, Y, Y), none));
         nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), none, none));
         nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), swz(src[0], W, W, W, W), none));
         nvfx_fp_emit(fpc, arith(sat, LITEX2_NV40, dst, mask, swz(tmp, Y, Y, W, W), none, none));
      }
      break;
   case TGSI_OPCODE_LRP:
      if(!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
      else {
         /* lrp = src0*src1 + (1-src0)*src2, as two MADs */
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
         nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
      }
      break;
   case TGSI_OPCODE_MAD:
      nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
      break;
   case TGSI_OPCODE_MAX:
      nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MIN:
      nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MOV:
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_MUL:
      nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_NOP:
      break;
   case TGSI_OPCODE_POW:
      if(!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
      else {
         /* pow(x, y) = ex2(y * lg2(x)) */
         tmp = nvfx_src(temp(fpc));
         nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
         nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
         nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
      }
      break;
   case TGSI_OPCODE_RCP:
      nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_RSQ:
      if(!fpc->is_nv4x)
         nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
      else {
         /* rsq(x) = ex2(-lg2(|x|) / 2), using the hw half scale */
         tmp = nvfx_src(temp(fpc));
         insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
         insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
         nvfx_fp_emit(fpc, insn);
         nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
      }
      break;
   case TGSI_OPCODE_SCS:
      /* avoid overwriting the source */
      if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
      {
         if (mask & NVFX_FP_MASK_X)
            nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
         if (mask & NVFX_FP_MASK_Y)
            nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
      }
      else
      {
         if (mask & NVFX_FP_MASK_Y)
            nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
         if (mask & NVFX_FP_MASK_X)
            nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
      }
      break;
   case TGSI_OPCODE_SEQ:
      nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGE:
      nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGT:
      nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SIN:
      nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_SLE:
      nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SLT:
      nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SNE:
      nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SSG:
   {
      /* sign: copy src (sets CC), overwrite 1 where >0 and -1 where <0;
       * the -1 store is skipped when saturating (result clamps to 0) */
      struct nvfx_src minones = swz(nvfx_src(nvfx_fp_imm(fpc, -1, -1, -1, -1)), X, X, X, X);

      insn = arith(sat, MOV, dst, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      insn = arith(0, STR, dst, mask, none, none, none);
      insn.cc_test = NVFX_COND_GT;
      nvfx_fp_emit(fpc, insn);

      if(!sat) {
         insn = arith(0, MOV, dst, mask, minones, none, none);
         insn.cc_test = NVFX_COND_LT;
         nvfx_fp_emit(fpc, insn);
      }
      break;
   }
   case TGSI_OPCODE_SUB:
      nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
      break;
   case TGSI_OPCODE_TEX:
      nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TRUNC:
      /* trunc(x) = sign(x) * floor(|x|), selected via the CC of src */
      tmp = nvfx_src(temp(fpc));
      insn = arith(0, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_fp_emit(fpc, insn);

      nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none));
      nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none));

      insn = arith(sat, MOV, dst, mask, neg(tmp), none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_fp_emit(fpc, insn);
      break;
   case TGSI_OPCODE_TXB:
      nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TXL:
      if(fpc->is_nv4x)
         nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none));
      else /* unsupported on nv30, use TEX and hope they like it */
         nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TXP:
      nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_XPD:
      /* cross product via MUL + MAD with rotated swizzles; .w excluded */
      tmp = nvfx_src(temp(fpc));
      nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
      nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
      break;

   case TGSI_OPCODE_IF:
      // MOVRC0 R31 (TR0.xyzw), R<src>:
      // IF (NE.xxxx) ELSE <else> END <end>
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_if(fpc, src[0]);
      break;

   case TGSI_OPCODE_ELSE:
   {
      uint32_t *hw;
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      assert(util_dynarray_contains(&fpc->if_stack, unsigned));
      /* patch the matching IF's else_offset to the current position */
      hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
      hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
      break;
   }

   case TGSI_OPCODE_ENDIF:
   {
      uint32_t *hw;
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      assert(util_dynarray_contains(&fpc->if_stack, unsigned));
      hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
      /* no ELSE was seen if hw[2] is still zero: else == endif */
      if(!hw[2])
         hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
      hw[3] = fpc->fp->insn_len;
      break;
   }

   case TGSI_OPCODE_BGNSUB:
   case TGSI_OPCODE_ENDSUB:
      /* nothing to do here */
      break;

   case TGSI_OPCODE_CAL:
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_cal(fpc, finst->Label.Label);
      break;

   case TGSI_OPCODE_RET:
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_ret(fpc);
      break;

   case TGSI_OPCODE_BGNLOOP:
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      /* TODO: we should support using two nested REPs to allow a > 255 iteration count */
      nv40_fp_rep(fpc, 255, finst->Label.Label);
      break;

   case TGSI_OPCODE_ENDLOOP:
      break;

   case TGSI_OPCODE_BRK:
      if(!fpc->is_nv4x)
         goto nv3x_cflow;
      nv40_fp_brk(fpc);
      break;

   case TGSI_OPCODE_CONT:
   {
      static int warned = 0;
      if(!warned) {
         NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
         warned = 1;
      }
      break;
   }

   default:
      NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
      return false;
   }

out:
   release_temps(fpc);
   return true;
nv3x_cflow:
   {
      static int warned = 0;
      if(!warned) {
         NOUVEAU_ERR(
            "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
            "If rendering is incorrect, try to disable GLSL support in the application.\n");
         warned = 1;
      }
   }
   goto out;
}
888
889 static bool
890 nvfx_fragprog_parse_decl_input(struct nvfx_fpc *fpc,
891 const struct tgsi_full_declaration *fdec)
892 {
893 unsigned idx = fdec->Range.First;
894 unsigned hw;
895
896 switch (fdec->Semantic.Name) {
897 case TGSI_SEMANTIC_POSITION:
898 hw = NVFX_FP_OP_INPUT_SRC_POSITION;
899 break;
900 case TGSI_SEMANTIC_COLOR:
901 hw = NVFX_FP_OP_INPUT_SRC_COL0 + fdec->Semantic.Index;
902 break;
903 case TGSI_SEMANTIC_FOG:
904 hw = NVFX_FP_OP_INPUT_SRC_FOGC;
905 break;
906 case TGSI_SEMANTIC_FACE:
907 hw = NV40_FP_OP_INPUT_SRC_FACING;
908 break;
909 case TGSI_SEMANTIC_TEXCOORD:
910 assert(fdec->Semantic.Index < 8);
911 fpc->fp->texcoord[fdec->Semantic.Index] = fdec->Semantic.Index;
912 fpc->fp->texcoords |= (1 << fdec->Semantic.Index);
913 fpc->fp->vp_or |= (0x00004000 << fdec->Semantic.Index);
914 hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.Index);
915 break;
916 case TGSI_SEMANTIC_GENERIC:
917 case TGSI_SEMANTIC_PCOORD:
918 /* will be assigned to remaining TC slots later */
919 return true;
920 default:
921 assert(0);
922 return false;
923 }
924
925 fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
926 return true;
927 }
928
929 static bool
930 nvfx_fragprog_assign_generic(struct nvfx_fpc *fpc,
931 const struct tgsi_full_declaration *fdec)
932 {
933 unsigned num_texcoords = fpc->is_nv4x ? 10 : 8;
934 unsigned idx = fdec->Range.First;
935 unsigned hw;
936
937 switch (fdec->Semantic.Name) {
938 case TGSI_SEMANTIC_GENERIC:
939 case TGSI_SEMANTIC_PCOORD:
940 for (hw = 0; hw < num_texcoords; hw++) {
941 if (fpc->fp->texcoord[hw] == 0xffff) {
942 if (hw <= 7) {
943 fpc->fp->texcoords |= (0x1 << hw);
944 fpc->fp->vp_or |= (0x00004000 << hw);
945 } else {
946 fpc->fp->vp_or |= (0x00001000 << (hw - 8));
947 }
948 if (fdec->Semantic.Name == TGSI_SEMANTIC_PCOORD) {
949 fpc->fp->texcoord[hw] = 0xfffe;
950 fpc->fp->point_sprite_control |= (0x00000100 << hw);
951 } else {
952 fpc->fp->texcoord[hw] = fdec->Semantic.Index + 8;
953 }
954 hw = NVFX_FP_OP_INPUT_SRC_TC(hw);
955 fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
956 return true;
957 }
958 }
959 return false;
960 default:
961 return true;
962 }
963 }
964
965 static bool
966 nvfx_fragprog_parse_decl_output(struct nvfx_fpc *fpc,
967 const struct tgsi_full_declaration *fdec)
968 {
969 unsigned idx = fdec->Range.First;
970 unsigned hw;
971
972 switch (fdec->Semantic.Name) {
973 case TGSI_SEMANTIC_POSITION:
974 hw = 1;
975 break;
976 case TGSI_SEMANTIC_COLOR:
977 hw = ~0;
978 switch (fdec->Semantic.Index) {
979 case 0: hw = 0; break;
980 case 1: hw = 2; break;
981 case 2: hw = 3; break;
982 case 3: hw = 4; break;
983 }
984 if(hw > ((fpc->is_nv4x) ? 4 : 2)) {
985 NOUVEAU_ERR("bad rcol index\n");
986 return false;
987 }
988 break;
989 default:
990 NOUVEAU_ERR("bad output semantic\n");
991 return false;
992 }
993
994 fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
995 fpc->r_temps |= (1ULL << hw);
996 return true;
997 }
998
999 static bool
1000 nvfx_fragprog_prepare(struct nvfx_fpc *fpc)
1001 {
1002 struct tgsi_parse_context p;
1003 int high_temp = -1, i;
1004
1005 fpc->r_imm = CALLOC(fpc->fp->info.immediate_count, sizeof(struct nvfx_reg));
1006
1007 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
1008 while (!tgsi_parse_end_of_tokens(&p)) {
1009 const union tgsi_full_token *tok = &p.FullToken;
1010
1011 tgsi_parse_token(&p);
1012 switch(tok->Token.Type) {
1013 case TGSI_TOKEN_TYPE_DECLARATION:
1014 {
1015 const struct tgsi_full_declaration *fdec;
1016 fdec = &p.FullToken.FullDeclaration;
1017 switch (fdec->Declaration.File) {
1018 case TGSI_FILE_INPUT:
1019 if (!nvfx_fragprog_parse_decl_input(fpc, fdec))
1020 goto out_err;
1021 break;
1022 case TGSI_FILE_OUTPUT:
1023 if (!nvfx_fragprog_parse_decl_output(fpc, fdec))
1024 goto out_err;
1025 break;
1026 case TGSI_FILE_TEMPORARY:
1027 if (fdec->Range.Last > high_temp) {
1028 high_temp =
1029 fdec->Range.Last;
1030 }
1031 break;
1032 default:
1033 break;
1034 }
1035 }
1036 break;
1037 case TGSI_TOKEN_TYPE_IMMEDIATE:
1038 {
1039 struct tgsi_full_immediate *imm;
1040
1041 imm = &p.FullToken.FullImmediate;
1042 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
1043 assert(fpc->nr_imm < fpc->fp->info.immediate_count);
1044
1045 fpc->r_imm[fpc->nr_imm++] = nvfx_fp_imm(fpc, imm->u[0].Float, imm->u[1].Float, imm->u[2].Float, imm->u[3].Float);
1046 break;
1047 }
1048 default:
1049 break;
1050 }
1051 }
1052 tgsi_parse_free(&p);
1053
1054 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
1055 while (!tgsi_parse_end_of_tokens(&p)) {
1056 const struct tgsi_full_declaration *fdec;
1057 tgsi_parse_token(&p);
1058 switch(p.FullToken.Token.Type) {
1059 case TGSI_TOKEN_TYPE_DECLARATION:
1060 fdec = &p.FullToken.FullDeclaration;
1061 switch (fdec->Declaration.File) {
1062 case TGSI_FILE_INPUT:
1063 if (!nvfx_fragprog_assign_generic(fpc, fdec))
1064 goto out_err;
1065 break;
1066 default:
1067 break;
1068 }
1069 break;
1070 default:
1071 break;
1072 }
1073 }
1074 tgsi_parse_free(&p);
1075
1076 if (++high_temp) {
1077 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
1078 for (i = 0; i < high_temp; i++)
1079 fpc->r_temp[i] = temp(fpc);
1080 fpc->r_temps_discard = 0ULL;
1081 }
1082
1083 return true;
1084
1085 out_err:
1086 FREE(fpc->r_temp);
1087 fpc->r_temp = NULL;
1088
1089 tgsi_parse_free(&p);
1090 return false;
1091 }
1092
/* Env var NVFX_DUMP_FP=1 dumps the TGSI source and the assembled hw
 * fragment program after translation (read once, cached). */
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", false)
1094
1095 void
1096 _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp)
1097 {
1098 struct tgsi_parse_context parse;
1099 struct nvfx_fpc *fpc = NULL;
1100 struct util_dynarray insns;
1101
1102 fp->translated = false;
1103 fp->point_sprite_control = 0;
1104 fp->vp_or = 0;
1105
1106 fpc = CALLOC_STRUCT(nvfx_fpc);
1107 if (!fpc)
1108 goto out_err;
1109
1110 fpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
1111 fpc->max_temps = fpc->is_nv4x ? 48 : 32;
1112 fpc->fp = fp;
1113 fpc->num_regs = 2;
1114 memset(fp->texcoord, 0xff, sizeof(fp->texcoord));
1115
1116 if (fp->info.properties[TGSI_PROPERTY_FS_COORD_ORIGIN])
1117 fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_ORIGIN_INVERTED;
1118 if (fp->info.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER])
1119 fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_CENTER_INTEGER;
1120 if (fp->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
1121 fp->rt_enable |= NV30_3D_RT_ENABLE_MRT;
1122
1123 if (!nvfx_fragprog_prepare(fpc))
1124 goto out_err;
1125
1126 tgsi_parse_init(&parse, fp->pipe.tokens);
1127 util_dynarray_init(&insns);
1128
1129 while (!tgsi_parse_end_of_tokens(&parse)) {
1130 tgsi_parse_token(&parse);
1131
1132 switch (parse.FullToken.Token.Type) {
1133 case TGSI_TOKEN_TYPE_INSTRUCTION:
1134 {
1135 const struct tgsi_full_instruction *finst;
1136
1137 util_dynarray_append(&insns, unsigned, fp->insn_len);
1138 finst = &parse.FullToken.FullInstruction;
1139 if (!nvfx_fragprog_parse_instruction(fpc, finst))
1140 goto out_err;
1141 }
1142 break;
1143 default:
1144 break;
1145 }
1146 }
1147 util_dynarray_append(&insns, unsigned, fp->insn_len);
1148
1149 for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
1150 {
1151 struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i);
1152 fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
1153 }
1154 util_dynarray_fini(&insns);
1155
1156 if(!fpc->is_nv4x)
1157 fp->fp_control |= (fpc->num_regs-1)/2;
1158 else
1159 fp->fp_control |= fpc->num_regs << NV40_3D_FP_CONTROL_TEMP_COUNT__SHIFT;
1160
1161 /* Terminate final instruction */
1162 if(fp->insn)
1163 fp->insn[fpc->inst_offset] |= 0x00000001;
1164
1165 /* Append NOP + END instruction for branches to the end of the program */
1166 fpc->inst_offset = fp->insn_len;
1167 grow_insns(fpc, 4);
1168 fp->insn[fpc->inst_offset + 0] = 0x00000001;
1169 fp->insn[fpc->inst_offset + 1] = 0x00000000;
1170 fp->insn[fpc->inst_offset + 2] = 0x00000000;
1171 fp->insn[fpc->inst_offset + 3] = 0x00000000;
1172
1173 if(debug_get_option_nvfx_dump_fp())
1174 {
1175 debug_printf("\n");
1176 tgsi_dump(fp->pipe.tokens, 0);
1177
1178 debug_printf("\n%s fragment program:\n", fpc->is_nv4x ? "nv4x" : "nv3x");
1179 for (unsigned i = 0; i < fp->insn_len; i += 4)
1180 debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
1181 debug_printf("\n");
1182 }
1183
1184 fp->translated = true;
1185
1186 out:
1187 tgsi_parse_free(&parse);
1188 if (fpc)
1189 {
1190 FREE(fpc->r_temp);
1191 FREE(fpc->r_imm);
1192 util_dynarray_fini(&fpc->if_stack);
1193 util_dynarray_fini(&fpc->label_relocs);
1194 util_dynarray_fini(&fpc->imm_data);
1195 //util_dynarray_fini(&fpc->loop_stack);
1196 FREE(fpc);
1197 }
1198
1199 return;
1200
1201 out_err:
1202 _debug_printf("Error: failed to compile this fragment program:\n");
1203 tgsi_dump(fp->pipe.tokens, 0);
1204 goto out;
1205 }