025989ac5bb29f8e3b7ecaacbe2efb9c5d27e8c0
[mesa.git] / src / gallium / drivers / nvfx / nvfx_fragprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5 #include "util/u_debug.h"
6
7 #include "pipe/p_shader_tokens.h"
8 #include "tgsi/tgsi_parse.h"
9 #include "tgsi/tgsi_util.h"
10 #include "tgsi/tgsi_dump.h"
11
12 #include "nvfx_context.h"
13 #include "nvfx_shader.h"
14 #include "nvfx_resource.h"
15
16 #define MAX_CONSTS 128
17 #define MAX_IMM 32
18
/* Per-compilation state used while translating a TGSI fragment program
 * into NV30/NV40 fragment program machine code. */
struct nvfx_fpc {
	struct nvfx_fragment_program *fp;	/* program being built */

	unsigned r_temps;		/* bitmask of hw temp regs currently allocated */
	unsigned r_temps_discard;	/* temps to free at the end of the current TGSI insn */
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS]; /* hw reg backing each TGSI output */
	struct nvfx_reg *r_temp;	/* hw reg backing each TGSI temporary */

	int num_regs;			/* highest hw temp index written + 1 */

	unsigned inst_offset;		/* uint32 offset of the instruction being emitted */
	unsigned have_const;		/* current insn already grew its constant slot */

	struct {
		int pipe;		/* gallium constant-buffer index, or -1 for an immediate */
		float vals[4];		/* immediate value, valid only when pipe == -1 */
	} consts[MAX_CONSTS];
	int nr_consts;

	struct nvfx_reg imm[MAX_IMM];	/* constant regs holding TGSI immediates */
	unsigned nr_imm;

	unsigned char generic_to_slot[256]; /* semantic idx for each input semantic */

	struct util_dynarray if_stack;	/* insn offsets of IFs awaiting else/endif patch */
	//struct util_dynarray loop_stack;
	struct util_dynarray label_relocs; /* nvfx_relocation: branch fields to patch later */
};
47
48 static INLINE struct nvfx_reg
49 temp(struct nvfx_fpc *fpc)
50 {
51 int idx = ffs(~fpc->r_temps) - 1;
52
53 if (idx < 0) {
54 NOUVEAU_ERR("out of temps!!\n");
55 assert(0);
56 return nvfx_reg(NVFXSR_TEMP, 0);
57 }
58
59 fpc->r_temps |= (1 << idx);
60 fpc->r_temps_discard |= (1 << idx);
61 return nvfx_reg(NVFXSR_TEMP, idx);
62 }
63
64 static INLINE void
65 release_temps(struct nvfx_fpc *fpc)
66 {
67 fpc->r_temps &= ~fpc->r_temps_discard;
68 fpc->r_temps_discard = 0;
69 }
70
71 static INLINE struct nvfx_reg
72 constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
73 {
74 int idx;
75
76 if (fpc->nr_consts == MAX_CONSTS)
77 assert(0);
78 idx = fpc->nr_consts++;
79
80 fpc->consts[idx].pipe = pipe;
81 if (pipe == -1)
82 memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
83 return nvfx_reg(NVFXSR_CONST, idx);
84 }
85
86 static void
87 grow_insns(struct nvfx_fpc *fpc, int size)
88 {
89 struct nvfx_fragment_program *fp = fpc->fp;
90
91 fp->insn_len += size;
92 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
93 }
94
/* Encode source operand 'src' into slot 'pos' (0..2) of the instruction
 * at fpc->inst_offset. May grow the instruction by 4 words to append an
 * embedded constant, so 'hw' is refetched after grow_insns(). */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.reg.type) {
	case NVFXSR_INPUT:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		/* the input selector lives in word 0, not in the source word */
		hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_RELOCATED:
		/* generic input whose hw slot is assigned at link time; record
		 * this instruction so the slot can be patched in later */
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		//printf("adding relocation at %x for %x\n", fpc->inst_offset, src.index);
		util_dynarray_append(&fpc->fp->slot_relocations[src.reg.index], unsigned, fpc->inst_offset);
		break;
	case NVFXSR_CONST:
		/* constants are embedded as 4 extra words after the insn;
		 * grow only once even if several operands are constants */
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		/* grow_insns may have reallocated the buffer */
		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.reg.index].pipe >= 0) {
			/* buffer-backed constant: remember where to upload it */
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.reg.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			/* immediate: bake the value directly into the insn */
			memcpy(&fp->insn[fpc->inst_offset + 4],
			       fpc->consts[src.reg.index].vals,
			       sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	/* absolute-value flags for all three sources live in word 1 */
	if (src.abs)
		hw[1] |= (1 << (29 + pos));

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	hw[pos + 1] |= sr;
}
163
/* Encode destination register 'dst' into the instruction at
 * fpc->inst_offset, and track the highest temp register used. */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		/* NOTE(review): index 1 appears to be the depth output
		 * (fp_control |= 0xe enables it); other outputs are written
		 * as half registers — confirm against NV34TCL docs */
		if (dst.index == 1) {
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		/* no destination: set the "no output" bit */
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
191
/* Append one 4-word hw instruction built from 'insn' to the program:
 * opcode, write mask, scale, saturation, condition-code test/update,
 * optional texture unit, then destination and the three sources. */
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	/* hardware needs to know the program may discard fragments */
	if (insn.op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (insn.sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	if (insn.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	/* unit >= 0 only for texture opcodes; track used samplers */
	if(insn.unit >= 0)
	{
		hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
		fp->samplers |= (1 << insn.unit);
	}

	emit_dst(fpc, insn.dst);
	emit_src(fpc, 0, insn.src[0]);
	emit_src(fpc, 1, insn.src[1]);
	emit_src(fpc, 2, insn.src[2]);
}
232
/* Build an ALU nvfx_insn: saturate s, opcode o, dest d, mask m, sources s0-s2
 * (texture unit fixed to -1, i.e. none). */
#define arith(s,o,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
		(d), (m), (s0), (s1), (s2))

/* Build a texture nvfx_insn for sampler unit u.
 * NOTE(review): s1 and s2 are accepted but ignored — the expansion always
 * substitutes 'none', and therefore requires a variable named 'none' to be
 * in scope at every call site. */
#define tex(s,o,u,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
		(d), (m), (s0), none, none)
240
/* IF src.x != 0, as TGSI specifies.
 * Emits a condition-code write of src, then the hw IF instruction.
 * The else/endif offsets in hw[2]/hw[3] are left zero and patched by the
 * ELSE/ENDIF opcode handlers, so the IF's offset is pushed on if_stack. */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
	const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
	uint32_t *hw;
	/* MOVRC: write the condition register from src.x */
	insn.cc_update = 1;
	nvfx_fp_emit(fpc, insn);

	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* Use .xxxx swizzle so that we check only src[0].x */
	hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
		(NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
	hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
	hw[3] = 0; /* | endif_offset */
	util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
268
/* Emit an unconditional subroutine CALL to TGSI label 'target'.
 * The call offset in hw[2] is not known yet and is patched later through
 * label_relocs. */
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
	/* always-true condition (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
	hw[3] = 0;
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
289
/* Emit an unconditional RET from a subroutine. */
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
	/* always-true condition (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
305
/* Emit a REP (hardware loop) executing 'count' iterations, with the loop
 * end at TGSI label 'target'; the end offset in hw[3] is patched later
 * through label_relocs. All three hw count fields get the same value. */
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* always-true condition (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
		(count << NV40_FP_OP_REP_COUNT1_SHIFT) |
		(count << NV40_FP_OP_REP_COUNT2_SHIFT) |
		(count << NV40_FP_OP_REP_COUNT3_SHIFT);
	hw[3] = 0; /* | end_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
	//util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
331
/* Emit an unconditional forward branch to TGSI label 'target',
 * implemented as an IF whose condition is always false (FL) so execution
 * always jumps to the else offset; both else and endif offsets point at
 * the target and are patched later through label_relocs.
 * warning: this only works forward, and probably only if not inside any IF */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* always-false condition (FL) forces the jump */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
	hw[3] = 0; /* | endif_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
357
/* Emit an unconditional BRK, breaking out of the innermost REP loop. */
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE;
	/* always-true condition (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
374
/* Map a TGSI full source register onto an nvfx_src: pick the hw register
 * (or relocated slot for generic inputs), then copy over the abs/negate
 * modifiers and the four swizzle selects. */
static INLINE struct nvfx_src
tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
{
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		/* fixed-function inputs map to dedicated hw input slots;
		 * generics get link-time slots via generic_to_slot[] */
		if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_POSITION) {
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_POSITION);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_COLOR) {
			if(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0)
				src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL0);
			else if(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 1)
				src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL1);
			else
				assert(0);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG) {
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_FOGC);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FACE) {
			/* TODO: check this has the correct values */
			/* XXX: what do we do for nv30 here (assuming it lacks facing)?! */
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NV40_FP_OP_INPUT_SRC_FACING);
		} else {
			assert(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_GENERIC);
			src.reg = nvfx_reg(NVFXSR_RELOCATED, fpc->generic_to_slot[fpc->fp->info.input_semantic_index[fsrc->Register.Index]]);
		}
		break;
	case TGSI_FILE_CONSTANT:
		/* allocate a buffer-tracked constant slot (value filled at upload) */
		src.reg = constant(fpc, fsrc->Register.Index, NULL);
		break;
	case TGSI_FILE_IMMEDIATE:
		assert(fsrc->Register.Index < fpc->nr_imm);
		src.reg = fpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = fpc->r_temp[fsrc->Register.Index];
		break;
	/* NV40 fragprog result regs are just temps, so this is simple */
	case TGSI_FILE_OUTPUT:
		src.reg = fpc->r_result[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg.index = 0;
		src.reg.type = 0;
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	return src;
}
434
435 static INLINE struct nvfx_reg
436 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
437 switch (fdst->Register.File) {
438 case TGSI_FILE_OUTPUT:
439 return fpc->r_result[fdst->Register.Index];
440 case TGSI_FILE_TEMPORARY:
441 return fpc->r_temp[fdst->Register.Index];
442 case TGSI_FILE_NULL:
443 return nvfx_reg(NVFXSR_NONE, 0);
444 default:
445 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
446 return nvfx_reg(NVFXSR_NONE, 0);
447 }
448 }
449
450 static INLINE int
451 tgsi_mask(uint tgsi)
452 {
453 int mask = 0;
454
455 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
456 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
457 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
458 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
459 return mask;
460 }
461
462 static boolean
463 nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
464 const struct tgsi_full_instruction *finst)
465 {
466 const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
467 struct nvfx_insn insn;
468 struct nvfx_src src[3], tmp, tmp2;
469 struct nvfx_reg dst;
470 int mask, sat, unit = 0;
471 int ai = -1, ci = -1, ii = -1;
472 int i;
473
474 if (finst->Instruction.Opcode == TGSI_OPCODE_END)
475 return TRUE;
476
477 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
478 const struct tgsi_full_src_register *fsrc;
479
480 fsrc = &finst->Src[i];
481 if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
482 src[i] = tgsi_src(fpc, fsrc);
483 }
484 }
485
486 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
487 const struct tgsi_full_src_register *fsrc;
488
489 fsrc = &finst->Src[i];
490
491 switch (fsrc->Register.File) {
492 case TGSI_FILE_INPUT:
493 if (ai == -1 || ai == fsrc->Register.Index) {
494 ai = fsrc->Register.Index;
495 src[i] = tgsi_src(fpc, fsrc);
496 } else {
497 src[i] = nvfx_src(temp(fpc));
498 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
499 }
500 break;
501 case TGSI_FILE_CONSTANT:
502 if ((ci == -1 && ii == -1) ||
503 ci == fsrc->Register.Index) {
504 ci = fsrc->Register.Index;
505 src[i] = tgsi_src(fpc, fsrc);
506 } else {
507 src[i] = nvfx_src(temp(fpc));
508 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
509 }
510 break;
511 case TGSI_FILE_IMMEDIATE:
512 if ((ci == -1 && ii == -1) ||
513 ii == fsrc->Register.Index) {
514 ii = fsrc->Register.Index;
515 src[i] = tgsi_src(fpc, fsrc);
516 } else {
517 src[i] = nvfx_src(temp(fpc));
518 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
519 }
520 break;
521 case TGSI_FILE_TEMPORARY:
522 /* handled above */
523 break;
524 case TGSI_FILE_SAMPLER:
525 unit = fsrc->Register.Index;
526 break;
527 case TGSI_FILE_OUTPUT:
528 break;
529 default:
530 NOUVEAU_ERR("bad src file\n");
531 return FALSE;
532 }
533 }
534
535 dst = tgsi_dst(fpc, &finst->Dst[0]);
536 mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
537 sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
538
539 switch (finst->Instruction.Opcode) {
540 case TGSI_OPCODE_ABS:
541 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
542 break;
543 case TGSI_OPCODE_ADD:
544 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
545 break;
546 case TGSI_OPCODE_CMP:
547 insn = arith(0, MOV, none.reg, mask, src[0], none, none);
548 insn.cc_update = 1;
549 nvfx_fp_emit(fpc, insn);
550
551 insn = arith(sat, MOV, dst, mask, src[2], none, none);
552 insn.cc_test = NVFX_COND_GE;
553 nvfx_fp_emit(fpc, insn);
554
555 insn = arith(sat, MOV, dst, mask, src[1], none, none);
556 insn.cc_test = NVFX_COND_LT;
557 nvfx_fp_emit(fpc, insn);
558 break;
559 case TGSI_OPCODE_COS:
560 nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
561 break;
562 case TGSI_OPCODE_DDX:
563 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
564 tmp = nvfx_src(temp(fpc));
565 nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
566 nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
567 nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
568 nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
569 } else {
570 nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
571 }
572 break;
573 case TGSI_OPCODE_DDY:
574 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
575 tmp = nvfx_src(temp(fpc));
576 nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
577 nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
578 nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
579 nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
580 } else {
581 nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
582 }
583 break;
584 case TGSI_OPCODE_DP2:
585 tmp = nvfx_src(temp(fpc));
586 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none));
587 nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
588 break;
589 case TGSI_OPCODE_DP3:
590 nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
591 break;
592 case TGSI_OPCODE_DP4:
593 nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
594 break;
595 case TGSI_OPCODE_DPH:
596 tmp = nvfx_src(temp(fpc));
597 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
598 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
599 break;
600 case TGSI_OPCODE_DST:
601 nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
602 break;
603 case TGSI_OPCODE_EX2:
604 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
605 break;
606 case TGSI_OPCODE_FLR:
607 nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
608 break;
609 case TGSI_OPCODE_FRC:
610 nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
611 break;
612 case TGSI_OPCODE_KILP:
613 nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
614 break;
615 case TGSI_OPCODE_KIL:
616 insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
617 insn.cc_update = 1;
618 nvfx_fp_emit(fpc, insn);
619
620 insn = arith(0, KIL, none.reg, 0, none, none, none);
621 insn.cc_test = NVFX_COND_LT;
622 nvfx_fp_emit(fpc, insn);
623 break;
624 case TGSI_OPCODE_LG2:
625 nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
626 break;
627 // case TGSI_OPCODE_LIT:
628 case TGSI_OPCODE_LRP:
629 if(!nvfx->is_nv4x)
630 nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
631 else {
632 tmp = nvfx_src(temp(fpc));
633 nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
634 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
635 }
636 break;
637 case TGSI_OPCODE_MAD:
638 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
639 break;
640 case TGSI_OPCODE_MAX:
641 nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
642 break;
643 case TGSI_OPCODE_MIN:
644 nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
645 break;
646 case TGSI_OPCODE_MOV:
647 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
648 break;
649 case TGSI_OPCODE_MUL:
650 nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
651 break;
652 case TGSI_OPCODE_NOP:
653 break;
654 case TGSI_OPCODE_POW:
655 if(!nvfx->is_nv4x)
656 nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
657 else {
658 tmp = nvfx_src(temp(fpc));
659 nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
660 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
661 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
662 }
663 break;
664 case TGSI_OPCODE_RCP:
665 nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
666 break;
667 case TGSI_OPCODE_RFL:
668 if(!nvfx->is_nv4x)
669 nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none));
670 else {
671 tmp = nvfx_src(temp(fpc));
672 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none));
673 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none));
674 insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
675 insn.scale = NVFX_FP_OP_DST_SCALE_2X;
676 nvfx_fp_emit(fpc, insn);
677 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])));
678 }
679 break;
680 case TGSI_OPCODE_RSQ:
681 if(!nvfx->is_nv4x)
682 nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
683 else {
684 tmp = nvfx_src(temp(fpc));
685 insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
686 insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
687 nvfx_fp_emit(fpc, insn);
688 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
689 }
690 break;
691 case TGSI_OPCODE_SCS:
692 /* avoid overwriting the source */
693 if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
694 {
695 if (mask & NVFX_FP_MASK_X)
696 nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
697 if (mask & NVFX_FP_MASK_Y)
698 nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
699 }
700 else
701 {
702 if (mask & NVFX_FP_MASK_Y)
703 nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
704 if (mask & NVFX_FP_MASK_X)
705 nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
706 }
707 break;
708 case TGSI_OPCODE_SEQ:
709 nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
710 break;
711 case TGSI_OPCODE_SFL:
712 nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none));
713 break;
714 case TGSI_OPCODE_SGE:
715 nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
716 break;
717 case TGSI_OPCODE_SGT:
718 nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
719 break;
720 case TGSI_OPCODE_SIN:
721 nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
722 break;
723 case TGSI_OPCODE_SLE:
724 nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
725 break;
726 case TGSI_OPCODE_SLT:
727 nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
728 break;
729 case TGSI_OPCODE_SNE:
730 nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
731 break;
732 case TGSI_OPCODE_SSG:
733 tmp = nvfx_src(temp(fpc));
734 tmp2 = nvfx_src(temp(fpc));
735 nvfx_fp_emit(fpc, arith(0, SGT, tmp.reg, mask, src[0], nvfx_src(nvfx_reg(NVFXSR_CONST, 0)), none));
736 nvfx_fp_emit(fpc, arith(0, SLT, tmp.reg, mask, src[0], nvfx_src(nvfx_reg(NVFXSR_CONST, 0)), none));
737 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, tmp, neg(tmp2), none));
738 break;
739 case TGSI_OPCODE_STR:
740 nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none));
741 break;
742 case TGSI_OPCODE_SUB:
743 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
744 break;
745 case TGSI_OPCODE_TEX:
746 nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
747 break;
748 case TGSI_OPCODE_TRUNC:
749 tmp = nvfx_src(temp(fpc));
750 insn = arith(0, MOV, none.reg, mask, src[0], none, none);
751 insn.cc_update = 1;
752 nvfx_fp_emit(fpc, insn);
753
754 nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none));
755 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none));
756
757 insn = arith(sat, MOV, dst, mask, neg(tmp), none, none);
758 insn.cc_test = NVFX_COND_LT;
759 nvfx_fp_emit(fpc, insn);
760 break;
761 case TGSI_OPCODE_TXB:
762 nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
763 break;
764 case TGSI_OPCODE_TXL:
765 if(nvfx->is_nv4x)
766 nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none));
767 else /* unsupported on nv30, use TEX and hope they like it */
768 nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
769 break;
770 case TGSI_OPCODE_TXP:
771 nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
772 break;
773 case TGSI_OPCODE_XPD:
774 tmp = nvfx_src(temp(fpc));
775 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
776 nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
777 break;
778
779 case TGSI_OPCODE_IF:
780 // MOVRC0 R31 (TR0.xyzw), R<src>:
781 // IF (NE.xxxx) ELSE <else> END <end>
782 if(!nvfx->is_nv4x)
783 goto nv3x_cflow;
784 nv40_fp_if(fpc, src[0]);
785 break;
786
787 case TGSI_OPCODE_ELSE:
788 {
789 uint32_t *hw;
790 if(!nvfx->is_nv4x)
791 goto nv3x_cflow;
792 assert(util_dynarray_contains(&fpc->if_stack, unsigned));
793 hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
794 hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
795 break;
796 }
797
798 case TGSI_OPCODE_ENDIF:
799 {
800 uint32_t *hw;
801 if(!nvfx->is_nv4x)
802 goto nv3x_cflow;
803 assert(util_dynarray_contains(&fpc->if_stack, unsigned));
804 hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
805 if(!hw[2])
806 hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
807 hw[3] = fpc->fp->insn_len;
808 break;
809 }
810
811 case TGSI_OPCODE_BRA:
812 /* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */
813 /* no state tracker uses this, so don't implement this for now */
814 assert(0);
815 nv40_fp_bra(fpc, finst->Label.Label);
816 break;
817
818 case TGSI_OPCODE_BGNSUB:
819 case TGSI_OPCODE_ENDSUB:
820 /* nothing to do here */
821 break;
822
823 case TGSI_OPCODE_CAL:
824 if(!nvfx->is_nv4x)
825 goto nv3x_cflow;
826 nv40_fp_cal(fpc, finst->Label.Label);
827 break;
828
829 case TGSI_OPCODE_RET:
830 if(!nvfx->is_nv4x)
831 goto nv3x_cflow;
832 nv40_fp_ret(fpc);
833 break;
834
835 case TGSI_OPCODE_BGNLOOP:
836 if(!nvfx->is_nv4x)
837 goto nv3x_cflow;
838 /* TODO: we should support using two nested REPs to allow a > 255 iteration count */
839 nv40_fp_rep(fpc, 255, finst->Label.Label);
840 break;
841
842 case TGSI_OPCODE_ENDLOOP:
843 break;
844
845 case TGSI_OPCODE_BRK:
846 if(!nvfx->is_nv4x)
847 goto nv3x_cflow;
848 nv40_fp_brk(fpc);
849 break;
850
851 case TGSI_OPCODE_CONT:
852 {
853 static int warned = 0;
854 if(!warned) {
855 NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
856 warned = 1;
857 }
858 break;
859 }
860
861 default:
862 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
863 return FALSE;
864 }
865
866 out:
867 release_temps(fpc);
868 return TRUE;
869 nv3x_cflow:
870 {
871 static int warned = 0;
872 if(!warned) {
873 NOUVEAU_ERR(
874 "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
875 "If rendering is incorrect, try to disable GLSL support in the application.\n");
876 warned = 1;
877 }
878 }
879 goto out;
880 }
881
882 static boolean
883 nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
884 const struct tgsi_full_declaration *fdec)
885 {
886 unsigned idx = fdec->Range.First;
887 unsigned hw;
888
889 switch (fdec->Semantic.Name) {
890 case TGSI_SEMANTIC_POSITION:
891 hw = 1;
892 break;
893 case TGSI_SEMANTIC_COLOR:
894 hw = ~0;
895 switch (fdec->Semantic.Index) {
896 case 0: hw = 0; break;
897 case 1: hw = 2; break;
898 case 2: hw = 3; break;
899 case 3: hw = 4; break;
900 }
901 if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
902 NOUVEAU_ERR("bad rcol index\n");
903 return FALSE;
904 }
905 break;
906 default:
907 NOUVEAU_ERR("bad output semantic\n");
908 return FALSE;
909 }
910
911 fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
912 fpc->r_temps |= (1 << hw);
913 return TRUE;
914 }
915
/* Pre-pass over the TGSI tokens: lay out generic input slots, allocate
 * the zero constant (always slot 0, relied upon by SSG), register output
 * declarations, gather immediates, and reserve hw temps for all TGSI
 * temporaries. Returns FALSE if the program needs more than 8 input
 * slots or an output declaration is invalid. */
static boolean
nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
{
	struct tgsi_parse_context p;
	int high_temp = -1, i;
	struct util_semantic_set set;
	float const0v[4] = {0, 0, 0, 0};
	struct nvfx_reg const0;

	fpc->fp->num_slots = util_semantic_set_from_program_file(&set, fpc->fp->pipe.tokens, TGSI_FILE_INPUT);
	if(fpc->fp->num_slots > 8)
		return FALSE;
	util_semantic_layout_from_set(fpc->fp->slot_to_generic, &set, 0, 8);
	util_semantic_table_from_layout(fpc->generic_to_slot, fpc->fp->slot_to_generic, 0, 8);

	memset(fpc->fp->slot_to_fp_input, 0xff, sizeof(fpc->fp->slot_to_fp_input));

	/* reserve constant slot 0 for the value zero */
	const0 = constant(fpc, -1, const0v);
	assert(const0.index == 0);

	tgsi_parse_init(&p, fpc->fp->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;
			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_OUTPUT:
				if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
					goto out_err;
				break;
			case TGSI_FILE_TEMPORARY:
				/* track the highest temporary index declared */
				if (fdec->Range.Last > high_temp) {
					high_temp =
						fdec->Range.Last;
				}
				break;
			default:
				break;
			}
		}
			break;
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			struct tgsi_full_immediate *imm;
			float vals[4];

			imm = &p.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(fpc->nr_imm < MAX_IMM);

			vals[0] = imm->u[0].Float;
			vals[1] = imm->u[1].Float;
			vals[2] = imm->u[2].Float;
			vals[3] = imm->u[3].Float;
			fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	/* ++high_temp turns the max index into a count (0 if none declared) */
	if (++high_temp) {
		/* NOTE(review): CALLOC result is not checked — confirm the
		 * driver's OOM policy before relying on this */
		fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			fpc->r_temp[i] = temp(fpc);
		/* TGSI temporaries live for the whole program: keep them allocated */
		fpc->r_temps_discard = 0;
	}

	return TRUE;

out_err:
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	tgsi_parse_free(&p);
	return FALSE;
}
999
/* Setting NVFX_DUMP_FP=1 in the environment dumps translated programs. */
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
1001
1002 static void
1003 nvfx_fragprog_translate(struct nvfx_context *nvfx,
1004 struct nvfx_fragment_program *fp)
1005 {
1006 struct tgsi_parse_context parse;
1007 struct nvfx_fpc *fpc = NULL;
1008 struct util_dynarray insns;
1009
1010 fpc = CALLOC(1, sizeof(struct nvfx_fpc));
1011 if (!fpc)
1012 return;
1013 fpc->fp = fp;
1014 fpc->num_regs = 2;
1015
1016 if (!nvfx_fragprog_prepare(nvfx, fpc)) {
1017 FREE(fpc);
1018 return;
1019 }
1020
1021 tgsi_parse_init(&parse, fp->pipe.tokens);
1022
1023 util_dynarray_init(&insns);
1024 while (!tgsi_parse_end_of_tokens(&parse)) {
1025 tgsi_parse_token(&parse);
1026
1027 switch (parse.FullToken.Token.Type) {
1028 case TGSI_TOKEN_TYPE_INSTRUCTION:
1029 {
1030 const struct tgsi_full_instruction *finst;
1031
1032 util_dynarray_append(&insns, unsigned, fp->insn_len);
1033 finst = &parse.FullToken.FullInstruction;
1034 if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
1035 goto out_err;
1036 }
1037 break;
1038 default:
1039 break;
1040 }
1041 }
1042 util_dynarray_append(&insns, unsigned, fp->insn_len);
1043
1044 for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
1045 {
1046 struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i);
1047 fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
1048 }
1049 util_dynarray_fini(&insns);
1050
1051 if(!nvfx->is_nv4x)
1052 fp->fp_control |= (fpc->num_regs-1)/2;
1053 else
1054 fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;
1055
1056 /* Terminate final instruction */
1057 if(fp->insn)
1058 fp->insn[fpc->inst_offset] |= 0x00000001;
1059
1060 /* Append NOP + END instruction for branches to the end of the program */
1061 fpc->inst_offset = fp->insn_len;
1062 grow_insns(fpc, 4);
1063 fp->insn[fpc->inst_offset + 0] = 0x00000001;
1064 fp->insn[fpc->inst_offset + 1] = 0x00000000;
1065 fp->insn[fpc->inst_offset + 2] = 0x00000000;
1066 fp->insn[fpc->inst_offset + 3] = 0x00000000;
1067
1068 if(debug_get_option_nvfx_dump_fp())
1069 {
1070 debug_printf("\n");
1071 tgsi_dump(fp->pipe.tokens, 0);
1072
1073 debug_printf("\n%s fragment program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
1074 for (unsigned i = 0; i < fp->insn_len; i += 4)
1075 debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
1076 debug_printf("\n");
1077 }
1078
1079 fp->translated = TRUE;
1080 out_err:
1081 tgsi_parse_free(&parse);
1082 if (fpc->r_temp)
1083 FREE(fpc->r_temp);
1084 util_dynarray_fini(&fpc->if_stack);
1085 util_dynarray_fini(&fpc->label_relocs);
1086 //util_dynarray_fini(&fpc->loop_stack);
1087 FREE(fpc);
1088 }
1089
/**
 * Copy fragment program words into a (possibly mapped) buffer.
 * On big-endian hosts the 16-bit halves of each 32-bit word are swapped,
 * as the hardware expects little-endian halfword order.
 * len must be a multiple of 4 on the big-endian path.
 */
static inline void
nvfx_fp_memcpy(void* dst, const void* src, size_t len)
{
#ifndef WORDS_BIGENDIAN
	memcpy(dst, src, len);
#else
	size_t i;
	for(i = 0; i < len; i += 4) {
		/* bug fix: the original assigned the pointer itself instead of
		 * dereferencing it (uint32_t v = (uint32_t*)...) */
		uint32_t v = *(const uint32_t*)((const char*)src + i);
		*(uint32_t*)((char*)dst + i) = (v >> 16) | (v << 16);
	}
#endif
}
1103
1104 void
1105 nvfx_fragprog_validate(struct nvfx_context *nvfx)
1106 {
1107 struct nouveau_channel* chan = nvfx->screen->base.channel;
1108 struct nvfx_fragment_program *fp = nvfx->fragprog;
1109 int update = 0;
1110 struct nvfx_vertex_program* vp;
1111 unsigned sprite_coord_enable;
1112
1113 if (!fp->translated)
1114 {
1115 const int min_size = 4096;
1116
1117 nvfx_fragprog_translate(nvfx, fp);
1118 if (!fp->translated) {
1119 static unsigned dummy[8] = {1, 0, 0, 0, 1, 0, 0, 0};
1120 static int warned = 0;
1121 if(!warned)
1122 {
1123 fprintf(stderr, "nvfx: failed to translate fragment program!\n");
1124 warned = 1;
1125 }
1126
1127 /* use dummy program: we cannot fail here */
1128 fp->translated = TRUE;
1129 fp->insn = malloc(sizeof(dummy));
1130 memcpy(fp->insn, dummy, sizeof(dummy));
1131 fp->insn_len = sizeof(dummy) / sizeof(dummy[0]);
1132 }
1133 update = TRUE;
1134
1135 fp->prog_size = (fp->insn_len * 4 + 63) & ~63;
1136
1137 if(fp->prog_size >= min_size)
1138 fp->progs_per_bo = 1;
1139 else
1140 fp->progs_per_bo = min_size / fp->prog_size;
1141 fp->bo_prog_idx = fp->progs_per_bo - 1;
1142 }
1143
1144 /* we must update constants even on "just" fragprog changes, because
1145 we don't check whether the current constant buffer matches the latest
1146 one bound to this fragment program */
1147 if (nvfx->dirty & (NVFX_NEW_FRAGCONST | NVFX_NEW_FRAGPROG))
1148 update = TRUE;
1149
1150 vp = nvfx->render_mode == HW ? nvfx->vertprog : nvfx->swtnl.vertprog;
1151 if (fp->last_vp_id != vp->id) {
1152 char* vp_sem_table = vp->generic_to_fp_input;
1153 unsigned char* fp_semantics = fp->slot_to_generic;
1154 unsigned diff = 0;
1155 unsigned char* cur_slots;
1156 fp->last_vp_id = nvfx->vertprog->id;
1157 cur_slots = fp->slot_to_fp_input;
1158 for(unsigned i = 0; i < fp->num_slots; ++i) {
1159 unsigned char slot_mask = vp_sem_table[fp_semantics[i]];
1160 diff |= (slot_mask >> 4) & (slot_mask ^ cur_slots[i]);
1161 }
1162
1163 if(diff)
1164 {
1165 for(unsigned i = 0; i < fp->num_slots; ++i) {
1166 /* if 0xff, then this will write to the dummy value at fp->last_layout_mask[0] */
1167 fp->slot_to_fp_input[i] = vp_sem_table[fp_semantics[i]] & 0xf;
1168 //printf("fp: GENERIC[%i] from fpreg %i\n", fp_semantics[i], fp->slot_to_fp_input[i]);
1169 }
1170
1171 fp->progs_left_with_obsolete_slot_assignments = fp->progs;
1172 update = TRUE;
1173 }
1174 }
1175
1176 // last_sprite_coord_enable
1177 sprite_coord_enable = nvfx->rasterizer->pipe.point_quad_rasterization * nvfx->rasterizer->pipe.sprite_coord_enable;
1178 if(fp->last_sprite_coord_enable != sprite_coord_enable)
1179 {
1180 unsigned texcoord_mask = vp->texcoord_ouput_mask;
1181 fp->last_sprite_coord_enable = sprite_coord_enable;
1182 fp->point_sprite_control = 0;
1183 for(unsigned i = 0; i < fp->num_slots; ++i) {
1184 if((1 << fp->slot_to_generic[i]) & sprite_coord_enable)
1185 {
1186 unsigned fpin = fp->slot_to_fp_input[i];
1187 //printf("sprite: slot %i generic %i had texcoord %i\n", i, fp->slot_to_generic[i], fpin - NVFX_FP_OP_INPUT_SRC_TC0);
1188 if(fpin >= 0x0f)
1189 {
1190 unsigned tc = __builtin_ctz(~texcoord_mask);
1191 texcoord_mask |= (1 << tc);
1192 fp->slot_to_fp_input[i] = fpin = NVFX_FP_OP_INPUT_SRC_TC(tc);
1193
1194 fp->progs_left_with_obsolete_slot_assignments = fp->progs;
1195 update = TRUE;
1196 }
1197 //printf("sprite: slot %i texcoord %i\n", i, fpin - NVFX_FP_OP_INPUT_SRC_TC0);
1198 fp->point_sprite_control |= (1 << (fpin - NVFX_FP_OP_INPUT_SRC_TC0 + 8));
1199 }
1200 else
1201 {
1202 unsigned fpin = fp->slot_to_fp_input[i];
1203 if(!(vp->texcoord_ouput_mask & (1 << (fpin - NVFX_FP_OP_INPUT_SRC_TC0))))
1204 {
1205 fp->slot_to_fp_input[i] = 0x0f;
1206
1207 fp->progs_left_with_obsolete_slot_assignments = fp->progs;
1208 update = TRUE;
1209 }
1210 }
1211 }
1212 }
1213
1214 if(update) {
1215 int offset;
1216 uint32_t* fpmap;
1217
1218 ++fp->bo_prog_idx;
1219 if(fp->bo_prog_idx >= fp->progs_per_bo)
1220 {
1221 if(fp->fpbo && !nouveau_bo_busy(fp->fpbo->next->bo, NOUVEAU_BO_WR))
1222 {
1223 fp->fpbo = fp->fpbo->next;
1224 }
1225 else
1226 {
1227 struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + (fp->prog_size + 8) * fp->progs_per_bo, 16);
1228 uint8_t* map;
1229 uint8_t* buf;
1230
1231 fpbo->slots = (unsigned char*)&fpbo->insn[(fp->prog_size) * fp->progs_per_bo];
1232 memset(fpbo->slots, 0, 8 * fp->progs_per_bo);
1233 if(fp->fpbo)
1234 {
1235 fpbo->next = fp->fpbo->next;
1236 fp->fpbo->next = fpbo;
1237 }
1238 else
1239 fpbo->next = fpbo;
1240 fp->fpbo = fpbo;
1241 fpbo->bo = 0;
1242 fp->progs += fp->progs_per_bo;
1243 fp->progs_left_with_obsolete_slot_assignments += fp->progs_per_bo;
1244 nouveau_bo_new(nvfx->screen->base.device, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 64, fp->prog_size * fp->progs_per_bo, &fpbo->bo);
1245 nouveau_bo_map(fpbo->bo, NOUVEAU_BO_NOSYNC);
1246
1247 map = fpbo->bo->map;
1248 buf = (uint8_t*)fpbo->insn;
1249 for(unsigned i = 0; i < fp->progs_per_bo; ++i)
1250 {
1251 memcpy(buf, fp->insn, fp->insn_len * 4);
1252 nvfx_fp_memcpy(map, fp->insn, fp->insn_len * 4);
1253 map += fp->prog_size;
1254 buf += fp->prog_size;
1255 }
1256 }
1257 fp->bo_prog_idx = 0;
1258 }
1259
1260 offset = fp->bo_prog_idx * fp->prog_size;
1261 fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
1262
1263 if(nvfx->constbuf[PIPE_SHADER_FRAGMENT]) {
1264 struct pipe_resource* constbuf = nvfx->constbuf[PIPE_SHADER_FRAGMENT];
1265 uint32_t* map = (uint32_t*)nvfx_buffer(constbuf)->data;
1266 uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
1267 uint32_t* buf = (uint32_t*)((char*)fp->fpbo->insn + offset);
1268 int i;
1269 for (i = 0; i < fp->nr_consts; ++i) {
1270 unsigned off = fp->consts[i].offset;
1271 unsigned idx = fp->consts[i].index * 4;
1272
1273 /* TODO: is checking a good idea? */
1274 if(memcmp(&buf[off], &map[idx], 4 * sizeof(uint32_t))) {
1275 memcpy(&buf[off], &map[idx], 4 * sizeof(uint32_t));
1276 nvfx_fp_memcpy(&fpmap[off], &map[idx], 4 * sizeof(uint32_t));
1277 }
1278 }
1279 }
1280
1281 if(fp->progs_left_with_obsolete_slot_assignments) {
1282 unsigned char* fpbo_slots = &fp->fpbo->slots[fp->bo_prog_idx * 8];
1283 for(unsigned i = 0; i < fp->num_slots; ++i) {
1284 unsigned value = fp->slot_to_fp_input[i];;
1285 if(value != fpbo_slots[i]) {
1286 unsigned* p = (unsigned*)fp->slot_relocations[i].data;
1287 unsigned* pend = (unsigned*)((char*)fp->slot_relocations[i].data + fp->slot_relocations[i].size);
1288 for(; p != pend; ++p) {
1289 unsigned off = *p;
1290 unsigned dw = fp->insn[off];
1291 dw = (dw & ~NVFX_FP_OP_INPUT_SRC_MASK) | (value << NVFX_FP_OP_INPUT_SRC_SHIFT);
1292 nvfx_fp_memcpy(&fpmap[*p], &dw, sizeof(dw));
1293 }
1294 fpbo_slots[i] = value;
1295 }
1296 }
1297 --fp->progs_left_with_obsolete_slot_assignments;
1298 }
1299 }
1300
1301 if(update || (nvfx->dirty & NVFX_NEW_FRAGPROG)) {
1302 int offset = fp->bo_prog_idx * fp->prog_size;
1303 MARK_RING(chan, 8, 1);
1304 OUT_RING(chan, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1));
1305 OUT_RELOC(chan, fp->fpbo->bo, offset, NOUVEAU_BO_VRAM |
1306 NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
1307 NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
1308 NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
1309 OUT_RING(chan, RING_3D(NV34TCL_FP_CONTROL, 1));
1310 OUT_RING(chan, fp->fp_control);
1311 if(!nvfx->is_nv4x) {
1312 OUT_RING(chan, RING_3D(NV34TCL_FP_REG_CONTROL, 1));
1313 OUT_RING(chan, (1<<16)|0x4);
1314 OUT_RING(chan, RING_3D(NV34TCL_TX_UNITS_ENABLE, 1));
1315 OUT_RING(chan, fp->samplers);
1316 }
1317 }
1318
1319 if(nvfx->dirty & (NVFX_NEW_FRAGPROG | NVFX_NEW_SPRITE))
1320 {
1321 WAIT_RING(chan, 2);
1322 OUT_RING(chan, RING_3D(NV34TCL_POINT_SPRITE, 1));
1323 OUT_RING(chan, fp->point_sprite_control | nvfx->rasterizer->pipe.point_quad_rasterization);
1324 }
1325 }
1326
1327 void
1328 nvfx_fragprog_relocate(struct nvfx_context *nvfx)
1329 {
1330 struct nouveau_channel* chan = nvfx->screen->base.channel;
1331 struct nvfx_fragment_program *fp = nvfx->fragprog;
1332 struct nouveau_bo* bo = fp->fpbo->bo;
1333 int offset = fp->bo_prog_idx * fp->prog_size;
1334 unsigned fp_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD; // TODO: GART?
1335 fp_flags |= NOUVEAU_BO_DUMMY;
1336 MARK_RING(chan, 2, 2);
1337 OUT_RELOC(chan, bo, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0);
1338 OUT_RELOC(chan, bo, offset, fp_flags | NOUVEAU_BO_LOW |
1339 NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
1340 NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
1341 }
1342
1343 void
1344 nvfx_fragprog_destroy(struct nvfx_context *nvfx,
1345 struct nvfx_fragment_program *fp)
1346 {
1347 unsigned i;
1348 struct nvfx_fragment_program_bo* fpbo = fp->fpbo;
1349 if(fpbo)
1350 {
1351 do
1352 {
1353 struct nvfx_fragment_program_bo* next = fpbo->next;
1354 nouveau_bo_unmap(fpbo->bo);
1355 nouveau_bo_ref(0, &fpbo->bo);
1356 free(fpbo);
1357 fpbo = next;
1358 }
1359 while(fpbo != fp->fpbo);
1360 }
1361
1362 for(i = 0; i < 8; ++i)
1363 util_dynarray_fini(&fp->slot_relocations[i]);
1364
1365 if (fp->insn_len)
1366 FREE(fp->insn);
1367 }