12b002a8198a3eedc2c8c9c5a3d77dad0eedbf32
[mesa.git] / src / gallium / drivers / nvfx / nvfx_fragprog.c
1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5 #include "util/u_debug.h"
6
7 #include "pipe/p_shader_tokens.h"
8 #include "tgsi/tgsi_parse.h"
9 #include "tgsi/tgsi_util.h"
10 #include "tgsi/tgsi_dump.h"
11 #include "tgsi/tgsi_ureg.h"
12
13 #include "nvfx_context.h"
14 #include "nvfx_shader.h"
15 #include "nvfx_resource.h"
16
17 #define MAX_CONSTS 128
18 #define MAX_IMM 32
19
/* Per-compilation state for translating a TGSI fragment program into
 * NV30/NV40 fragment-program machine code. */
struct nvfx_fpc {
	struct nvfx_pipe_fragment_program* pfp;  /* source program (TGSI tokens + info) */
	struct nvfx_fragment_program *fp;        /* output: emitted hw program */

	/* Bitmask of hardware temp registers currently allocated (bit i = temp i). */
	unsigned long long r_temps;
	/* Subset of r_temps to be freed when the current TGSI instruction ends. */
	unsigned long long r_temps_discard;
	/* Hardware register assigned to each TGSI output. */
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	/* Hardware temp assigned to each TGSI temporary (indexed by TGSI index). */
	struct nvfx_reg *r_temp;
	/* Temp used as a stand-in for not-yet-assigned generic inputs. */
	unsigned sprite_coord_temp;

	/* Highest temp register index written + 1 (hw register footprint). */
	int num_regs;

	/* Word offset of the instruction currently being assembled. */
	unsigned inst_offset;
	/* Nonzero once the current instruction has an inline constant appended. */
	unsigned have_const;

	struct {
		int pipe;        /* pipe constant-buffer index, or -1 for an immediate */
		float vals[4];   /* immediate value (only valid when pipe == -1) */
	} consts[MAX_CONSTS];
	int nr_consts;

	/* Constant slots holding TGSI immediates. */
	struct nvfx_reg imm[MAX_IMM];
	unsigned nr_imm;

	unsigned char generic_to_slot[256]; /* semantic idx for each input semantic */

	/* Offsets of open IF instructions awaiting their ELSE/ENDIF targets. */
	struct util_dynarray if_stack;
	//struct util_dynarray loop_stack;
	/* Label fixups (CAL/REP/BRA targets) resolved after code emission. */
	struct util_dynarray label_relocs;
};
50
51 static INLINE struct nvfx_reg
52 temp(struct nvfx_fpc *fpc)
53 {
54 int idx = ffsll(~fpc->r_temps) - 1;
55
56 if (idx < 0) {
57 NOUVEAU_ERR("out of temps!!\n");
58 assert(0);
59 return nvfx_reg(NVFXSR_TEMP, 0);
60 }
61
62 fpc->r_temps |= (1ULL << idx);
63 fpc->r_temps_discard |= (1ULL << idx);
64 return nvfx_reg(NVFXSR_TEMP, idx);
65 }
66
/* Free every temp allocated since the last release (called after each
 * TGSI instruction has been emitted). */
static INLINE void
release_temps(struct nvfx_fpc *fpc)
{
	fpc->r_temps &= ~fpc->r_temps_discard;
	fpc->r_temps_discard = 0ULL;
}
73
74 static INLINE struct nvfx_reg
75 constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
76 {
77 int idx;
78
79 if (fpc->nr_consts == MAX_CONSTS)
80 assert(0);
81 idx = fpc->nr_consts++;
82
83 fpc->consts[idx].pipe = pipe;
84 if (pipe == -1)
85 memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
86 return nvfx_reg(NVFXSR_CONST, idx);
87 }
88
89 static void
90 grow_insns(struct nvfx_fpc *fpc, int size)
91 {
92 struct nvfx_fragment_program *fp = fpc->fp;
93
94 fp->insn_len += size;
95 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
96 }
97
/*
 * Encode source operand `src' into word (pos + 1) of the instruction being
 * assembled at fpc->inst_offset.  May grow the instruction by 4 words to
 * append an inline constant (at most one constant per instruction).
 */
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.reg.type) {
	case NVFXSR_INPUT:
		/* The input selector lives in word 0, shared by all operands. */
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_RELOCATED:
		/* Generic input whose hw slot isn't final yet: encode the
		 * sprite-coord temp for now and record this word's position
		 * so the slot can be patched in later. */
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (fpc->sprite_coord_temp << NVFX_FP_REG_SRC_SHIFT);
		//printf("adding relocation at %x for %x\n", fpc->inst_offset, src.index);
		util_dynarray_append(&fpc->fp->slot_relocations[src.reg.index], unsigned, fpc->inst_offset + pos + 1);
		break;
	case NVFXSR_CONST:
		/* The constant value occupies the 4 words following the
		 * instruction; grow once per instruction. */
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		/* grow_insns may have moved the buffer: refetch hw. */
		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.reg.index].pipe >= 0) {
			/* User constant: remember where to patch the value
			 * from the constant buffer at upload time. */
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.reg.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			/* Immediate: bake the value directly into the code. */
			memcpy(&fp->insn[fpc->inst_offset + 4],
					fpc->consts[src.reg.index].vals,
					sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	/* Per-operand absolute-value bits sit in word 1, bits 29..31. */
	if (src.abs)
		hw[1] |= (1 << (29 + pos));

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	hw[pos + 1] |= sr;
}
167
/*
 * Encode destination register `dst' into word 0 of the instruction being
 * assembled at fpc->inst_offset.
 */
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		/* Track the register footprint for the program header. */
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		if (dst.index == 1) {
			/* depth output */
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		/* No destination: set the "no output" bit. */
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
195
/*
 * Append one complete hardware instruction built from `insn':
 * allocates 4 words, encodes opcode/mask/scale/saturation/condition-code
 * state, then the destination and the three source operands.
 */
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	if (insn.op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (insn.sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	/* Condition-code update/test and the CC swizzle live in word 1. */
	if (insn.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	/* unit >= 0 means a texture instruction; record the sampler used. */
	if(insn.unit >= 0)
	{
		hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
		fp->samplers |= (1 << insn.unit);
	}

	emit_dst(fpc, insn.dst);
	emit_src(fpc, 0, insn.src[0]);
	emit_src(fpc, 1, insn.src[1]);
	emit_src(fpc, 2, insn.src[2]);
}
236
/* Build an ALU instruction descriptor (unit -1 = no texture unit). */
#define arith(s,o,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
			(d), (m), (s0), (s1), (s2))

/* Build a texture instruction descriptor.  NOTE: the s1/s2 arguments are
 * accepted but IGNORED — the expansion substitutes `none', which must be
 * a visible local at every call site. */
#define tex(s,o,u,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
			(d), (m), (s0), none, none)
244
/* IF src.x != 0, as TGSI specifies.
 * Emits a CC-updating MOV of src.x followed by an NV40 IF instruction whose
 * else/endif offsets are left zero; they are patched when the matching
 * ELSE/ENDIF TGSI opcodes are reached (via if_stack). */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
	const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
	uint32_t *hw;
	insn.cc_update = 1;
	nvfx_fp_emit(fpc, insn);

	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* Use .xxxx swizzle so that we check only src[0].x*/
	hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
			(0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
			(0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
			(0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
			(NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
	hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
	hw[3] = 0; /* | endif_offset */
	util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
272
/* Emit an unconditional NV40 CAL to `target' (a TGSI label); the call
 * offset is fixed up later through label_relocs. */
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
	/* Condition TRUE with identity swizzle: always taken. */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
			(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
	hw[3] = 0;
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
293
/* Emit an unconditional NV40 RET (return from subroutine). */
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
	/* Condition TRUE with identity swizzle: always taken. */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
			(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
309
/* Emit an NV40 REP (hardware loop) iterating `count' times; the loop-end
 * offset points at label `target' and is fixed up via label_relocs.
 * The same count is written to all three count fields. */
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
			NV40_FP_OP_OUT_NONE |
			(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* Condition TRUE with identity swizzle: always entered. */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
			(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
			(count << NV40_FP_OP_REP_COUNT1_SHIFT) |
			(count << NV40_FP_OP_REP_COUNT2_SHIFT) |
			(count << NV40_FP_OP_REP_COUNT3_SHIFT);
	hw[3] = 0; /* | end_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
	//util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
335
/* warning: this only works forward, and probably only if not inside any IF */
/* Emit an unconditional branch, faked as an IF whose condition is FALSE so
 * both the else and endif offsets are relocated to `target'. */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_relocation reloc;
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* Condition FALSE: the "else" path is always taken. */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
			(NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
	hw[3] = 0; /* | endif_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
361
/* Emit an unconditional NV40 BRK (break out of the innermost REP loop). */
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
	uint32_t *hw;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE;
	/* Condition TRUE with identity swizzle: always taken. */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
			(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
378
379 static INLINE struct nvfx_src
380 tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
381 {
382 struct nvfx_src src;
383
384 switch (fsrc->Register.File) {
385 case TGSI_FILE_INPUT:
386 if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_POSITION) {
387 assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
388 src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_POSITION);
389 } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_COLOR) {
390 if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0)
391 src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL0);
392 else if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 1)
393 src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL1);
394 else
395 assert(0);
396 } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG) {
397 assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
398 src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_FOGC);
399 } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FACE) {
400 /* TODO: check this has the correct values */
401 /* XXX: what do we do for nv30 here (assuming it lacks facing)?! */
402 assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
403 src.reg = nvfx_reg(NVFXSR_INPUT, NV40_FP_OP_INPUT_SRC_FACING);
404 } else {
405 assert(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_GENERIC);
406 src.reg = nvfx_reg(NVFXSR_RELOCATED, fpc->generic_to_slot[fpc->pfp->info.input_semantic_index[fsrc->Register.Index]]);
407 }
408 break;
409 case TGSI_FILE_CONSTANT:
410 src.reg = constant(fpc, fsrc->Register.Index, NULL);
411 break;
412 case TGSI_FILE_IMMEDIATE:
413 assert(fsrc->Register.Index < fpc->nr_imm);
414 src.reg = fpc->imm[fsrc->Register.Index];
415 break;
416 case TGSI_FILE_TEMPORARY:
417 src.reg = fpc->r_temp[fsrc->Register.Index];
418 break;
419 /* NV40 fragprog result regs are just temps, so this is simple */
420 case TGSI_FILE_OUTPUT:
421 src.reg = fpc->r_result[fsrc->Register.Index];
422 break;
423 default:
424 NOUVEAU_ERR("bad src file\n");
425 src.reg.index = 0;
426 src.reg.type = 0;
427 break;
428 }
429
430 src.abs = fsrc->Register.Absolute;
431 src.negate = fsrc->Register.Negate;
432 src.swz[0] = fsrc->Register.SwizzleX;
433 src.swz[1] = fsrc->Register.SwizzleY;
434 src.swz[2] = fsrc->Register.SwizzleZ;
435 src.swz[3] = fsrc->Register.SwizzleW;
436 return src;
437 }
438
439 static INLINE struct nvfx_reg
440 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
441 switch (fdst->Register.File) {
442 case TGSI_FILE_OUTPUT:
443 return fpc->r_result[fdst->Register.Index];
444 case TGSI_FILE_TEMPORARY:
445 return fpc->r_temp[fdst->Register.Index];
446 case TGSI_FILE_NULL:
447 return nvfx_reg(NVFXSR_NONE, 0);
448 default:
449 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
450 return nvfx_reg(NVFXSR_NONE, 0);
451 }
452 }
453
454 static INLINE int
455 tgsi_mask(uint tgsi)
456 {
457 int mask = 0;
458
459 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
460 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
461 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
462 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
463 return mask;
464 }
465
466 static boolean
467 nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
468 const struct tgsi_full_instruction *finst)
469 {
470 const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
471 struct nvfx_insn insn;
472 struct nvfx_src src[3], tmp, tmp2;
473 struct nvfx_reg dst;
474 int mask, sat, unit = 0;
475 int ai = -1, ci = -1, ii = -1;
476 int i;
477
478 if (finst->Instruction.Opcode == TGSI_OPCODE_END)
479 return TRUE;
480
481 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
482 const struct tgsi_full_src_register *fsrc;
483
484 fsrc = &finst->Src[i];
485 if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
486 src[i] = tgsi_src(fpc, fsrc);
487 }
488 }
489
490 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
491 const struct tgsi_full_src_register *fsrc;
492
493 fsrc = &finst->Src[i];
494
495 switch (fsrc->Register.File) {
496 case TGSI_FILE_INPUT:
497 if (ai == -1 || ai == fsrc->Register.Index) {
498 ai = fsrc->Register.Index;
499 src[i] = tgsi_src(fpc, fsrc);
500 } else {
501 src[i] = nvfx_src(temp(fpc));
502 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
503 }
504 break;
505 case TGSI_FILE_CONSTANT:
506 if ((ci == -1 && ii == -1) ||
507 ci == fsrc->Register.Index) {
508 ci = fsrc->Register.Index;
509 src[i] = tgsi_src(fpc, fsrc);
510 } else {
511 src[i] = nvfx_src(temp(fpc));
512 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
513 }
514 break;
515 case TGSI_FILE_IMMEDIATE:
516 if ((ci == -1 && ii == -1) ||
517 ii == fsrc->Register.Index) {
518 ii = fsrc->Register.Index;
519 src[i] = tgsi_src(fpc, fsrc);
520 } else {
521 src[i] = nvfx_src(temp(fpc));
522 nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
523 }
524 break;
525 case TGSI_FILE_TEMPORARY:
526 /* handled above */
527 break;
528 case TGSI_FILE_SAMPLER:
529 unit = fsrc->Register.Index;
530 break;
531 case TGSI_FILE_OUTPUT:
532 break;
533 default:
534 NOUVEAU_ERR("bad src file\n");
535 return FALSE;
536 }
537 }
538
539 dst = tgsi_dst(fpc, &finst->Dst[0]);
540 mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
541 sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
542
543 switch (finst->Instruction.Opcode) {
544 case TGSI_OPCODE_ABS:
545 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
546 break;
547 case TGSI_OPCODE_ADD:
548 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
549 break;
550 case TGSI_OPCODE_CMP:
551 insn = arith(0, MOV, none.reg, mask, src[0], none, none);
552 insn.cc_update = 1;
553 nvfx_fp_emit(fpc, insn);
554
555 insn = arith(sat, MOV, dst, mask, src[2], none, none);
556 insn.cc_test = NVFX_COND_GE;
557 nvfx_fp_emit(fpc, insn);
558
559 insn = arith(sat, MOV, dst, mask, src[1], none, none);
560 insn.cc_test = NVFX_COND_LT;
561 nvfx_fp_emit(fpc, insn);
562 break;
563 case TGSI_OPCODE_COS:
564 nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
565 break;
566 case TGSI_OPCODE_DDX:
567 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
568 tmp = nvfx_src(temp(fpc));
569 nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
570 nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
571 nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
572 nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
573 } else {
574 nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
575 }
576 break;
577 case TGSI_OPCODE_DDY:
578 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
579 tmp = nvfx_src(temp(fpc));
580 nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
581 nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
582 nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
583 nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
584 } else {
585 nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
586 }
587 break;
588 case TGSI_OPCODE_DP2:
589 tmp = nvfx_src(temp(fpc));
590 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none));
591 nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
592 break;
593 case TGSI_OPCODE_DP3:
594 nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
595 break;
596 case TGSI_OPCODE_DP4:
597 nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
598 break;
599 case TGSI_OPCODE_DPH:
600 tmp = nvfx_src(temp(fpc));
601 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
602 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
603 break;
604 case TGSI_OPCODE_DST:
605 nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
606 break;
607 case TGSI_OPCODE_EX2:
608 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
609 break;
610 case TGSI_OPCODE_FLR:
611 nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
612 break;
613 case TGSI_OPCODE_FRC:
614 nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
615 break;
616 case TGSI_OPCODE_KILP:
617 nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
618 break;
619 case TGSI_OPCODE_KIL:
620 insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
621 insn.cc_update = 1;
622 nvfx_fp_emit(fpc, insn);
623
624 insn = arith(0, KIL, none.reg, 0, none, none, none);
625 insn.cc_test = NVFX_COND_LT;
626 nvfx_fp_emit(fpc, insn);
627 break;
628 case TGSI_OPCODE_LG2:
629 nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
630 break;
631 // case TGSI_OPCODE_LIT:
632 case TGSI_OPCODE_LRP:
633 if(!nvfx->is_nv4x)
634 nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
635 else {
636 tmp = nvfx_src(temp(fpc));
637 nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
638 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
639 }
640 break;
641 case TGSI_OPCODE_MAD:
642 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
643 break;
644 case TGSI_OPCODE_MAX:
645 nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
646 break;
647 case TGSI_OPCODE_MIN:
648 nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
649 break;
650 case TGSI_OPCODE_MOV:
651 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
652 break;
653 case TGSI_OPCODE_MUL:
654 nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
655 break;
656 case TGSI_OPCODE_NOP:
657 break;
658 case TGSI_OPCODE_POW:
659 if(!nvfx->is_nv4x)
660 nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
661 else {
662 tmp = nvfx_src(temp(fpc));
663 nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
664 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
665 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
666 }
667 break;
668 case TGSI_OPCODE_RCP:
669 nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
670 break;
671 case TGSI_OPCODE_RFL:
672 if(!nvfx->is_nv4x)
673 nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none));
674 else {
675 tmp = nvfx_src(temp(fpc));
676 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none));
677 nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none));
678 insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
679 insn.scale = NVFX_FP_OP_DST_SCALE_2X;
680 nvfx_fp_emit(fpc, insn);
681 nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])));
682 }
683 break;
684 case TGSI_OPCODE_RSQ:
685 if(!nvfx->is_nv4x)
686 nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
687 else {
688 tmp = nvfx_src(temp(fpc));
689 insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
690 insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
691 nvfx_fp_emit(fpc, insn);
692 nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
693 }
694 break;
695 case TGSI_OPCODE_SCS:
696 /* avoid overwriting the source */
697 if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
698 {
699 if (mask & NVFX_FP_MASK_X)
700 nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
701 if (mask & NVFX_FP_MASK_Y)
702 nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
703 }
704 else
705 {
706 if (mask & NVFX_FP_MASK_Y)
707 nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
708 if (mask & NVFX_FP_MASK_X)
709 nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
710 }
711 break;
712 case TGSI_OPCODE_SEQ:
713 nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
714 break;
715 case TGSI_OPCODE_SFL:
716 nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none));
717 break;
718 case TGSI_OPCODE_SGE:
719 nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
720 break;
721 case TGSI_OPCODE_SGT:
722 nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
723 break;
724 case TGSI_OPCODE_SIN:
725 nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
726 break;
727 case TGSI_OPCODE_SLE:
728 nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
729 break;
730 case TGSI_OPCODE_SLT:
731 nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
732 break;
733 case TGSI_OPCODE_SNE:
734 nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
735 break;
736 case TGSI_OPCODE_SSG:
737 tmp = nvfx_src(temp(fpc));
738 tmp2 = nvfx_src(temp(fpc));
739 nvfx_fp_emit(fpc, arith(0, SGT, tmp.reg, mask, src[0], nvfx_src(nvfx_reg(NVFXSR_CONST, 0)), none));
740 nvfx_fp_emit(fpc, arith(0, SLT, tmp.reg, mask, src[0], nvfx_src(nvfx_reg(NVFXSR_CONST, 0)), none));
741 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, tmp, neg(tmp2), none));
742 break;
743 case TGSI_OPCODE_STR:
744 nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none));
745 break;
746 case TGSI_OPCODE_SUB:
747 nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
748 break;
749 case TGSI_OPCODE_TEX:
750 nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
751 break;
752 case TGSI_OPCODE_TRUNC:
753 tmp = nvfx_src(temp(fpc));
754 insn = arith(0, MOV, none.reg, mask, src[0], none, none);
755 insn.cc_update = 1;
756 nvfx_fp_emit(fpc, insn);
757
758 nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none));
759 nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none));
760
761 insn = arith(sat, MOV, dst, mask, neg(tmp), none, none);
762 insn.cc_test = NVFX_COND_LT;
763 nvfx_fp_emit(fpc, insn);
764 break;
765 case TGSI_OPCODE_TXB:
766 nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
767 break;
768 case TGSI_OPCODE_TXL:
769 if(nvfx->is_nv4x)
770 nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none));
771 else /* unsupported on nv30, use TEX and hope they like it */
772 nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
773 break;
774 case TGSI_OPCODE_TXP:
775 nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
776 break;
777 case TGSI_OPCODE_XPD:
778 tmp = nvfx_src(temp(fpc));
779 nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
780 nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
781 break;
782
783 case TGSI_OPCODE_IF:
784 // MOVRC0 R31 (TR0.xyzw), R<src>:
785 // IF (NE.xxxx) ELSE <else> END <end>
786 if(!nvfx->is_nv4x)
787 goto nv3x_cflow;
788 nv40_fp_if(fpc, src[0]);
789 break;
790
791 case TGSI_OPCODE_ELSE:
792 {
793 uint32_t *hw;
794 if(!nvfx->is_nv4x)
795 goto nv3x_cflow;
796 assert(util_dynarray_contains(&fpc->if_stack, unsigned));
797 hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
798 hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
799 break;
800 }
801
802 case TGSI_OPCODE_ENDIF:
803 {
804 uint32_t *hw;
805 if(!nvfx->is_nv4x)
806 goto nv3x_cflow;
807 assert(util_dynarray_contains(&fpc->if_stack, unsigned));
808 hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
809 if(!hw[2])
810 hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
811 hw[3] = fpc->fp->insn_len;
812 break;
813 }
814
815 case TGSI_OPCODE_BRA:
816 /* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */
817 /* no state tracker uses this, so don't implement this for now */
818 assert(0);
819 nv40_fp_bra(fpc, finst->Label.Label);
820 break;
821
822 case TGSI_OPCODE_BGNSUB:
823 case TGSI_OPCODE_ENDSUB:
824 /* nothing to do here */
825 break;
826
827 case TGSI_OPCODE_CAL:
828 if(!nvfx->is_nv4x)
829 goto nv3x_cflow;
830 nv40_fp_cal(fpc, finst->Label.Label);
831 break;
832
833 case TGSI_OPCODE_RET:
834 if(!nvfx->is_nv4x)
835 goto nv3x_cflow;
836 nv40_fp_ret(fpc);
837 break;
838
839 case TGSI_OPCODE_BGNLOOP:
840 if(!nvfx->is_nv4x)
841 goto nv3x_cflow;
842 /* TODO: we should support using two nested REPs to allow a > 255 iteration count */
843 nv40_fp_rep(fpc, 255, finst->Label.Label);
844 break;
845
846 case TGSI_OPCODE_ENDLOOP:
847 break;
848
849 case TGSI_OPCODE_BRK:
850 if(!nvfx->is_nv4x)
851 goto nv3x_cflow;
852 nv40_fp_brk(fpc);
853 break;
854
855 case TGSI_OPCODE_CONT:
856 {
857 static int warned = 0;
858 if(!warned) {
859 NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
860 warned = 1;
861 }
862 break;
863 }
864
865 default:
866 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
867 return FALSE;
868 }
869
870 out:
871 release_temps(fpc);
872 return TRUE;
873 nv3x_cflow:
874 {
875 static int warned = 0;
876 if(!warned) {
877 NOUVEAU_ERR(
878 "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
879 "If rendering is incorrect, try to disable GLSL support in the application.\n");
880 warned = 1;
881 }
882 }
883 goto out;
884 }
885
/*
 * Assign a hardware output register to a declared TGSI output.
 * Outputs are ordinary temps on this hardware, so the chosen register is
 * also reserved permanently in r_temps.
 */
static boolean
nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	unsigned hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		/* depth output lives in register 1 */
		hw = 1;
		break;
	case TGSI_SEMANTIC_COLOR:
		/* color outputs: 0, then 2..4 (reg 1 is taken by depth);
		 * ~0 stays if the index is out of the mapped range and is
		 * caught by the bound check below. */
		hw = ~0;
		switch (fdec->Semantic.Index) {
		case 0: hw = 0; break;
		case 1: hw = 2; break;
		case 2: hw = 3; break;
		case 3: hw = 4; break;
		}
		/* nv30 supports 2 color outputs, nv4x supports 4 */
		if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
			NOUVEAU_ERR("bad rcol index\n");
			return FALSE;
		}
		break;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	fpc->r_temps |= (1ULL << hw);
	return TRUE;
}
919
920 static boolean
921 nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
922 {
923 struct tgsi_parse_context p;
924 int high_temp = -1, i;
925 struct util_semantic_set set;
926 float const0v[4] = {0, 0, 0, 0};
927 struct nvfx_reg const0;
928
929 fpc->fp->num_slots = util_semantic_set_from_program_file(&set, fpc->pfp->pipe.tokens, TGSI_FILE_INPUT);
930 if(fpc->fp->num_slots > 8)
931 return FALSE;
932 util_semantic_layout_from_set(fpc->fp->slot_to_generic, &set, 0, 8);
933 util_semantic_table_from_layout(fpc->generic_to_slot, fpc->fp->slot_to_generic, 0, 8);
934
935 memset(fpc->fp->slot_to_fp_input, 0xff, sizeof(fpc->fp->slot_to_fp_input));
936
937 const0 = constant(fpc, -1, const0v);
938 assert(const0.index == 0);
939
940 tgsi_parse_init(&p, fpc->pfp->pipe.tokens);
941 while (!tgsi_parse_end_of_tokens(&p)) {
942 const union tgsi_full_token *tok = &p.FullToken;
943
944 tgsi_parse_token(&p);
945 switch(tok->Token.Type) {
946 case TGSI_TOKEN_TYPE_DECLARATION:
947 {
948 const struct tgsi_full_declaration *fdec;
949 fdec = &p.FullToken.FullDeclaration;
950 switch (fdec->Declaration.File) {
951 case TGSI_FILE_OUTPUT:
952 if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
953 goto out_err;
954 break;
955 case TGSI_FILE_TEMPORARY:
956 if (fdec->Range.Last > high_temp) {
957 high_temp =
958 fdec->Range.Last;
959 }
960 break;
961 default:
962 break;
963 }
964 }
965 break;
966 case TGSI_TOKEN_TYPE_IMMEDIATE:
967 {
968 struct tgsi_full_immediate *imm;
969 float vals[4];
970
971 imm = &p.FullToken.FullImmediate;
972 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
973 assert(fpc->nr_imm < MAX_IMM);
974
975 vals[0] = imm->u[0].Float;
976 vals[1] = imm->u[1].Float;
977 vals[2] = imm->u[2].Float;
978 vals[3] = imm->u[3].Float;
979 fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
980 }
981 break;
982 default:
983 break;
984 }
985 }
986 tgsi_parse_free(&p);
987
988 if (++high_temp) {
989 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
990 for (i = 0; i < high_temp; i++)
991 fpc->r_temp[i] = temp(fpc);
992 fpc->r_temps_discard = 0ULL;
993 }
994
995 return TRUE;
996
997 out_err:
998 if (fpc->r_temp)
999 FREE(fpc->r_temp);
1000 tgsi_parse_free(&p);
1001 return FALSE;
1002 }
1003
1004 DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
1005
1006 static struct nvfx_fragment_program*
1007 nvfx_fragprog_translate(struct nvfx_context *nvfx,
1008 struct nvfx_pipe_fragment_program *pfp,
1009 boolean emulate_sprite_flipping)
1010 {
1011 struct tgsi_parse_context parse;
1012 struct nvfx_fpc *fpc = NULL;
1013 struct util_dynarray insns;
1014 struct nvfx_fragment_program* fp = NULL;
1015 const int min_size = 4096;
1016
1017 fp = CALLOC_STRUCT(nvfx_fragment_program);
1018 if(!fp)
1019 goto out_err;
1020
1021 fpc = CALLOC_STRUCT(nvfx_fpc);
1022 if (!fpc)
1023 goto out_err;
1024
1025 fpc->pfp = pfp;
1026 fpc->fp = fp;
1027 fpc->num_regs = 2;
1028
1029 if (!nvfx_fragprog_prepare(nvfx, fpc))
1030 goto out_err;
1031
1032 tgsi_parse_init(&parse, pfp->pipe.tokens);
1033 util_dynarray_init(&insns);
1034
1035 if(emulate_sprite_flipping)
1036 {
1037 struct nvfx_reg reg = temp(fpc);
1038 struct nvfx_src sprite_input = nvfx_src(nvfx_reg(NVFXSR_RELOCATED, fp->num_slots));
1039 float v[4] = {1, -1, 0, 0};
1040 struct nvfx_src imm = nvfx_src(constant(fpc, -1, v));
1041
1042 fpc->sprite_coord_temp = reg.index;
1043 fpc->r_temps_discard = 0ULL;
1044 nvfx_fp_emit(fpc, arith(0, MAD, reg, NVFX_FP_MASK_ALL, sprite_input, swz(imm, X, Y, X, X), swz(imm, Z, X, Z, Z)));
1045 }
1046
1047 while (!tgsi_parse_end_of_tokens(&parse)) {
1048 tgsi_parse_token(&parse);
1049
1050 switch (parse.FullToken.Token.Type) {
1051 case TGSI_TOKEN_TYPE_INSTRUCTION:
1052 {
1053 const struct tgsi_full_instruction *finst;
1054
1055 util_dynarray_append(&insns, unsigned, fp->insn_len);
1056 finst = &parse.FullToken.FullInstruction;
1057 if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
1058 goto out_err;
1059 }
1060 break;
1061 default:
1062 break;
1063 }
1064 }
1065 util_dynarray_append(&insns, unsigned, fp->insn_len);
1066
1067 for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
1068 {
1069 struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i);
1070 fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
1071 }
1072 util_dynarray_fini(&insns);
1073
1074 if(!nvfx->is_nv4x)
1075 fp->fp_control |= (fpc->num_regs-1)/2;
1076 else
1077 fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;
1078
1079 /* Terminate final instruction */
1080 if(fp->insn)
1081 fp->insn[fpc->inst_offset] |= 0x00000001;
1082
1083 /* Append NOP + END instruction for branches to the end of the program */
1084 fpc->inst_offset = fp->insn_len;
1085 grow_insns(fpc, 4);
1086 fp->insn[fpc->inst_offset + 0] = 0x00000001;
1087 fp->insn[fpc->inst_offset + 1] = 0x00000000;
1088 fp->insn[fpc->inst_offset + 2] = 0x00000000;
1089 fp->insn[fpc->inst_offset + 3] = 0x00000000;
1090
1091 if(debug_get_option_nvfx_dump_fp())
1092 {
1093 debug_printf("\n");
1094 tgsi_dump(pfp->pipe.tokens, 0);
1095
1096 debug_printf("\n%s fragment program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
1097 for (unsigned i = 0; i < fp->insn_len; i += 4)
1098 debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
1099 debug_printf("\n");
1100 }
1101
1102 fp->prog_size = (fp->insn_len * 4 + 63) & ~63;
1103
1104 if(fp->prog_size >= min_size)
1105 fp->progs_per_bo = 1;
1106 else
1107 fp->progs_per_bo = min_size / fp->prog_size;
1108 fp->bo_prog_idx = fp->progs_per_bo - 1;
1109
1110 out:
1111 tgsi_parse_free(&parse);
1112 if(fpc)
1113 {
1114 if (fpc->r_temp)
1115 FREE(fpc->r_temp);
1116 util_dynarray_fini(&fpc->if_stack);
1117 util_dynarray_fini(&fpc->label_relocs);
1118 //util_dynarray_fini(&fpc->loop_stack);
1119 FREE(fpc);
1120 }
1121 return fp;
1122
1123 out_err:
1124 _debug_printf("Error: failed to compile this fragment program:\n");
1125 tgsi_dump(pfp->pipe.tokens, 0);
1126
1127 if(fp)
1128 {
1129 FREE(fp);
1130 fp = NULL;
1131 }
1132 goto out;
1133 }
1134
/* Copy fragment program code into a (mapped) buffer.
 *
 * The nvfx fragment processor consumes instruction words with their
 * 16-bit halves swapped relative to the CPU's view on big-endian hosts,
 * so each 32-bit word is rotated by 16 bits there; on little-endian
 * hosts a plain memcpy suffices.  len must be a multiple of 4.
 *
 * Fix vs. the original: the big-endian path read the *pointer value*
 * into v (missing dereference), swapping garbage instead of the data.
 */
static inline void
nvfx_fp_memcpy(void* dst, const void* src, size_t len)
{
#ifndef WORDS_BIGENDIAN
	memcpy(dst, src, len);
#else
	size_t i;
	for(i = 0; i < len; i += 4) {
		uint32_t v = *(const uint32_t*)((const char*)src + i);
		*(uint32_t*)((char*)dst + i) = (v >> 16) | (v << 16);
	}
#endif
}
1148
1149 /* The hardware only supports immediate constants inside the fragment program,
1150 * and at least on nv30 doesn't support an indirect linkage table.
1151 *
1152 * Hence, we need to patch the fragment program itself both to update constants
1153 * and update linkage.
1154 *
1155 * Using a single fragment program would entail unacceptable stalls if the GPU is
1156 * already rendering with that fragment program.
1157 * Thus, we instead use a "rotating queue" of buffer objects, each of which is
1158 * packed with multiple versions of the same program.
1159 *
1160 * Whenever we need to patch something, we move to the next program and
1161 * patch it. If all buffer objects are in use by the GPU, we allocate another one,
1162 * expanding the queue.
1163 *
1164 * As an additional optimization, we record when all the programs have the
1165 * current input slot configuration, and at that point we stop patching inputs.
1166 * This happens, for instance, if a given fragment program is always used with
1167 * the same vertex program (i.e. always with GLSL), or if the layouts match
1168 * enough (non-GLSL).
1169 *
1170 * Note that instead of using multiple programs, we could push commands
1171 * on the FIFO to patch a single program: it's not fully clear which option is
1172 * faster, but my guess is that the current way is faster.
1173 *
1174 * We also track the previous slot assignments for each version and don't
1175 * patch if they are the same (this could perhaps be removed).
1176 */
1177
1178 void
1179 nvfx_fragprog_validate(struct nvfx_context *nvfx)
1180 {
1181 struct nouveau_channel* chan = nvfx->screen->base.channel;
1182 struct nvfx_pipe_fragment_program *pfp = nvfx->fragprog;
1183 struct nvfx_vertex_program* vp;
1184 unsigned sprite_coord_enable = nvfx->rasterizer->pipe.point_quad_rasterization * nvfx->rasterizer->pipe.sprite_coord_enable;
1185 // TODO: correct or flipped?
1186 boolean emulate_sprite_flipping = sprite_coord_enable && nvfx->rasterizer->pipe.sprite_coord_mode;
1187 unsigned key = emulate_sprite_flipping;
1188 struct nvfx_fragment_program* fp;
1189
1190 fp = pfp->fps[key];
1191 if (!fp)
1192 {
1193 fp = nvfx_fragprog_translate(nvfx, pfp, emulate_sprite_flipping);
1194
1195 if(!fp)
1196 {
1197 if(!nvfx->dummy_fs)
1198 {
1199 struct ureg_program *ureg = ureg_create( TGSI_PROCESSOR_FRAGMENT );
1200 if (ureg)
1201 {
1202 ureg_END( ureg );
1203 nvfx->dummy_fs = ureg_create_shader_and_destroy( ureg, &nvfx->pipe );
1204 }
1205
1206 if(!nvfx->dummy_fs)
1207 {
1208 _debug_printf("Error: unable to create a dummy fragment shader: aborting.");
1209 abort();
1210 }
1211 }
1212
1213 fp = nvfx_fragprog_translate(nvfx, nvfx->dummy_fs, FALSE);
1214 emulate_sprite_flipping = FALSE;
1215
1216 if(!fp)
1217 {
1218 _debug_printf("Error: unable to compile even a dummy fragment shader: aborting.");
1219 abort();
1220 }
1221 }
1222
1223 pfp->fps[key] = fp;
1224 }
1225
1226 vp = nvfx->render_mode == HW ? nvfx->vertprog : nvfx->swtnl.vertprog;
1227
1228 if (fp->last_vp_id != vp->id || fp->last_sprite_coord_enable != sprite_coord_enable) {
1229 int sprite_real_input = -1;
1230 int sprite_reloc_input;
1231 unsigned i;
1232 fp->last_vp_id = vp->id;
1233 fp->last_sprite_coord_enable = sprite_coord_enable;
1234
1235 if(sprite_coord_enable)
1236 {
1237 sprite_real_input = vp->sprite_fp_input;
1238 if(sprite_real_input < 0)
1239 {
1240 unsigned used_texcoords = 0;
1241 for(unsigned i = 0; i < fp->num_slots; ++i) {
1242 unsigned generic = fp->slot_to_generic[i];
1243 if(!((1 << generic) & sprite_coord_enable))
1244 {
1245 unsigned char slot_mask = vp->generic_to_fp_input[generic];
1246 if(slot_mask >= 0xf0)
1247 used_texcoords |= 1 << ((slot_mask & 0xf) - NVFX_FP_OP_INPUT_SRC_TC0);
1248 }
1249 }
1250
1251 sprite_real_input = NVFX_FP_OP_INPUT_SRC_TC(__builtin_ctz(~used_texcoords));
1252 }
1253
1254 fp->point_sprite_control |= (1 << (sprite_real_input - NVFX_FP_OP_INPUT_SRC_TC0 + 8));
1255 }
1256 else
1257 fp->point_sprite_control = 0;
1258
1259 if(emulate_sprite_flipping)
1260 sprite_reloc_input = 0;
1261 else
1262 sprite_reloc_input = sprite_real_input;
1263
1264 for(i = 0; i < fp->num_slots; ++i) {
1265 unsigned generic = fp->slot_to_generic[i];
1266 if((1 << generic) & sprite_coord_enable)
1267 {
1268 if(fp->slot_to_fp_input[i] != sprite_reloc_input)
1269 goto update_slots;
1270 }
1271 else
1272 {
1273 unsigned char slot_mask = vp->generic_to_fp_input[generic];
1274 if((slot_mask >> 4) & (slot_mask ^ fp->slot_to_fp_input[i]))
1275 goto update_slots;
1276 }
1277 }
1278
1279 if(emulate_sprite_flipping)
1280 {
1281 if(fp->slot_to_fp_input[fp->num_slots] != sprite_real_input)
1282 goto update_slots;
1283 }
1284
1285 if(0)
1286 {
1287 update_slots:
1288 /* optimization: we start updating from the slot we found the first difference in */
1289 for(; i < fp->num_slots; ++i)
1290 {
1291 unsigned generic = fp->slot_to_generic[i];
1292 if((1 << generic) & sprite_coord_enable)
1293 fp->slot_to_fp_input[i] = sprite_reloc_input;
1294 else
1295 fp->slot_to_fp_input[i] = vp->generic_to_fp_input[generic] & 0xf;
1296 }
1297
1298 fp->slot_to_fp_input[fp->num_slots] = sprite_real_input;
1299
1300 if(nvfx->is_nv4x)
1301 {
1302 fp->or = 0;
1303 for(i = 0; i <= fp->num_slots; ++i) {
1304 unsigned fp_input = fp->slot_to_fp_input[i];
1305 if(fp_input == NVFX_FP_OP_INPUT_SRC_TC(8))
1306 fp->or |= (1 << 12);
1307 else if(fp_input == NVFX_FP_OP_INPUT_SRC_TC(9))
1308 fp->or |= (1 << 13);
1309 else if(fp_input >= NVFX_FP_OP_INPUT_SRC_TC(0) && fp_input <= NVFX_FP_OP_INPUT_SRC_TC(7))
1310 fp->or |= (1 << (fp_input - NVFX_FP_OP_INPUT_SRC_TC0 + 14));
1311 }
1312 }
1313
1314 fp->progs_left_with_obsolete_slot_assignments = fp->progs;
1315 goto update;
1316 }
1317 }
1318
1319 /* We must update constants even on "just" fragprog changes, because
1320 * we don't check whether the current constant buffer matches the latest
1321 * one bound to this fragment program.
1322 * Doing such a check would likely be a pessimization.
1323 */
1324 if ((nvfx->hw_fragprog != fp) || (nvfx->dirty & (NVFX_NEW_FRAGPROG | NVFX_NEW_FRAGCONST))) {
1325 int offset;
1326 uint32_t* fpmap;
1327
1328 update:
1329 ++fp->bo_prog_idx;
1330 if(fp->bo_prog_idx >= fp->progs_per_bo)
1331 {
1332 if(fp->fpbo && !nouveau_bo_busy(fp->fpbo->next->bo, NOUVEAU_BO_WR))
1333 {
1334 fp->fpbo = fp->fpbo->next;
1335 }
1336 else
1337 {
1338 struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + (fp->prog_size + 8) * fp->progs_per_bo, 16);
1339 uint8_t* map;
1340 uint8_t* buf;
1341
1342 fpbo->slots = (unsigned char*)&fpbo->insn[(fp->prog_size) * fp->progs_per_bo];
1343 memset(fpbo->slots, 0, 8 * fp->progs_per_bo);
1344 if(fp->fpbo)
1345 {
1346 fpbo->next = fp->fpbo->next;
1347 fp->fpbo->next = fpbo;
1348 }
1349 else
1350 fpbo->next = fpbo;
1351 fp->fpbo = fpbo;
1352 fpbo->bo = 0;
1353 fp->progs += fp->progs_per_bo;
1354 fp->progs_left_with_obsolete_slot_assignments += fp->progs_per_bo;
1355 nouveau_bo_new(nvfx->screen->base.device, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 64, fp->prog_size * fp->progs_per_bo, &fpbo->bo);
1356 nouveau_bo_map(fpbo->bo, NOUVEAU_BO_NOSYNC);
1357
1358 map = fpbo->bo->map;
1359 buf = (uint8_t*)fpbo->insn;
1360 for(unsigned i = 0; i < fp->progs_per_bo; ++i)
1361 {
1362 memcpy(buf, fp->insn, fp->insn_len * 4);
1363 nvfx_fp_memcpy(map, fp->insn, fp->insn_len * 4);
1364 map += fp->prog_size;
1365 buf += fp->prog_size;
1366 }
1367 }
1368 fp->bo_prog_idx = 0;
1369 }
1370
1371 offset = fp->bo_prog_idx * fp->prog_size;
1372 fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
1373
1374 if(nvfx->constbuf[PIPE_SHADER_FRAGMENT]) {
1375 struct pipe_resource* constbuf = nvfx->constbuf[PIPE_SHADER_FRAGMENT];
1376 uint32_t* map = (uint32_t*)nvfx_buffer(constbuf)->data;
1377 uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
1378 uint32_t* buf = (uint32_t*)((char*)fp->fpbo->insn + offset);
1379 int i;
1380 for (i = 0; i < fp->nr_consts; ++i) {
1381 unsigned off = fp->consts[i].offset;
1382 unsigned idx = fp->consts[i].index * 4;
1383
1384 /* TODO: is checking a good idea? */
1385 if(memcmp(&buf[off], &map[idx], 4 * sizeof(uint32_t))) {
1386 memcpy(&buf[off], &map[idx], 4 * sizeof(uint32_t));
1387 nvfx_fp_memcpy(&fpmap[off], &map[idx], 4 * sizeof(uint32_t));
1388 }
1389 }
1390 }
1391
1392 /* we only do this if we aren't sure that all program versions have the
1393 * current slot assignments, otherwise we just update constants for speed
1394 */
1395 if(fp->progs_left_with_obsolete_slot_assignments) {
1396 unsigned char* fpbo_slots = &fp->fpbo->slots[fp->bo_prog_idx * 8];
1397 /* also relocate sprite coord slot, if any */
1398 for(unsigned i = 0; i <= fp->num_slots; ++i) {
1399 unsigned value = fp->slot_to_fp_input[i];;
1400 if(value != fpbo_slots[i]) {
1401 unsigned* p;
1402 unsigned* begin = (unsigned*)fp->slot_relocations[i].data;
1403 unsigned* end = (unsigned*)((char*)fp->slot_relocations[i].data + fp->slot_relocations[i].size);
1404 //printf("fp %p reloc slot %u/%u: %u -> %u\n", fp, i, fp->num_slots, fpbo_slots[i], value);
1405 if(value == 0)
1406 {
1407 /* was relocated to an input, switch type to temporary */
1408 for(p = begin; p != end; ++p) {
1409 unsigned off = *p;
1410 unsigned dw = fp->insn[off];
1411 dw &=~ NVFX_FP_REG_TYPE_MASK;
1412 //printf("reloc_tmp at %x\n", off);
1413 nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
1414 }
1415 } else {
1416 if(!fpbo_slots[i])
1417 {
1418 /* was relocated to a temporary, switch type to input */
1419 for(p= begin; p != end; ++p) {
1420 unsigned off = *p;
1421 unsigned dw = fp->insn[off];
1422 //printf("reloc_in at %x\n", off);
1423 dw |= NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT;
1424 nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
1425 }
1426 }
1427
1428 /* set the correct input index */
1429 for(p = begin; p != end; ++p) {
1430 unsigned off = *p & ~3;
1431 unsigned dw = fp->insn[off];
1432 //printf("reloc&~3 at %x\n", off);
1433 dw = (dw & ~NVFX_FP_OP_INPUT_SRC_MASK) | (value << NVFX_FP_OP_INPUT_SRC_SHIFT);
1434 nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
1435 }
1436 }
1437 fpbo_slots[i] = value;
1438 }
1439 }
1440 --fp->progs_left_with_obsolete_slot_assignments;
1441 }
1442
1443 nvfx->hw_fragprog = fp;
1444
1445 MARK_RING(chan, 8, 1);
1446 OUT_RING(chan, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1));
1447 OUT_RELOC(chan, fp->fpbo->bo, offset, NOUVEAU_BO_VRAM |
1448 NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
1449 NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
1450 NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
1451 OUT_RING(chan, RING_3D(NV34TCL_FP_CONTROL, 1));
1452 OUT_RING(chan, fp->fp_control);
1453 if(!nvfx->is_nv4x) {
1454 OUT_RING(chan, RING_3D(NV34TCL_FP_REG_CONTROL, 1));
1455 OUT_RING(chan, (1<<16)|0x4);
1456 OUT_RING(chan, RING_3D(NV34TCL_TX_UNITS_ENABLE, 1));
1457 OUT_RING(chan, fp->samplers);
1458 }
1459 }
1460
1461 {
1462 unsigned pointsprite_control = fp->point_sprite_control | nvfx->rasterizer->pipe.point_quad_rasterization;
1463 if(pointsprite_control != nvfx->hw_pointsprite_control)
1464 {
1465 WAIT_RING(chan, 2);
1466 OUT_RING(chan, RING_3D(NV34TCL_POINT_SPRITE, 1));
1467 OUT_RING(chan, pointsprite_control);
1468 nvfx->hw_pointsprite_control = pointsprite_control;
1469 }
1470 }
1471
1472 if(nvfx->is_nv4x)
1473 {
1474 unsigned vp_output = vp->or | fp->or;
1475
1476 if(vp_output != nvfx->hw_vp_output)
1477 {
1478 WAIT_RING(chan, 2);
1479 OUT_RING(chan, RING_3D(NV40TCL_VP_RESULT_EN, 1));
1480 OUT_RING(chan, vp_output);
1481 nvfx->hw_vp_output = vp_output;
1482 }
1483 }
1484 }
1485
1486 void
1487 nvfx_fragprog_relocate(struct nvfx_context *nvfx)
1488 {
1489 struct nouveau_channel* chan = nvfx->screen->base.channel;
1490 struct nvfx_fragment_program *fp = nvfx->hw_fragprog;
1491 struct nouveau_bo* bo = fp->fpbo->bo;
1492 int offset = fp->bo_prog_idx * fp->prog_size;
1493 unsigned fp_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD; // TODO: GART?
1494 fp_flags |= NOUVEAU_BO_DUMMY;
1495 MARK_RING(chan, 2, 2);
1496 OUT_RELOC(chan, bo, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0);
1497 OUT_RELOC(chan, bo, offset, fp_flags | NOUVEAU_BO_LOW |
1498 NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
1499 NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
1500 }
1501
1502 void
1503 nvfx_fragprog_destroy(struct nvfx_context *nvfx,
1504 struct nvfx_fragment_program *fp)
1505 {
1506 unsigned i;
1507 struct nvfx_fragment_program_bo* fpbo = fp->fpbo;
1508 if(fpbo)
1509 {
1510 do
1511 {
1512 struct nvfx_fragment_program_bo* next = fpbo->next;
1513 nouveau_bo_unmap(fpbo->bo);
1514 nouveau_bo_ref(0, &fpbo->bo);
1515 free(fpbo);
1516 fpbo = next;
1517 }
1518 while(fpbo != fp->fpbo);
1519 }
1520
1521 for(i = 0; i < 8; ++i)
1522 util_dynarray_fini(&fp->slot_relocations[i]);
1523
1524 if (fp->insn_len)
1525 FREE(fp->insn);
1526 }
1527
1528 static void *
1529 nvfx_fp_state_create(struct pipe_context *pipe,
1530 const struct pipe_shader_state *cso)
1531 {
1532 struct nvfx_pipe_fragment_program *pfp;
1533
1534 pfp = CALLOC(1, sizeof(struct nvfx_pipe_fragment_program));
1535 pfp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
1536
1537 tgsi_scan_shader(pfp->pipe.tokens, &pfp->info);
1538
1539 return (void *)pfp;
1540 }
1541
1542 static void
1543 nvfx_fp_state_bind(struct pipe_context *pipe, void *hwcso)
1544 {
1545 struct nvfx_context *nvfx = nvfx_context(pipe);
1546
1547 nvfx->fragprog = hwcso;
1548 nvfx->dirty |= NVFX_NEW_FRAGPROG;
1549 }
1550
1551 static void
1552 nvfx_fp_state_delete(struct pipe_context *pipe, void *hwcso)
1553 {
1554 struct nvfx_context *nvfx = nvfx_context(pipe);
1555 struct nvfx_pipe_fragment_program *pfp = hwcso;
1556 unsigned i;
1557
1558 for(i = 0; i < Elements(pfp->fps); ++i)
1559 {
1560 if(pfp->fps[i])
1561 {
1562 nvfx_fragprog_destroy(nvfx, pfp->fps[i]);
1563 FREE(pfp->fps[i]);
1564 }
1565 }
1566
1567 FREE((void*)pfp->pipe.tokens);
1568 FREE(pfp);
1569 }
1570
1571 void
1572 nvfx_init_fragprog_functions(struct nvfx_context *nvfx)
1573 {
1574 nvfx->pipe.create_fs_state = nvfx_fp_state_create;
1575 nvfx->pipe.bind_fs_state = nvfx_fp_state_bind;
1576 nvfx->pipe.delete_fs_state = nvfx_fp_state_delete;
1577 }