#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_dump.h"

#include "nvfx_context.h"
#include "nvfx_shader.h"
#include "nvfx_resource.h"
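/*
 * Fragment program translation for nv30/nv40: TGSI tokens are parsed into
 * native NV30/NV40 fragment program instructions, which are then uploaded
 * into rotating buffer objects at validate time.
 */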
#define MAX_CONSTS 128
#define MAX_IMM 32
struct nvfx_fpc {
	struct nvfx_fragment_program *fp;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_reg *r_temp;

	int num_regs;

	unsigned inst_offset;
	unsigned have_const;

	struct {
		unsigned pipe;
		float vals[4];
	} consts[MAX_CONSTS];
	unsigned nr_consts;

	struct nvfx_reg imm[MAX_IMM];
	unsigned nr_imm;

	unsigned char generic_to_slot[256]; /* semantic idx for each input semantic */

	struct util_dynarray if_stack;
	//struct util_dynarray loop_stack;
	struct util_dynarray label_relocs;
};
static INLINE struct nvfx_reg
temp(struct nvfx_fpc *fpc)
{
	int idx = ffs(~fpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		assert(0);
		return nvfx_reg(NVFXSR_TEMP, 0);
	}

	fpc->r_temps |= (1 << idx);
	fpc->r_temps_discard |= (1 << idx);
	return nvfx_reg(NVFXSR_TEMP, idx);
}
static INLINE void
release_temps(struct nvfx_fpc *fpc)
{
	fpc->r_temps &= ~fpc->r_temps_discard;
	fpc->r_temps_discard = 0;
}
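/*
 * r_temps is a bitmask of allocated hardware temporaries. Scratch registers
 * taken via temp() are also marked in r_temps_discard and given back
 * wholesale by release_temps() after each TGSI instruction, e.g.:
 *
 *	tmp = nvfx_src(temp(fpc));	// scratch register
 *	...emit instructions using tmp...
 *	release_temps(fpc);		// frees tmp; r_temp[] regs persist
 */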
static INLINE struct nvfx_reg
constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
{
	int idx;

	if (fpc->nr_consts == MAX_CONSTS)
		assert(0);
	idx = fpc->nr_consts++;

	fpc->consts[idx].pipe = pipe;
	if (pipe == -1)
		memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
	return nvfx_reg(NVFXSR_CONST, idx);
}
static void
grow_insns(struct nvfx_fpc *fpc, int size)
{
	struct nvfx_fragment_program *fp = fpc->fp;

	fp->insn_len += size;
	fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
}
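/*
 * Each native instruction is four 32-bit words at fp->insn[fpc->inst_offset].
 * When an instruction reads an inline constant, emit_src() grows the stream
 * by four extra words immediately after the instruction; constants that come
 * from the user constant buffer are additionally recorded in fp->consts so
 * validate can patch the current values in later.
 */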
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];
	uint32_t sr = 0;

	switch (src.reg.type) {
	case NVFXSR_INPUT:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
		break;
	case NVFXSR_OUTPUT:
		sr |= NVFX_FP_REG_SRC_HALF;
		/* fall-through */
	case NVFXSR_TEMP:
		sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
		sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
		break;
	case NVFXSR_RELOCATED:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		//printf("adding relocation at %x for %x\n", fpc->inst_offset, src.index);
		util_dynarray_append(&fpc->fp->slot_relocations[src.reg.index], unsigned, fpc->inst_offset);
		break;
	case NVFXSR_CONST:
		if (!fpc->have_const) {
			grow_insns(fpc, 4);
			fpc->have_const = 1;
		}

		hw = &fp->insn[fpc->inst_offset];
		if (fpc->consts[src.reg.index].pipe >= 0) {
			struct nvfx_fragment_program_data *fpd;

			fp->consts = realloc(fp->consts, ++fp->nr_consts *
					     sizeof(*fpd));
			fpd = &fp->consts[fp->nr_consts - 1];
			fpd->offset = fpc->inst_offset + 4;
			fpd->index = fpc->consts[src.reg.index].pipe;
			memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
		} else {
			memcpy(&fp->insn[fpc->inst_offset + 4],
			       fpc->consts[src.reg.index].vals,
			       sizeof(uint32_t) * 4);
		}

		sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_FP_REG_NEGATE;

	if (src.abs)
		hw[1] |= (1 << (29 + pos));

	sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
	       (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
	       (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
	       (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

	hw[pos + 1] |= sr;
}
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw = &fp->insn[fpc->inst_offset];

	switch (dst.type) {
	case NVFXSR_TEMP:
		if (fpc->num_regs < (dst.index + 1))
			fpc->num_regs = dst.index + 1;
		break;
	case NVFXSR_OUTPUT:
		if (dst.index == 1) {
			fp->fp_control |= 0xe;
		} else {
			hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
		}
		break;
	case NVFXSR_NONE:
		hw[0] |= (1 << 30);
		break;
	default:
		assert(0);
	}

	hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
	struct nvfx_fragment_program *fp = fpc->fp;
	uint32_t *hw;

	fpc->inst_offset = fp->insn_len;
	fpc->have_const = 0;
	grow_insns(fpc, 4);
	hw = &fp->insn[fpc->inst_offset];
	memset(hw, 0, sizeof(uint32_t) * 4);

	if (insn.op == NVFX_FP_OP_OPCODE_KIL)
		fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
	hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
	hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
	hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

	if (insn.sat)
		hw[0] |= NVFX_FP_OP_OUT_SAT;

	if (insn.cc_update)
		hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
	hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
	hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		  (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		  (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		  (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

	if(insn.unit >= 0)
	{
		hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
		fp->samplers |= (1 << insn.unit);
	}

	emit_dst(fpc, insn.dst);
	emit_src(fpc, 0, insn.src[0]);
	emit_src(fpc, 1, insn.src[1]);
	emit_src(fpc, 2, insn.src[2]);
}
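/*
 * Helpers to build an nvfx_insn for nvfx_fp_emit(). 'o' is pasted onto
 * NVFX_FP_OP_OPCODE_; arith() passes tex unit -1 (none). Typical use:
 *
 *	nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
 *	nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
 *
 * Note that tex() takes s1/s2 for symmetry but always passes none for them.
 */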
#define arith(s,o,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
		  (d), (m), (s0), (s1), (s2))

#define tex(s,o,u,d,m,s0,s1,s2) \
	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
		  (d), (m), (s0), none, none)
/* IF src.x != 0, as TGSI specifies */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
	const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
	insn.cc_update = 1;
	nvfx_fp_emit(fpc, insn);

	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* Use .xxxx swizzle so that we check only src[0].x */
	hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
		(0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
		(NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
	hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
	hw[3] = 0; /* | endif_offset */
	util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
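/*
 * The IF above leaves else_offset/endif_offset as zero; the matching TGSI
 * ELSE/ENDIF handlers look up fpc->if_stack and patch hw[2]/hw[3] with the
 * real instruction offsets once they are known.
 */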
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_label_relocation reloc;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
	/* condition is always true (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
	hw[3] = 0;
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_label_relocation, reloc);
}
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
	/* condition is always true (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
	struct nvfx_label_relocation reloc;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* condition is always true (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
		(count << NV40_FP_OP_REP_COUNT1_SHIFT) |
		(count << NV40_FP_OP_REP_COUNT2_SHIFT) |
		(count << NV40_FP_OP_REP_COUNT3_SHIFT);
	hw[3] = 0; /* | end_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_label_relocation, reloc);
	//util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
/* warning: this only works forward, and probably only if not inside any IF */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
	struct nvfx_label_relocation reloc;
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE |
		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
	/* an IF whose condition is always false, so both the else and endif
	 * targets are relocated to the branch target */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
	hw[3] = 0; /* | endif_offset */
	reloc.target = target;
	reloc.location = fpc->inst_offset + 2;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_label_relocation, reloc);
	reloc.target = target;
	reloc.location = fpc->inst_offset + 3;
	util_dynarray_append(&fpc->label_relocs, struct nvfx_label_relocation, reloc);
}
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
	fpc->inst_offset = fpc->fp->insn_len;
	grow_insns(fpc, 4);
	uint32_t *hw = &fpc->fp->insn[fpc->inst_offset];
	hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
		NV40_FP_OP_OUT_NONE;
	/* condition is always true (TR) with identity swizzle */
	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
	hw[3] = 0;
}
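/*
 * Branch targets above are recorded as TGSI instruction indices in
 * fpc->label_relocs; nvfx_fragprog_translate() resolves them to hardware
 * instruction offsets once the whole program has been emitted.
 */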
static INLINE struct nvfx_src
tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
{
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_POSITION) {
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_POSITION);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_COLOR) {
			if(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0)
				src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL0);
			else if(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 1)
				src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL1);
			else
				assert(0);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG) {
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_FOGC);
		} else if(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FACE) {
			/* TODO: check this has the correct values */
			/* XXX: what do we do for nv30 here (assuming it lacks facing)?! */
			assert(fpc->fp->info.input_semantic_index[fsrc->Register.Index] == 0);
			src.reg = nvfx_reg(NVFXSR_INPUT, NV40_FP_OP_INPUT_SRC_FACING);
		} else {
			assert(fpc->fp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_GENERIC);
			src.reg = nvfx_reg(NVFXSR_RELOCATED, fpc->generic_to_slot[fpc->fp->info.input_semantic_index[fsrc->Register.Index]]);
		}
		break;
	case TGSI_FILE_CONSTANT:
		src.reg = constant(fpc, fsrc->Register.Index, NULL);
		break;
	case TGSI_FILE_IMMEDIATE:
		assert(fsrc->Register.Index < fpc->nr_imm);
		src.reg = fpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = fpc->r_temp[fsrc->Register.Index];
		break;
	/* NV40 fragprog result regs are just temps, so this is simple */
	case TGSI_FILE_OUTPUT:
		src.reg = fpc->r_result[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg.index = 0;
		src.reg.type = 0;
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	return src;
}
static INLINE struct nvfx_reg
tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
	switch (fdst->Register.File) {
	case TGSI_FILE_OUTPUT:
		return fpc->r_result[fdst->Register.Index];
	case TGSI_FILE_TEMPORARY:
		return fpc->r_temp[fdst->Register.Index];
	case TGSI_FILE_NULL:
		return nvfx_reg(NVFXSR_NONE, 0);
	default:
		NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
		return nvfx_reg(NVFXSR_NONE, 0);
	}
}
static INLINE int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
	return mask;
}
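/*
 * The hardware apparently allows at most one distinct input register and
 * one distinct constant/immediate per instruction: the source-gathering
 * loops below copy any additional ones through a scratch temp with a MOV
 * before the real instruction is emitted.
 */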
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_instruction *finst)
{
	const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn;
	struct nvfx_src src[3], tmp;
	struct nvfx_reg dst;
	int mask, sat, unit = 0;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(fpc, fsrc);
		}
	}
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(fpc));
				nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(fpc));
				nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(fpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(fpc));
				nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		case TGSI_FILE_SAMPLER:
			unit = fsrc->Register.Index;
			break;
		case TGSI_FILE_OUTPUT:
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}
	dst = tgsi_dst(fpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
		break;
	case TGSI_OPCODE_ADD:
		nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_CMP:
		insn = arith(0, MOV, none.reg, 0xf, src[0], none, none);
		insn.cc_update = 1;
		nvfx_fp_emit(fpc, insn);

		insn = arith(sat, MOV, dst, mask, src[2], none, none);
		insn.cc_test = NVFX_COND_GE;
		nvfx_fp_emit(fpc, insn);

		insn = arith(sat, MOV, dst, mask, src[1], none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_fp_emit(fpc, insn);
		break;
	case TGSI_OPCODE_COS:
		nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_DDX:
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = nvfx_src(temp(fpc));
			nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
			nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
			nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
			nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
		} else {
			nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
		}
		break;
	case TGSI_OPCODE_DDY:
		if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
			tmp = nvfx_src(temp(fpc));
			nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
			nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
			nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
			nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
		} else {
			nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
		}
		break;
	case TGSI_OPCODE_DP3:
		nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DP4:
		nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DPH:
		tmp = nvfx_src(temp(fpc));
		nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
		nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
		break;
	case TGSI_OPCODE_DST:
		nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_EX2:
		nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FLR:
		nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FRC:
		nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_KILP:
		nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_KIL:
		insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
		insn.cc_update = 1;
		nvfx_fp_emit(fpc, insn);

		insn = arith(0, KIL, none.reg, 0, none, none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_fp_emit(fpc, insn);
		break;
	case TGSI_OPCODE_LG2:
		nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
		break;
//	case TGSI_OPCODE_LIT:
	case TGSI_OPCODE_LRP:
		if(!nvfx->is_nv4x)
			nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
		else {
			tmp = nvfx_src(temp(fpc));
			nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
			nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
		}
		break;
	case TGSI_OPCODE_MAD:
		nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
		break;
	case TGSI_OPCODE_MAX:
		nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MIN:
		nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MOV:
		nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_MUL:
		nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_NOP:
		break;
	case TGSI_OPCODE_POW:
		if(!nvfx->is_nv4x)
			nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
		else {
			tmp = nvfx_src(temp(fpc));
			nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
			nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
			nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
		}
		break;
	case TGSI_OPCODE_RCP:
		nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_RFL:
		if(!nvfx->is_nv4x)
			nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none));
		else {
			tmp = nvfx_src(temp(fpc));
			nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none));
			nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none));
			insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
			insn.scale = NVFX_FP_OP_DST_SCALE_2X;
			nvfx_fp_emit(fpc, insn);
			nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])));
		}
		break;
	case TGSI_OPCODE_RSQ:
		if(!nvfx->is_nv4x)
			nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
		else {
			tmp = nvfx_src(temp(fpc));
			insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
			insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
			nvfx_fp_emit(fpc, insn);
			nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
		}
		break;
	case TGSI_OPCODE_SCS:
		/* avoid overwriting the source */
		if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
		{
			if (mask & NVFX_FP_MASK_X)
				nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
			if (mask & NVFX_FP_MASK_Y)
				nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
		}
		else
		{
			if (mask & NVFX_FP_MASK_Y)
				nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
			if (mask & NVFX_FP_MASK_X)
				nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
		}
		break;
	case TGSI_OPCODE_SEQ:
		nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SFL:
		nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGE:
		nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGT:
		nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SIN:
		nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_SLE:
		nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SLT:
		nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SNE:
		nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_STR:
		nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SUB:
		nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
		break;
	case TGSI_OPCODE_TEX:
		nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_TXB:
		nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_TXP:
		nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_XPD:
		tmp = nvfx_src(temp(fpc));
		nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
		nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
		break;

	case TGSI_OPCODE_IF:
		// MOVRC0 R31 (TR0.xyzw), R<src>:
		// IF (NE.xxxx) ELSE <else> END <end>
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		nv40_fp_if(fpc, src[0]);
		break;

	case TGSI_OPCODE_ELSE:
	{
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		assert(util_dynarray_contains(&fpc->if_stack, unsigned));
		uint32_t *hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
		hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
		break;
	}

	case TGSI_OPCODE_ENDIF:
	{
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		assert(util_dynarray_contains(&fpc->if_stack, unsigned));
		uint32_t *hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
		if(!hw[2])
			hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
		hw[3] = fpc->fp->insn_len;
		break;
	}

	case TGSI_OPCODE_BRA:
		/* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */
		/* no state tracker uses this, so don't implement this for now */
		assert(0);
		nv40_fp_bra(fpc, finst->Label.Label);
		break;

	case TGSI_OPCODE_BGNSUB:
	case TGSI_OPCODE_ENDSUB:
		/* nothing to do here */
		break;

	case TGSI_OPCODE_CAL:
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		nv40_fp_cal(fpc, finst->Label.Label);
		break;

	case TGSI_OPCODE_RET:
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		nv40_fp_ret(fpc);
		break;

	case TGSI_OPCODE_BGNLOOP:
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		/* TODO: we should support using two nested REPs to allow a > 255 iteration count */
		nv40_fp_rep(fpc, 255, finst->Label.Label);
		break;

	case TGSI_OPCODE_ENDLOOP:
		break;

	case TGSI_OPCODE_BRK:
		if(!nvfx->is_nv4x)
			goto nv3x_cflow;
		nv40_fp_brk(fpc);
		break;

	case TGSI_OPCODE_CONT:
	{
		static int warned = 0;
		if(!warned) {
			NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
			warned = 1;
		}
		break;
	}

	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

out:
	release_temps(fpc);
	return TRUE;
nv3x_cflow:
	{
		static int warned = 0;
		if(!warned)
		{
			warned = 1;
			NOUVEAU_ERR(
					"Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
					"If rendering is incorrect, try to disable GLSL support in the application.\n");
		}
	}
	goto out;
}
static boolean
nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	unsigned hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = 1;
		break;
	case TGSI_SEMANTIC_COLOR:
		hw = ~0;
		switch (fdec->Semantic.Index) {
		case 0: hw = 0; break;
		case 1: hw = 2; break;
		case 2: hw = 3; break;
		case 3: hw = 4; break;
		}
		if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
			NOUVEAU_ERR("bad rcol index\n");
			return FALSE;
		}
		break;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	fpc->r_temps |= (1 << hw);
	return TRUE;
}
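/*
 * Generic (varying) inputs are packed into at most 8 hardware slots; the
 * util_semantic_* helpers below build the generic<->slot tables that are
 * consumed when sources are emitted as NVFXSR_RELOCATED above.
 */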
static boolean
nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
{
	struct tgsi_parse_context p;
	int high_temp = -1, i;
	struct util_semantic_set set;

	fpc->fp->num_slots = util_semantic_set_from_program_file(&set, fpc->fp->pipe.tokens, TGSI_FILE_INPUT);
	if(fpc->fp->num_slots > 8)
		return FALSE;
	util_semantic_layout_from_set(fpc->fp->slot_to_generic, &set, 0, 8);
	util_semantic_table_from_layout(fpc->generic_to_slot, fpc->fp->slot_to_generic, 0, 8);

	memset(fpc->fp->slot_to_fp_input, 0xff, sizeof(fpc->fp->slot_to_fp_input));

	tgsi_parse_init(&p, fpc->fp->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;
			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_OUTPUT:
				if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
					goto out_err;
				break;
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp)
					high_temp = fdec->Range.Last;
				break;
			default:
				break;
			}
		}
			break;
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			struct tgsi_full_immediate *imm;
			float vals[4];

			imm = &p.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(fpc->nr_imm < MAX_IMM);

			vals[0] = imm->u[0].Float;
			vals[1] = imm->u[1].Float;
			vals[2] = imm->u[2].Float;
			vals[3] = imm->u[3].Float;
			fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (++high_temp) {
		fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			fpc->r_temp[i] = temp(fpc);
		fpc->r_temps_discard = 0;
	}

	return TRUE;

out_err:
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	tgsi_parse_free(&p);
	return FALSE;
}
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
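/*
 * Set NVFX_DUMP_FP=1 in the environment to dump both the TGSI input and
 * the resulting native code for every translated fragment program.
 */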
static void
nvfx_fragprog_translate(struct nvfx_context *nvfx,
			struct nvfx_fragment_program *fp)
{
	struct tgsi_parse_context parse;
	struct nvfx_fpc *fpc = NULL;
	struct util_dynarray insns;

	fpc = CALLOC(1, sizeof(struct nvfx_fpc));
	if (!fpc)
		return;
	fpc->fp = fp;
	fpc->num_regs = 2;

	if (!nvfx_fragprog_prepare(nvfx, fpc)) {
		FREE(fpc);
		return;
	}

	tgsi_parse_init(&parse, fp->pipe.tokens);

	util_dynarray_init(&insns);
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;

			util_dynarray_append(&insns, unsigned, fp->insn_len);
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}
	util_dynarray_append(&insns, unsigned, fp->insn_len);

	for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_label_relocation))
	{
		struct nvfx_label_relocation* label_reloc = (struct nvfx_label_relocation*)((char*)fpc->label_relocs.data + i);
		fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
	}
	util_dynarray_fini(&insns);

	if(!nvfx->is_nv4x)
		fp->fp_control |= (fpc->num_regs-1)/2;
	else
		fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;

	/* Terminate final instruction */
	if(fp->insn)
		fp->insn[fpc->inst_offset] |= 0x00000001;

	/* Append NOP + END instruction for branches to the end of the program */
	fpc->inst_offset = fp->insn_len;
	grow_insns(fpc, 4);
	fp->insn[fpc->inst_offset + 0] = 0x00000001;
	fp->insn[fpc->inst_offset + 1] = 0x00000000;
	fp->insn[fpc->inst_offset + 2] = 0x00000000;
	fp->insn[fpc->inst_offset + 3] = 0x00000000;

	if(debug_get_option_nvfx_dump_fp())
	{
		debug_printf("\n");
		tgsi_dump(fp->pipe.tokens, 0);

		debug_printf("\n%s fragment program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
		for (unsigned i = 0; i < fp->insn_len; i += 4)
			debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
		debug_printf("\n");
	}

	fp->translated = TRUE;

out_err:
	tgsi_parse_free(&parse);
	if (fpc->r_temp)
		FREE(fpc->r_temp);
	util_dynarray_fini(&fpc->if_stack);
	util_dynarray_fini(&fpc->label_relocs);
	//util_dynarray_fini(&fpc->loop_stack);
	FREE(fpc);
}
static INLINE void
nvfx_fp_memcpy(void* dst, const void* src, size_t len)
{
#ifndef WORDS_BIGENDIAN
	memcpy(dst, src, len);
#else
	size_t i;
	for(i = 0; i < len; i += 4) {
		uint32_t v = *(uint32_t*)((char*)src + i);
		*(uint32_t*)((char*)dst + i) = (v >> 16) | (v << 16);
	}
#endif
}
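/*
 * On big-endian hosts each 32-bit code word is stored with its 16-bit
 * halves swapped (presumably what the hardware expects), hence the
 * (v >> 16) | (v << 16) above; all writes into the code BO go through
 * this helper.
 */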
void
nvfx_fragprog_validate(struct nvfx_context *nvfx)
{
	struct nouveau_channel* chan = nvfx->screen->base.channel;
	struct nvfx_fragment_program *fp = nvfx->fragprog;
	int update = 0;

	if (!fp->translated)
	{
		const int min_size = 4096;

		nvfx_fragprog_translate(nvfx, fp);
		if (!fp->translated) {
			static unsigned dummy[8] = {1, 0, 0, 0, 1, 0, 0, 0};
			static int warned = 0;
			if(!warned)
			{
				fprintf(stderr, "nvfx: failed to translate fragment program!\n");
				warned = 1;
			}

			/* use dummy program: we cannot fail here */
			fp->translated = TRUE;
			fp->insn = malloc(sizeof(dummy));
			memcpy(fp->insn, dummy, sizeof(dummy));
			fp->insn_len = sizeof(dummy) / sizeof(dummy[0]);
		}
		update = TRUE;

		fp->prog_size = (fp->insn_len * 4 + 63) & ~63;

		if(fp->prog_size >= min_size)
			fp->progs_per_bo = 1;
		else
			fp->progs_per_bo = min_size / fp->prog_size;
		fp->bo_prog_idx = fp->progs_per_bo - 1;
	}
	/* we must update constants even on "just" fragprog changes, because
	   we don't check whether the current constant buffer matches the latest
	   one bound to this fragment program */
	if (nvfx->dirty & (NVFX_NEW_FRAGCONST | NVFX_NEW_FRAGPROG))
		update = TRUE;

	struct nvfx_vertex_program* vp = nvfx->render_mode == HW ? nvfx->vertprog : nvfx->swtnl.vertprog;
	if (fp->last_vp_id != vp->id) {
		char* vp_sem_table = vp->generic_to_fp_input;
		unsigned char* fp_semantics = fp->slot_to_generic;
		unsigned diff = 0;
		fp->last_vp_id = nvfx->vertprog->id;
		unsigned char* cur_slots = fp->slot_to_fp_input;
		for(unsigned i = 0; i < fp->num_slots; ++i) {
			unsigned char slot_mask = vp_sem_table[fp_semantics[i]];
			diff |= (slot_mask >> 4) & (slot_mask ^ cur_slots[i]);
		}

		if(diff)
		{
			for(unsigned i = 0; i < fp->num_slots; ++i) {
				/* if 0xff, then this will write to the dummy value at fp->last_layout_mask[0] */
				fp->slot_to_fp_input[i] = vp_sem_table[fp_semantics[i]] & 0xf;
				//printf("fp: GENERIC[%i] from fpreg %i\n", fp_semantics[i], fp->slot_to_fp_input[i]);
			}

			fp->progs_left_with_obsolete_slot_assignments = fp->progs;
			update = TRUE;
		}
	}
	// last_sprite_coord_enable
	unsigned sprite_coord_enable = nvfx->rasterizer->pipe.point_quad_rasterization * nvfx->rasterizer->pipe.sprite_coord_enable;
	if(fp->last_sprite_coord_enable != sprite_coord_enable)
	{
		unsigned texcoord_mask = vp->texcoord_ouput_mask;
		fp->last_sprite_coord_enable = sprite_coord_enable;
		fp->point_sprite_control = 0;
		for(unsigned i = 0; i < fp->num_slots; ++i) {
			if((1 << fp->slot_to_generic[i]) & sprite_coord_enable)
			{
				unsigned fpin = fp->slot_to_fp_input[i];
				//printf("sprite: slot %i generic %i had texcoord %i\n", i, fp->slot_to_generic[i], fpin - NVFX_FP_OP_INPUT_SRC_TC0);
				if(fpin >= 0x0f)
				{
					unsigned tc = __builtin_ctz(~texcoord_mask);
					texcoord_mask |= (1 << tc);
					fp->slot_to_fp_input[i] = fpin = NVFX_FP_OP_INPUT_SRC_TC(tc);

					fp->progs_left_with_obsolete_slot_assignments = fp->progs;
					update = TRUE;
				}
				//printf("sprite: slot %i texcoord %i\n", i, fpin - NVFX_FP_OP_INPUT_SRC_TC0);
				fp->point_sprite_control |= (1 << (fpin - NVFX_FP_OP_INPUT_SRC_TC0 + 8));
			}
			else
			{
				unsigned fpin = fp->slot_to_fp_input[i];
				if(!(vp->texcoord_ouput_mask & (1 << (fpin - NVFX_FP_OP_INPUT_SRC_TC0))))
				{
					fp->slot_to_fp_input[i] = 0x0f;

					fp->progs_left_with_obsolete_slot_assignments = fp->progs;
					update = TRUE;
				}
			}
		}
	}
	if(update) {
		++fp->bo_prog_idx;
		if(fp->bo_prog_idx >= fp->progs_per_bo)
		{
			if(fp->fpbo && !nouveau_bo_busy(fp->fpbo->next->bo, NOUVEAU_BO_WR))
			{
				fp->fpbo = fp->fpbo->next;
			}
			else
			{
				struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + (fp->prog_size + 8) * fp->progs_per_bo, 16);
				fpbo->slots = (unsigned char*)&fpbo->insn[(fp->prog_size) * fp->progs_per_bo];
				memset(fpbo->slots, 0, 8 * fp->progs_per_bo);
				if(fp->fpbo)
				{
					fpbo->next = fp->fpbo->next;
					fp->fpbo->next = fpbo;
				}
				else
					fpbo->next = fpbo;
				fp->fpbo = fpbo;
				fpbo->bo = 0;
				fp->progs += fp->progs_per_bo;
				fp->progs_left_with_obsolete_slot_assignments += fp->progs_per_bo;
				nouveau_bo_new(nvfx->screen->base.device, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 64, fp->prog_size * fp->progs_per_bo, &fpbo->bo);
				nouveau_bo_map(fpbo->bo, NOUVEAU_BO_NOSYNC);

				uint8_t* map = fpbo->bo->map;
				uint8_t* buf = (uint8_t*)fpbo->insn;
				for(unsigned i = 0; i < fp->progs_per_bo; ++i)
				{
					memcpy(buf, fp->insn, fp->insn_len * 4);
					nvfx_fp_memcpy(map, fp->insn, fp->insn_len * 4);
					map += fp->prog_size;
					buf += fp->prog_size;
				}
			}
			fp->bo_prog_idx = 0;
		}
		int offset = fp->bo_prog_idx * fp->prog_size;

		if(nvfx->constbuf[PIPE_SHADER_FRAGMENT]) {
			struct pipe_resource* constbuf = nvfx->constbuf[PIPE_SHADER_FRAGMENT];
			uint32_t* map = (uint32_t*)nvfx_buffer(constbuf)->data;
			uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
			uint32_t* buf = (uint32_t*)((char*)fp->fpbo->insn + offset);
			int i;
			for (i = 0; i < fp->nr_consts; ++i) {
				unsigned off = fp->consts[i].offset;
				unsigned idx = fp->consts[i].index * 4;

				/* TODO: is checking a good idea? */
				if(memcmp(&buf[off], &map[idx], 4 * sizeof(uint32_t))) {
					memcpy(&buf[off], &map[idx], 4 * sizeof(uint32_t));
					nvfx_fp_memcpy(&fpmap[off], &map[idx], 4 * sizeof(uint32_t));
				}
			}
		}

		if(fp->progs_left_with_obsolete_slot_assignments) {
			unsigned char* fpbo_slots = &fp->fpbo->slots[fp->bo_prog_idx * 8];
			uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
			for(unsigned i = 0; i < fp->num_slots; ++i) {
				unsigned value = fp->slot_to_fp_input[i];
				if(value != fpbo_slots[i]) {
					unsigned* p = (unsigned*)fp->slot_relocations[i].data;
					unsigned* pend = (unsigned*)((char*)fp->slot_relocations[i].data + fp->slot_relocations[i].size);
					for(; p != pend; ++p) {
						unsigned off = *p;
						unsigned dw = fp->insn[off];
						dw = (dw & ~NVFX_FP_OP_INPUT_SRC_MASK) | (value << NVFX_FP_OP_INPUT_SRC_SHIFT);
						nvfx_fp_memcpy(&fpmap[*p], &dw, sizeof(dw));
					}
					fpbo_slots[i] = value;
				}
			}
			--fp->progs_left_with_obsolete_slot_assignments;
		}
	}
	if(update || (nvfx->dirty & NVFX_NEW_FRAGPROG)) {
		int offset = fp->bo_prog_idx * fp->prog_size;
		MARK_RING(chan, 8, 1);
		OUT_RING(chan, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1));
		OUT_RELOC(chan, fp->fpbo->bo, offset, NOUVEAU_BO_VRAM |
			  NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
			  NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
			  NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
		OUT_RING(chan, RING_3D(NV34TCL_FP_CONTROL, 1));
		OUT_RING(chan, fp->fp_control);
		if(!nvfx->is_nv4x) {
			OUT_RING(chan, RING_3D(NV34TCL_FP_REG_CONTROL, 1));
			OUT_RING(chan, (1<<16)|0x4);
			OUT_RING(chan, RING_3D(NV34TCL_TX_UNITS_ENABLE, 1));
			OUT_RING(chan, fp->samplers);
		}
	}

	if(nvfx->dirty & (NVFX_NEW_FRAGPROG | NVFX_NEW_SPRITE))
	{
		WAIT_RING(chan, 2);
		OUT_RING(chan, RING_3D(NV34TCL_POINT_SPRITE, 1));
		OUT_RING(chan, fp->point_sprite_control | nvfx->rasterizer->pipe.point_quad_rasterization);
	}
}
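/*
 * Re-emitted when buffer objects move: only the relocations for the active
 * program pointer are rewritten (NOUVEAU_BO_DUMMY appears to mark them as
 * relocation-only updates), not the surrounding state.
 */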
void
nvfx_fragprog_relocate(struct nvfx_context *nvfx)
{
	struct nouveau_channel* chan = nvfx->screen->base.channel;
	struct nvfx_fragment_program *fp = nvfx->fragprog;
	struct nouveau_bo* bo = fp->fpbo->bo;
	int offset = fp->bo_prog_idx * fp->prog_size;
	unsigned fp_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD; // TODO: GART?
	fp_flags |= NOUVEAU_BO_DUMMY;
	MARK_RING(chan, 2, 2);
	OUT_RELOC(chan, bo, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0);
	OUT_RELOC(chan, bo, offset, fp_flags | NOUVEAU_BO_LOW |
		  NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
		  NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
}
void
nvfx_fragprog_destroy(struct nvfx_context *nvfx,
		      struct nvfx_fragment_program *fp)
{
	unsigned i;
	struct nvfx_fragment_program_bo* fpbo = fp->fpbo;
	if(fpbo)
	{
		do
		{
			struct nvfx_fragment_program_bo* next = fpbo->next;
			nouveau_bo_unmap(fpbo->bo);
			nouveau_bo_ref(0, &fpbo->bo);
			os_free_aligned(fpbo);
			fpbo = next;
		}
		while(fpbo != fp->fpbo);
	}

	for(i = 0; i < 8; ++i)
		util_dynarray_fini(&fp->slot_relocations[i]);

	if (fp->insn_len)
		FREE(fp->insn);
}