#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_linkage.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"

#include "draw/draw_context.h"

#include "nvfx_context.h"
#include "nvfx_state.h"
#include "nvfx_resource.h"

/* TODO (at least...):
 * 1. Indexed consts + ARL
 * 3. NV_vp11, NV_vp2, NV_vp3 features
 *    - extra arith opcodes
 */

#include "nv30_vertprog.h"
#include "nv40_vertprog.h"
struct nvfx_loop_entry
{
	unsigned brk_target;
	unsigned cont_target;
};

struct nvfx_vpc
{
	struct nvfx_context* nvfx;
	struct nvfx_vertex_program *vp;

	struct nvfx_vertex_program_exec *vpi;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_reg *r_address;
	struct nvfx_reg *r_temp;

	struct nvfx_reg *imm;
	unsigned nr_imm;

	unsigned hpos_idx;

	struct util_dynarray label_relocs;
	struct util_dynarray loop_stack;
};
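
/* Allocate the lowest free hardware temporary: r_temps is a bitmask of
 * temps currently in use, and r_temps_discard accumulates the ones that
 * only live for the current instruction, to be freed in bulk by
 * release_temps().
 */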
static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
	int idx = ffs(~vpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		assert(0);
		return nvfx_reg(NVFXSR_TEMP, 0);
	}

	vpc->r_temps |= (1 << idx);
	vpc->r_temps_discard |= (1 << idx);
	return nvfx_reg(NVFXSR_TEMP, idx);
}
static void
release_temps(struct nvfx_vpc *vpc)
{
	vpc->r_temps &= ~vpc->r_temps_discard;
	vpc->r_temps_discard = 0;
}
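
/* Get a constant slot for the given pipe constant index, reusing an
 * existing slot if this index was seen before.  A pipe index of -1
 * denotes a driver-internal constant (immediates, user clip planes)
 * that never aliases a user constant.
 */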
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	struct nvfx_vertex_program_data *vpd;
	int idx;

	if (pipe >= 0) {
		for (idx = 0; idx < vp->nr_consts; idx++) {
			if (vp->consts[idx].index == pipe)
				return nvfx_reg(NVFXSR_CONST, idx);
		}
	}

	idx = vp->nr_consts++;
	vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
	vpd = &vp->consts[idx];
	vpd->index = pipe;
	vpd->value[0] = x;
	vpd->value[1] = y;
	vpd->value[2] = z;
	vpd->value[3] = w;
	return nvfx_reg(NVFXSR_CONST, idx);
}
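
/* Build an nvfx_insn: the execution slot (VEC or SCA unit) is packed
 * above bit 7 of the opcode field and the opcode proper into the low
 * 7 bits; nvfx_vp_emit() splits them apart again.
 */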
#define arith(s,o,d,m,s0,s1,s2) \
	nvfx_insn(0, (NVFX_VP_INST_SLOT_##s << 7) | NVFX_VP_INST_##s##_OP_##o, -1, (d), (m), (s0), (s1), (s2))
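
/* Each hardware instruction is four 32-bit words (hw[0..3]).  A source
 * operand is assembled into the bitfield 'sr' and then spliced into the
 * words at a position that depends on which of the three operand slots
 * it occupies.  Constant sources are also recorded in vp->const_relocs,
 * since the final constant slot is only known once the program's
 * constant range has been allocated in nvfx_vertprog_validate().
 */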
static void
emit_src(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int pos, struct nvfx_src src)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t sr = 0;
	struct nvfx_relocation reloc;

	switch (src.reg.type) {
	case NVFXSR_TEMP:
		sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
		sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
		break;
	case NVFXSR_INPUT:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		vp->ir |= (1 << src.reg.index);
		hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
		break;
	case NVFXSR_CONST:
		sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		reloc.location = vp->nr_insns - 1;
		reloc.target = src.reg.index;
		util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_VP(SRC_NEGATE);

	if (src.abs)
		hw[0] |= (1 << (21 + pos));

	sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
	       (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
	       (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
	       (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

	switch (pos) {
	case 0:
		hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
			  NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
		hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
			  NVFX_VP(INST_SRC0L_SHIFT);
		break;
	case 1:
		hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
		break;
	case 2:
		hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
			  NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
		hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
			  NVFX_VP(INST_SRC2L_SHIFT);
		break;
	default:
		assert(0);
	}
}
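
/* Encode the destination register.  nv30 and nv4x use different
 * layouts; on nv4x, writing certain fixed outputs is additionally
 * tracked in the vp->or output mask.
 */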
static void
emit_dst(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int slot, struct nvfx_reg dst)
{
	struct nvfx_vertex_program *vp = vpc->vp;

	switch (dst.type) {
	case NVFXSR_NONE:
		if(!nvfx->is_nv4x)
			hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			else
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
		}
		break;
	case NVFXSR_TEMP:
		if(!nvfx->is_nv4x)
			hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
			else
				hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
		}
		break;
	case NVFXSR_OUTPUT:
		/* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
		if(nvfx->is_nv4x) {
			switch (dst.index) {
			case NV30_VP_INST_DEST_CLP(0):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(1):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(2):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(3):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(4):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(5):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
			case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
			case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
			case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
			case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
			case NV40_VP_INST_DEST_PSZ: vp->or |= (1 << 5); break;
			}
		}

		if(!nvfx->is_nv4x) {
			hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
			hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

			/*XXX: no way this is entirely correct, someone needs to
			 *     figure out what exactly it is.
			 */
			hw[3] |= 0x800;
		} else {
			hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
			if (slot == 0) {
				hw[0] |= NV40_VP_INST_VEC_RESULT;
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			} else {
				hw[3] |= NV40_VP_INST_SCA_RESULT;
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			}
		}
		break;
	default:
		assert(0);
	}
}
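
/* Append one instruction to the program.  The condition-code test and
 * swizzle are encoded first, then the opcode in its slot-specific
 * position, then the write mask, destination and up to three sources.
 */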
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
	struct nvfx_context* nvfx = vpc->nvfx;
	struct nvfx_vertex_program *vp = vpc->vp;
	unsigned slot = insn.op >> 7;
	unsigned op = insn.op & 0x7f;
	uint32_t *hw;

	vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
	vpc->vpi = &vp->insns[vp->nr_insns - 1];
	memset(vpc->vpi, 0, sizeof(*vpc->vpi));

	hw = vpc->vpi->data;

	hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
	hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
		  (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
		  (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
		  (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
	if(insn.cc_update)
		hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

	if(!nvfx->is_nv4x) {
		if(slot == 0)
			hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
		else {
			hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
			hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
		}
//		hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//		hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

		if (insn.dst.type == NVFXSR_OUTPUT) {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
		} else {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
		}
	} else {
		if (slot == 0) {
			hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
			hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
		} else {
			hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
			hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
		}
	}

	emit_dst(nvfx, vpc, hw, slot, insn.dst);
	emit_src(nvfx, vpc, hw, 0, insn.src[0]);
	emit_src(nvfx, vpc, hw, 1, insn.src[1]);
	emit_src(nvfx, vpc, hw, 2, insn.src[2]);
}
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
		break;
	case TGSI_FILE_CONSTANT:
		src.reg = constant(vpc, fsrc->Register.Index, 0, 0, 0, 0);
		break;
	case TGSI_FILE_IMMEDIATE:
		src.reg = vpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = vpc->r_temp[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg = nvfx_reg(NVFXSR_NONE, 0);
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	return src;
}
static INLINE struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
	struct nvfx_reg dst;

	switch (fdst->Register.File) {
	case TGSI_FILE_NULL:
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	case TGSI_FILE_OUTPUT:
		dst = vpc->r_result[fdst->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		dst = vpc->r_temp[fdst->Register.Index];
		break;
	case TGSI_FILE_ADDRESS:
		dst = vpc->r_address[fdst->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	}

	return dst;
}
static inline int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
	return mask;
}
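
/* The hardware can read at most one distinct input register and one
 * distinct constant (or immediate) per instruction, so any additional
 * distinct inputs/constants are first copied to temporaries with a MOV.
 */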
static boolean
nvfx_vertprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				unsigned idx, const struct tgsi_full_instruction *finst)
{
	struct nvfx_src src[3], tmp;
	struct nvfx_reg dst;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn;
	struct nvfx_relocation reloc;
	struct nvfx_loop_entry loop;
	int mask;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(vpc, fsrc);
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	dst = tgsi_dst(vpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, abs(src[0]), none, none));
		break;
	case TGSI_OPCODE_ADD:
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, src[0], none, src[1]));
		break;
	case TGSI_OPCODE_ARL:
		nvfx_vp_emit(vpc, arith(VEC, ARL, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_CMP:
		insn = arith(VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		insn = arith(VEC, MOV, dst, mask, src[2], none, none);
		insn.cc_test = NVFX_COND_GE;
		nvfx_vp_emit(vpc, insn);

		insn = arith(VEC, MOV, dst, mask, src[1], none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_COS:
		nvfx_vp_emit(vpc, arith(SCA, COS, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_DP2:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
		break;
	case TGSI_OPCODE_DP3:
		nvfx_vp_emit(vpc, arith(VEC, DP3, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DP4:
		nvfx_vp_emit(vpc, arith(VEC, DP4, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DPH:
		nvfx_vp_emit(vpc, arith(VEC, DPH, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DST:
		nvfx_vp_emit(vpc, arith(VEC, DST, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_EX2:
		nvfx_vp_emit(vpc, arith(SCA, EX2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_EXP:
		nvfx_vp_emit(vpc, arith(SCA, EXP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_FLR:
		nvfx_vp_emit(vpc, arith(VEC, FLR, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FRC:
		nvfx_vp_emit(vpc, arith(VEC, FRC, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_LG2:
		nvfx_vp_emit(vpc, arith(SCA, LG2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LIT:
		nvfx_vp_emit(vpc, arith(SCA, LIT, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LOG:
		nvfx_vp_emit(vpc, arith(SCA, LOG, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LRP:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, mask, src[0], src[1], tmp));
		break;
	case TGSI_OPCODE_MAD:
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, mask, src[0], src[1], src[2]));
		break;
	case TGSI_OPCODE_MAX:
		nvfx_vp_emit(vpc, arith(VEC, MAX, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MIN:
		nvfx_vp_emit(vpc, arith(VEC, MIN, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MOV:
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_MUL:
		nvfx_vp_emit(vpc, arith(VEC, MUL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_NOP:
		break;
	case TGSI_OPCODE_POW:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
		break;
	case TGSI_OPCODE_RCP:
		nvfx_vp_emit(vpc, arith(SCA, RCP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_RSQ:
		nvfx_vp_emit(vpc, arith(SCA, RSQ, dst, mask, none, none, abs(src[0])));
		break;
	case TGSI_OPCODE_SEQ:
		nvfx_vp_emit(vpc, arith(VEC, SEQ, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SFL:
		nvfx_vp_emit(vpc, arith(VEC, SFL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGE:
		nvfx_vp_emit(vpc, arith(VEC, SGE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGT:
		nvfx_vp_emit(vpc, arith(VEC, SGT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SIN:
		nvfx_vp_emit(vpc, arith(SCA, SIN, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_SLE:
		nvfx_vp_emit(vpc, arith(VEC, SLE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SLT:
		nvfx_vp_emit(vpc, arith(VEC, SLT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SNE:
		nvfx_vp_emit(vpc, arith(VEC, SNE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SSG:
		nvfx_vp_emit(vpc, arith(VEC, SSG, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_STR:
		nvfx_vp_emit(vpc, arith(VEC, STR, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SUB:
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, src[0], none, neg(src[1])));
		break;
	case TGSI_OPCODE_TRUNC:
		tmp = nvfx_src(temp(vpc));
		insn = arith(VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		nvfx_vp_emit(vpc, arith(VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, tmp, none, none));

		insn = arith(VEC, MOV, dst, mask, neg(tmp), none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_XPD:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
		break;
	case TGSI_OPCODE_IF:
		insn = arith(VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		insn = arith(SCA, BRA, none.reg, 0, none, none, none);
		insn.cc_test = NVFX_COND_EQ;
		insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_ELSE:
	case TGSI_OPCODE_BRA:
	case TGSI_OPCODE_CAL:
		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
			insn = arith(SCA, CAL, none.reg, 0, none, none, none);
		else
			insn = arith(SCA, BRA, none.reg, 0, none, none, none);
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_RET:
		tmp = none;
		tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
		nvfx_vp_emit(vpc, arith(SCA, RET, none.reg, 0, none, none, tmp));
		break;
	case TGSI_OPCODE_BGNSUB:
	case TGSI_OPCODE_ENDSUB:
	case TGSI_OPCODE_ENDIF:
		/* nothing to do here */
		break;
	case TGSI_OPCODE_BGNLOOP:
		loop.cont_target = idx;
		loop.brk_target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
		break;
	case TGSI_OPCODE_ENDLOOP:
		loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_CONT:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_BRK:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.brk_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(vpc);
	return TRUE;
}
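
/* Map a TGSI output declaration to a fixed hardware result slot.
 * GENERIC outputs are assigned through the generic_to_fp_input linkage
 * table computed in nvfx_vertprog_prepare().
 */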
static boolean
nvfx_vertprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	unsigned hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_VP(INST_DEST_POS);
		vpc->hpos_idx = idx;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_COL0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_COL1);
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_BCOLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_BFC0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_BFC1);
		} else {
			NOUVEAU_ERR("bad bcolour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_VP(INST_DEST_FOGC);
		break;
	case TGSI_SEMANTIC_PSIZE:
		hw = NVFX_VP(INST_DEST_PSZ);
		break;
	case TGSI_SEMANTIC_GENERIC:
		hw = (vpc->vp->generic_to_fp_input[fdec->Semantic.Index] & 0xf)
		     + NVFX_VP(INST_DEST_TC(0)) - NVFX_FP_OP_INPUT_SRC_TC(0);
		break;
	case TGSI_SEMANTIC_EDGEFLAG:
		/* not really an error just a fallback */
		NOUVEAU_ERR("cannot handle edgeflag output\n");
		return FALSE;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	return TRUE;
}
static boolean
nvfx_vertprog_prepare(struct nvfx_context* nvfx, struct nvfx_vpc *vpc)
{
	struct tgsi_parse_context p;
	int high_temp = -1, high_addr = -1, nr_imm = 0, i;
	struct util_semantic_set set;
	unsigned char sem_layout[8];
	unsigned num_outputs;

	num_outputs = util_semantic_set_from_program_file(&set, vpc->vp->pipe.tokens, TGSI_FILE_OUTPUT);

	if(num_outputs > 8) {
		NOUVEAU_ERR("too many vertex program outputs: %i\n", num_outputs);
		return FALSE;
	}
	util_semantic_layout_from_set(sem_layout, &set, 8, 8);

	/* hope 0xf is (0, 0, 0, 1) initialized; otherwise, we are _probably_ not required to do this */
	memset(vpc->vp->generic_to_fp_input, 0x0f, sizeof(vpc->vp->generic_to_fp_input));
	for(int i = 0; i < 8; ++i) {
		if(sem_layout[i] == 0xff)
			continue;
		//printf("vp: GENERIC[%i] to fpreg %i\n", sem_layout[i], NVFX_FP_OP_INPUT_SRC_TC(0) + i);
		vpc->vp->generic_to_fp_input[sem_layout[i]] = 0xf0 | NVFX_FP_OP_INPUT_SRC_TC(i);
	}

	vpc->vp->sprite_fp_input = -1;
	for(int i = 0; i < 8; ++i)
	{
		if(sem_layout[i] == 0xff)
		{
			vpc->vp->sprite_fp_input = NVFX_FP_OP_INPUT_SRC_TC(i);
			break;
		}
	}

	tgsi_parse_init(&p, vpc->vp->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			nr_imm++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;

			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp) {
					high_temp = fdec->Range.Last;
				}
				break;
#if 0 /* this would be nice.. except gallium doesn't track it */
			case TGSI_FILE_ADDRESS:
				if (fdec->Range.Last > high_addr) {
					high_addr = fdec->Range.Last;
				}
				break;
#endif
			case TGSI_FILE_OUTPUT:
				if (!nvfx_vertprog_parse_decl_output(nvfx, vpc, fdec))
					return FALSE;
				break;
			default:
				break;
			}
		}
			break;
#if 1 /* yay, parse instructions looking for address regs instead */
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			const struct tgsi_full_dst_register *fdst;

			finst = &p.FullToken.FullInstruction;
			fdst = &finst->Dst[0];

			if (fdst->Register.File == TGSI_FILE_ADDRESS) {
				if (fdst->Register.Index > high_addr)
					high_addr = fdst->Register.Index;
			}
		}
			break;
#endif
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (nr_imm) {
		vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
		assert(vpc->imm);
	}

	if (++high_temp) {
		vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			vpc->r_temp[i] = temp(vpc);
	}

	if (++high_addr) {
		vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
		for (i = 0; i < high_addr; i++)
			vpc->r_address[i] = temp(vpc);
	}

	vpc->r_temps_discard = 0;
	return TRUE;
}
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE)
static void
nvfx_vertprog_translate(struct nvfx_context *nvfx,
			struct nvfx_vertex_program *vp)
{
	struct tgsi_parse_context parse;
	struct nvfx_vpc *vpc = NULL;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct util_dynarray insns;
	int i;

	vpc = CALLOC(1, sizeof(struct nvfx_vpc));
	if (!vpc)
		return;
	vpc->nvfx = nvfx;
	vpc->vp = vp;

	/* reserve space for ucps */
	if(nvfx->use_vp_clipping)
	{
		for(i = 0; i < 6; ++i)
			constant(vpc, -1, 0, 0, 0, 0);
	}

	if (!nvfx_vertprog_prepare(nvfx, vpc)) {
		FREE(vpc);
		return;
	}

	/* Redirect post-transform vertex position to a temp if user clip
	 * planes are enabled.  We need to append code to the vtxprog
	 * to handle clip planes later.
	 */
	/* TODO: maybe support patching this depending on whether there are ucps: not sure if it is really matters much */
	if (nvfx->use_vp_clipping) {
		vpc->r_result[vpc->hpos_idx] = temp(vpc);
		vpc->r_temps_discard = 0;
	}

	tgsi_parse_init(&parse, vp->pipe.tokens);

	util_dynarray_init(&insns);
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			const struct tgsi_full_immediate *imm;

			imm = &parse.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(imm->Immediate.NrTokens == 4 + 1);
			vpc->imm[vpc->nr_imm++] =
				constant(vpc, -1,
					 imm->u[0].Float,
					 imm->u[1].Float,
					 imm->u[2].Float,
					 imm->u[3].Float);
		}
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			unsigned idx = insns.size >> 2;
			util_dynarray_append(&insns, unsigned, vp->nr_insns);
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_vertprog_parse_instruction(nvfx, vpc, idx, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	util_dynarray_append(&insns, unsigned, vp->nr_insns);

	for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
	{
		struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
		struct nvfx_relocation hw_reloc;

		hw_reloc.location = label_reloc->location;
		hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

		//debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

		util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
	}
	util_dynarray_fini(&insns);
	util_dynarray_trim(&vp->branch_relocs);

	/* XXX: what if we add a RET before?! make sure we jump here...*/

	/* Write out HPOS if it was redirected to a temp earlier */
	if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
		struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
						NVFX_VP(INST_DEST_POS));
		struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

		nvfx_vp_emit(vpc, arith(VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
	}

	/* Insert code to handle user clip planes */
	if(nvfx->use_vp_clipping)
	{
		for (i = 0; i < 6; i++) {
			struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
			struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, i));
			struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);
			unsigned mask;

			if(nvfx->is_nv4x)
			{
				switch (i) {
				case 0: case 3: mask = NVFX_VP_MASK_Y; break;
				case 1: case 4: mask = NVFX_VP_MASK_Z; break;
				case 2: case 5: mask = NVFX_VP_MASK_W; break;
				default:
					NOUVEAU_ERR("invalid clip dist #%d\n", i);
					goto out_err;
				}
			}
			else
				mask = NVFX_VP_MASK_X;

			nvfx_vp_emit(vpc, arith(VEC, DP4, cdst, mask, htmp, ceqn, none));
		}
	}

	vp->insns[vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

	nvfx_vp_emit(vpc, arith(VEC, NOP, none.reg, 0, none, none, none));
	vp->insns[vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

	if(debug_get_option_nvfx_dump_vp())
	{
		debug_printf("\n");
		tgsi_dump(vp->pipe.tokens, 0);

		debug_printf("\n%s vertex program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
		for (i = 0; i < vp->nr_insns; i++)
			debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
		debug_printf("\n");
	}

	vp->exec_start = -1;
	vp->translated = TRUE;

out_err:
	tgsi_parse_free(&parse);
	util_dynarray_fini(&vpc->label_relocs);
	util_dynarray_fini(&vpc->loop_stack);
	if (vpc->r_temp)
		FREE(vpc->r_temp);
	if (vpc->r_address)
		FREE(vpc->r_address);
	if (vpc->imm)
		FREE(vpc->imm);
	FREE(vpc);
}
boolean
nvfx_vertprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_screen *screen = nvfx->screen;
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	struct nvfx_vertex_program *vp;
	struct pipe_resource *constbuf;
	boolean upload_code = FALSE, upload_data = FALSE;
	int i;

	if (nvfx->render_mode == HW) {
		vp = nvfx->vertprog;
		constbuf = nvfx->constbuf[PIPE_SHADER_VERTEX];
	} else {
		vp = nvfx->swtnl.vertprog;
		constbuf = NULL;
	}

	/* Translate TGSI shader into hw bytecode */
	if (!vp->translated)
	{
		nvfx->fallback_swtnl &= ~NVFX_NEW_VERTPROG;
		nvfx_vertprog_translate(nvfx, vp);
		if (!vp->translated) {
			nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
			return FALSE;
		}
	}

	/* Allocate hw vtxprog exec slots */
	if (!vp->exec) {
		struct nouveau_resource *heap = nvfx->screen->vp_exec_heap;
		uint vplen = vp->nr_insns;

		if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
			while (heap->next && heap->size < vplen) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->exec);
			}

			if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
			{
				debug_printf("Vertex shader too long: %u instructions\n", vplen);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		upload_code = TRUE;
	}

	/* Allocate hw vtxprog const slots */
	if (vp->nr_consts && !vp->data) {
		struct nouveau_resource *heap = nvfx->screen->vp_data_heap;

		if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
			while (heap->next && heap->size < vp->nr_consts) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->data);
			}

			if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
			{
				debug_printf("Vertex shader uses too many constants: %u constants\n", vp->nr_consts);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		/*XXX: handle this some day */
		assert(vp->data->start >= vp->data_start_min);

		upload_data = TRUE;
		if (vp->data_start != vp->data->start)
			upload_code = TRUE;
	}

	/* If exec or data segments moved we need to patch the program to
	 * fixup offsets and register IDs.
	 */
	if (vp->exec_start != vp->exec->start) {
		//printf("vp_relocs %u -> %u\n", vp->exec_start, vp->exec->start);
		for(unsigned i = 0; i < vp->branch_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->branch_relocs.data + i);
			uint32_t* hw = vp->insns[reloc->location].data;
			unsigned target = vp->exec->start + reloc->target;

			//debug_printf("vp_reloc hw %u -> hw %u\n", reloc->location, target);

			if(!nvfx->is_nv4x)
			{
				hw[2] &=~ NV30_VP_INST_IADDR_MASK;
				hw[2] |= (target & 0x1ff) << NV30_VP_INST_IADDR_SHIFT;
			}
			else
			{
				hw[3] &=~ NV40_VP_INST_IADDRL_MASK;
				hw[3] |= (target & 7) << NV40_VP_INST_IADDRL_SHIFT;

				hw[2] &=~ NV40_VP_INST_IADDRH_MASK;
				hw[2] |= ((target >> 3) & 0x3f) << NV40_VP_INST_IADDRH_SHIFT;
			}
		}

		vp->exec_start = vp->exec->start;
	}

	if (vp->data_start != vp->data->start) {
		for(unsigned i = 0; i < vp->const_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->const_relocs.data + i);
			struct nvfx_vertex_program_exec *vpi = &vp->insns[reloc->location];

			vpi->data[1] &= ~NVFX_VP(INST_CONST_SRC_MASK);
			vpi->data[1] |=
				(reloc->target + vp->data->start) <<
				NVFX_VP(INST_CONST_SRC_SHIFT);
		}

		vp->data_start = vp->data->start;
	}

	/* Update + Upload constant values */
	if (vp->nr_consts) {
		float *map = NULL;

		if (constbuf)
			map = (float*)nvfx_buffer(constbuf)->data;

		for (i = nvfx->use_vp_clipping ? 6 : 0; i < vp->nr_consts; i++) {
			struct nvfx_vertex_program_data *vpd = &vp->consts[i];

			if (vpd->index >= 0) {
				if (!upload_data &&
				    !memcmp(vpd->value, &map[vpd->index * 4],
					    4 * sizeof(float)))
					continue;
				memcpy(vpd->value, &map[vpd->index * 4],
				       4 * sizeof(float));
			}

			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_CONST_ID, 5);
			OUT_RING  (chan, i + vp->data->start);
			OUT_RINGp (chan, (uint32_t *)vpd->value, 4);
		}
	}

	/* Upload vtxprog */
	if (upload_code) {
		BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_FROM_ID, 1);
		OUT_RING  (chan, vp->exec->start);
		for (i = 0; i < vp->nr_insns; i++) {
			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_INST(0), 4);
			OUT_RINGp (chan, vp->insns[i].data, 4);
		}
	}

	if(nvfx->dirty & (NVFX_NEW_VERTPROG))
	{
		OUT_RING(chan, RING_3D(NV34TCL_VP_START_FROM_ID, 1));
		OUT_RING(chan, vp->exec->start);
		if(nvfx->is_nv4x) {
			OUT_RING(chan, RING_3D(NV40TCL_VP_ATTRIB_EN, 1));
			OUT_RING(chan, vp->ir);
		}
	}

	return TRUE;
}
static void
nvfx_vertprog_destroy(struct nvfx_context *nvfx, struct nvfx_vertex_program *vp)
{
	if (vp->nr_insns)
		FREE(vp->insns);

	if (vp->nr_consts)
		FREE(vp->consts);

	nouveau_resource_free(&vp->exec);
	nouveau_resource_free(&vp->data);

	util_dynarray_fini(&vp->branch_relocs);
	util_dynarray_fini(&vp->const_relocs);
}
static void *
nvfx_vp_state_create(struct pipe_context *pipe,
		     const struct pipe_shader_state *cso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_vertex_program *vp;

	// TODO: use a 64-bit atomic here!
	static unsigned long long id = 0;

	vp = CALLOC(1, sizeof(struct nvfx_vertex_program));
	vp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
	vp->draw = draw_create_vertex_shader(nvfx->draw, &vp->pipe);
	vp->id = ++id;

	return (void *)vp;
}
static void
nvfx_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);

	nvfx->vertprog = hwcso;
	nvfx->dirty |= NVFX_NEW_VERTPROG;
	nvfx->draw_dirty |= NVFX_NEW_VERTPROG;
}
static void
nvfx_vp_state_delete(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_vertex_program *vp = hwcso;

	draw_delete_vertex_shader(nvfx->draw, vp->draw);
	nvfx_vertprog_destroy(nvfx, vp);
	FREE((void*)vp->pipe.tokens);
	FREE(vp);
}
void
nvfx_init_vertprog_functions(struct nvfx_context *nvfx)
{
	nvfx->pipe.create_vs_state = nvfx_vp_state_create;
	nvfx->pipe.bind_vs_state = nvfx_vp_state_bind;
	nvfx->pipe.delete_vs_state = nvfx_vp_state_delete;
}