#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_linkage.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_ureg.h"

#include "draw/draw_context.h"

#include "nvfx_context.h"
#include "nvfx_state.h"
#include "nvfx_resource.h"
/* TODO (at least...):
 * 1. Indexed consts + ARL
 * 3. NV_vp11, NV_vp2, NV_vp3 features
 *    - extra arith opcodes
 */

#include "nv30_vertprog.h"
#include "nv40_vertprog.h"
struct nvfx_loop_entry {
	unsigned brk_target;
	unsigned cont_target;
};

struct nvfx_vpc {
	struct nvfx_context* nvfx;
	struct pipe_shader_state pipe;
	struct nvfx_vertex_program *vp;

	struct nvfx_vertex_program_exec *vpi;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_reg *r_address;
	struct nvfx_reg *r_temp;
	struct nvfx_reg *r_const;

	struct nvfx_reg *imm;
	unsigned nr_imm;

	unsigned hpos_idx;

	struct util_dynarray label_relocs;
	struct util_dynarray loop_stack;
};
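/* Scratch temporary management: temp() hands out the lowest free hardware temp
 * from the r_temps bitmask and also records it in r_temps_discard, so that
 * release_temps() can return all per-instruction scratch temps to the pool once
 * the current TGSI instruction has been emitted.
 */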
static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
	int idx = ffs(~vpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		return nvfx_reg(NVFXSR_TEMP, 0);
	}

	vpc->r_temps |= (1 << idx);
	vpc->r_temps_discard |= (1 << idx);
	return nvfx_reg(NVFXSR_TEMP, idx);
}

static inline void
release_temps(struct nvfx_vpc *vpc)
{
	vpc->r_temps &= ~vpc->r_temps_discard;
	vpc->r_temps_discard = 0;
}
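/* Return a hardware constant slot for the given pipe constant index, reusing an
 * already allocated slot when possible; a negative index always appends a new
 * slot holding the literal value (x, y, z, w).
 */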
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	struct nvfx_vertex_program_data *vpd;
	int idx;

	if (pipe >= 0) {
		for (idx = 0; idx < vp->nr_consts; idx++) {
			if (vp->consts[idx].index == pipe)
				return nvfx_reg(NVFXSR_CONST, idx);
		}
	}

	idx = vp->nr_consts++;
	vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
	vpd = &vp->consts[idx];
	vpd->index = pipe;
	vpd->value[0] = x;
	vpd->value[1] = y;
	vpd->value[2] = z;
	vpd->value[3] = w;
	return nvfx_reg(NVFXSR_CONST, idx);
}
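/* Build an nvfx_insn for either the vector (VEC) or scalar (SCA) execution slot.
 * The slot and the slot-specific hardware opcode are packed together into
 * insn.op and split apart again in nvfx_vp_emit().
 */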
#define arith(s,o,d,m,s0,s1,s2) \
	nvfx_insn(0, (NVFX_VP_INST_SLOT_##s << 7) | NVFX_VP_INST_##s##_OP_##o, -1, (d), (m), (s0), (s1), (s2))
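/* Encode source operand 'pos' (0-2) into the four instruction words: register
 * type and index, swizzle, negate/abs flags and, for constant sources, a
 * relocation entry so the slot can be rebased when the constant segment moves.
 */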
static void
emit_src(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int pos, struct nvfx_src src)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t sr = 0;
	struct nvfx_relocation reloc;

	switch (src.reg.type) {
	case NVFXSR_TEMP:
		sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
		sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
		break;
	case NVFXSR_INPUT:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		vp->ir |= (1 << src.reg.index);
		hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
		break;
	case NVFXSR_CONST:
		sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		reloc.location = vp->nr_insns - 1;
		reloc.target = src.reg.index;
		util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_VP(SRC_NEGATE);

	if (src.abs)
		hw[0] |= (1 << (21 + pos));

	sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
	       (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
	       (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
	       (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

	if(src.indirect) {
		if(src.reg.type == NVFXSR_CONST)
			hw[3] |= NVFX_VP(INST_INDEX_CONST);
		else if(src.reg.type == NVFXSR_INPUT)
			hw[0] |= NVFX_VP(INST_INDEX_INPUT);
		else
			assert(0);

		if(src.indirect_reg)
			hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
		hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
	}

	switch (pos) {
	case 0:
		hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
			  NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
		hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
			  NVFX_VP(INST_SRC0L_SHIFT);
		break;
	case 1:
		hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
		break;
	case 2:
		hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
			  NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
		hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
			  NVFX_VP(INST_SRC2L_SHIFT);
		break;
	default:
		assert(0);
	}
}
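/* Encode the destination register, handling the different temp/output encodings
 * of nv30 and nv4x and recording which hardware outputs the program writes
 * (vp->or).
 */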
static void
emit_dst(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int slot, struct nvfx_reg dst)
{
	struct nvfx_vertex_program *vp = vpc->vp;

	switch (dst.type) {
	case NVFXSR_NONE:
		if(!nvfx->is_nv4x)
			hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			else
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
		}
		break;
	case NVFXSR_TEMP:
		if(!nvfx->is_nv4x)
			hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
			else
				hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
		}
		break;
	case NVFXSR_OUTPUT:
		/* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
		if(nvfx->is_nv4x) {
			switch (dst.index) {
			case NV30_VP_INST_DEST_CLP(0):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(1):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(2):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(3):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(4):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(5):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
			case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
			case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
			case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
			case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
			case NV40_VP_INST_DEST_PSZ: vp->or |= (1 << 5); break;
			}
		}

		if(!nvfx->is_nv4x) {
			hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
			hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

			/*XXX: no way this is entirely correct, someone needs to
			 *     figure out what exactly it is.
			 */
			hw[3] |= 0x800;
		} else {
			hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
			if (slot == 0) {
				hw[0] |= NV40_VP_INST_VEC_RESULT;
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			} else {
				hw[3] |= NV40_VP_INST_SCA_RESULT;
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			}
		}
		break;
	}
}
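/* Append one hardware instruction to the program: grow the instruction array,
 * encode the condition-code test/update state and the opcode for the selected
 * slot, then let emit_dst()/emit_src() fill in the operands.
 */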
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
	struct nvfx_context* nvfx = vpc->nvfx;
	struct nvfx_vertex_program *vp = vpc->vp;
	unsigned slot = insn.op >> 7;
	unsigned op = insn.op & 0x7f;
	uint32_t *hw;

	vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
	vpc->vpi = &vp->insns[vp->nr_insns - 1];
	memset(vpc->vpi, 0, sizeof(*vpc->vpi));

	hw = vpc->vpi->data;

	hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
	hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
		  (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
		  (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
		  (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
	if(insn.cc_update)
		hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

	if(!nvfx->is_nv4x) {
		if(slot == 0)
			hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
		else {
			hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
			hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
		}
//		hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//		hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

		if (insn.dst.type == NVFXSR_OUTPUT) {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
		} else {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
		}
	} else {
		if (slot == 0) {
			hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
			hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
		} else {
			hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
			hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
		}
	}

	emit_dst(nvfx, vpc, hw, slot, insn.dst);
	emit_src(nvfx, vpc, hw, 0, insn.src[0]);
	emit_src(nvfx, vpc, hw, 1, insn.src[1]);
	emit_src(nvfx, vpc, hw, 2, insn.src[2]);

//	if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//		hw[3] |= NV40_VP_INST_SCA_RESULT;
}
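/* Translate a TGSI source operand into an nvfx_src, mapping the TGSI register
 * file to the registers reserved in nvfx_vertprog_prepare() and copying the
 * swizzle, negate, absolute-value and (for constants/inputs) indirect
 * addressing state.
 */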
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
		break;
	case TGSI_FILE_CONSTANT:
		src.reg = vpc->r_const[fsrc->Register.Index];
		break;
	case TGSI_FILE_IMMEDIATE:
		src.reg = vpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = vpc->r_temp[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg.index = 0;
		src.reg.type = -1;
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	src.indirect = 0;
	src.indirect_reg = 0;
	src.indirect_swz = 0;

	if(fsrc->Register.Indirect) {
		if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
		   (fsrc->Register.File == TGSI_FILE_CONSTANT || fsrc->Register.File == TGSI_FILE_INPUT))
		{
			src.indirect = 1;
			src.indirect_reg = fsrc->Indirect.Index;
			src.indirect_swz = fsrc->Indirect.SwizzleX;
		}
		else
		{
			src.reg.index = 0;
			src.reg.type = -1;
		}
	}

	return src;
}
static INLINE struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
	struct nvfx_reg dst;

	switch (fdst->Register.File) {
	case TGSI_FILE_NULL:
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	case TGSI_FILE_OUTPUT:
		dst = vpc->r_result[fdst->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		dst = vpc->r_temp[fdst->Register.Index];
		break;
	case TGSI_FILE_ADDRESS:
		dst = vpc->r_address[fdst->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	}

	return dst;
}
static inline int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;

	return mask;
}
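/* Translate a single TGSI instruction into one or more hardware instructions.
 * The source gathering pass first copies operands into temporaries where the
 * hardware could not read them directly (only one distinct input and one
 * distinct constant/immediate may be referenced per instruction).
 */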
static boolean
nvfx_vertprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				unsigned idx, const struct tgsi_full_instruction *finst)
{
	struct nvfx_src src[3], tmp;
	struct nvfx_reg dst;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn;
	struct nvfx_relocation reloc;
	struct nvfx_loop_entry loop;
	int mask;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(vpc, fsrc);
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		if(src[i].reg.type < 0)
			return FALSE;
	}

	if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
	   finst->Instruction.Opcode != TGSI_OPCODE_ARL)
		return FALSE;

	dst = tgsi_dst(vpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, abs(src[0]), none, none));
		break;
	case TGSI_OPCODE_ADD:
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, src[0], none, src[1]));
		break;
	case TGSI_OPCODE_ARL:
		nvfx_vp_emit(vpc, arith(VEC, ARL, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_CMP:
		insn = arith(VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		insn = arith(VEC, MOV, dst, mask, src[2], none, none);
		insn.cc_test = NVFX_COND_GE;
		nvfx_vp_emit(vpc, insn);

		insn = arith(VEC, MOV, dst, mask, src[1], none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_COS:
		nvfx_vp_emit(vpc, arith(SCA, COS, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_DP2:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
		break;
	case TGSI_OPCODE_DP3:
		nvfx_vp_emit(vpc, arith(VEC, DP3, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DP4:
		nvfx_vp_emit(vpc, arith(VEC, DP4, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DPH:
		nvfx_vp_emit(vpc, arith(VEC, DPH, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DST:
		nvfx_vp_emit(vpc, arith(VEC, DST, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_EX2:
		nvfx_vp_emit(vpc, arith(SCA, EX2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_EXP:
		nvfx_vp_emit(vpc, arith(SCA, EXP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_FLR:
		nvfx_vp_emit(vpc, arith(VEC, FLR, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FRC:
		nvfx_vp_emit(vpc, arith(VEC, FRC, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_LG2:
		nvfx_vp_emit(vpc, arith(SCA, LG2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LIT:
		nvfx_vp_emit(vpc, arith(SCA, LIT, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LOG:
		nvfx_vp_emit(vpc, arith(SCA, LOG, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LRP:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, mask, src[0], src[1], tmp));
		break;
	case TGSI_OPCODE_MAD:
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, mask, src[0], src[1], src[2]));
		break;
	case TGSI_OPCODE_MAX:
		nvfx_vp_emit(vpc, arith(VEC, MAX, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MIN:
		nvfx_vp_emit(vpc, arith(VEC, MIN, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MOV:
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_MUL:
		nvfx_vp_emit(vpc, arith(VEC, MUL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_NOP:
		break;
	case TGSI_OPCODE_POW:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
		break;
	case TGSI_OPCODE_RCP:
		nvfx_vp_emit(vpc, arith(SCA, RCP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_RSQ:
		nvfx_vp_emit(vpc, arith(SCA, RSQ, dst, mask, none, none, abs(src[0])));
		break;
	case TGSI_OPCODE_SEQ:
		nvfx_vp_emit(vpc, arith(VEC, SEQ, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SFL:
		nvfx_vp_emit(vpc, arith(VEC, SFL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGE:
		nvfx_vp_emit(vpc, arith(VEC, SGE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGT:
		nvfx_vp_emit(vpc, arith(VEC, SGT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SIN:
		nvfx_vp_emit(vpc, arith(SCA, SIN, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_SLE:
		nvfx_vp_emit(vpc, arith(VEC, SLE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SLT:
		nvfx_vp_emit(vpc, arith(VEC, SLT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SNE:
		nvfx_vp_emit(vpc, arith(VEC, SNE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SSG:
		nvfx_vp_emit(vpc, arith(VEC, SSG, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_STR:
		nvfx_vp_emit(vpc, arith(VEC, STR, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SUB:
		nvfx_vp_emit(vpc, arith(VEC, ADD, dst, mask, src[0], none, neg(src[1])));
		break;
	case TGSI_OPCODE_TRUNC:
		tmp = nvfx_src(temp(vpc));
		insn = arith(VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		nvfx_vp_emit(vpc, arith(VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
		nvfx_vp_emit(vpc, arith(VEC, MOV, dst, mask, tmp, none, none));

		insn = arith(VEC, MOV, dst, mask, neg(tmp), none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_XPD:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
		nvfx_vp_emit(vpc, arith(VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
		break;
	case TGSI_OPCODE_IF:
		insn = arith(VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		insn = arith(SCA, BRA, none.reg, 0, none, none, none);
		insn.cc_test = NVFX_COND_EQ;
		insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_ELSE:
	case TGSI_OPCODE_BRA:
	case TGSI_OPCODE_CAL:
		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
			insn = arith(SCA, CAL, none.reg, 0, none, none, none);
		else
			insn = arith(SCA, BRA, none.reg, 0, none, none, none);
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_RET:
		tmp = none;
		tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
		nvfx_vp_emit(vpc, arith(SCA, RET, none.reg, 0, none, none, tmp));
		break;
	case TGSI_OPCODE_BGNSUB:
	case TGSI_OPCODE_ENDSUB:
	case TGSI_OPCODE_ENDIF:
		/* nothing to do here */
		break;
	case TGSI_OPCODE_BGNLOOP:
		loop.cont_target = idx;
		loop.brk_target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
		break;
	case TGSI_OPCODE_ENDLOOP:
		loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_CONT:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_BRK:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.brk_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(SCA, BRA, none.reg, 0, none, none, none));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(vpc);
	return TRUE;
}
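/* Map a TGSI output declaration to a hardware result register, using the
 * generic-to-texcoord layout computed in nvfx_vertprog_prepare() for
 * TGSI_SEMANTIC_GENERIC outputs.
 */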
static boolean
nvfx_vertprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	int hw = 0;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_VP(INST_DEST_POS);
		vpc->hpos_idx = idx;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_COL0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_COL1);
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_BCOLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_BFC0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_BFC1);
		} else {
			NOUVEAU_ERR("bad bcolour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_VP(INST_DEST_FOGC);
		break;
	case TGSI_SEMANTIC_PSIZE:
		hw = NVFX_VP(INST_DEST_PSZ);
		break;
	case TGSI_SEMANTIC_GENERIC:
		hw = (vpc->vp->generic_to_fp_input[fdec->Semantic.Index] & 0xf) - NVFX_FP_OP_INPUT_SRC_TC(0);
		if(hw <= 8)
			hw = NVFX_VP(INST_DEST_TC(hw));
		else if(hw == 9) /* TODO: this is correct, but how does this overlapping work exactly? */
			hw = NV40_VP_INST_DEST_PSZ;
		else
			assert(0);
		break;
	case TGSI_SEMANTIC_EDGEFLAG:
		/* not really an error just a fallback */
		NOUVEAU_ERR("cannot handle edgeflag output\n");
		return FALSE;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	return TRUE;
}
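/* First pass over the token stream: lay out generic outputs onto the available
 * texcoord slots, count immediates, and find the highest temporary, address and
 * constant register indices so the r_* register arrays can be allocated up
 * front.
 */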
static boolean
nvfx_vertprog_prepare(struct nvfx_context* nvfx, struct nvfx_vpc *vpc)
{
	struct tgsi_parse_context p;
	int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;
	struct util_semantic_set set;
	unsigned char sem_layout[10];
	unsigned num_outputs;
	unsigned num_texcoords = nvfx->is_nv4x ? 10 : 8;

	num_outputs = util_semantic_set_from_program_file(&set, vpc->pipe.tokens, TGSI_FILE_OUTPUT);

	if(num_outputs > num_texcoords) {
		NOUVEAU_ERR("too many vertex program outputs: %i\n", num_outputs);
		return FALSE;
	}
	util_semantic_layout_from_set(sem_layout, &set, num_texcoords, num_texcoords);

	/* hope 0xf is (0, 0, 0, 1) initialized; otherwise, we are _probably_ not required to do this */
	memset(vpc->vp->generic_to_fp_input, 0x0f, sizeof(vpc->vp->generic_to_fp_input));
	for(int i = 0; i < 10; ++i) {
		if(sem_layout[i] == 0xff)
			continue;
		//printf("vp: GENERIC[%i] to fpreg %i\n", sem_layout[i], NVFX_FP_OP_INPUT_SRC_TC(0) + i);
		vpc->vp->generic_to_fp_input[sem_layout[i]] = 0xf0 | NVFX_FP_OP_INPUT_SRC_TC(i);
	}

	vpc->vp->sprite_fp_input = -1;
	for(int i = 0; i < 10; ++i)
	{
		if(sem_layout[i] == 0xff)
		{
			vpc->vp->sprite_fp_input = NVFX_FP_OP_INPUT_SRC_TC(i);
			break;
		}
	}

	tgsi_parse_init(&p, vpc->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			nr_imm++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;

			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp) {
					high_temp = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_ADDRESS:
				if (fdec->Range.Last > high_addr) {
					high_addr = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_CONSTANT:
				if (fdec->Range.Last > high_const) {
					high_const = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_OUTPUT:
				if (!nvfx_vertprog_parse_decl_output(nvfx, vpc, fdec))
					return FALSE;
				break;
			default:
				break;
			}
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (nr_imm)
		vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));

	if (++high_temp) {
		vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			vpc->r_temp[i] = temp(vpc);
	}

	if (++high_addr) {
		vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
		for (i = 0; i < high_addr; i++)
			vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
	}

	if (++high_const) {
		vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
		for (i = 0; i < high_const; i++)
			vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
	}

	vpc->r_temps_discard = 0;
	return TRUE;
}
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE)
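/* Translate a complete TGSI vertex shader into an nvfx_vertex_program:
 * reserve constant slots for the user clip planes, walk the token stream
 * emitting hardware instructions, resolve label relocations to hardware
 * instruction indices and append the HPOS/clip-plane epilogue.
 */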
static struct nvfx_vertex_program*
nvfx_vertprog_translate(struct nvfx_context *nvfx, const struct pipe_shader_state* vps)
{
	struct tgsi_parse_context parse;
	struct nvfx_vertex_program* vp = NULL;
	struct nvfx_vpc *vpc = NULL;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct util_dynarray insns;
	int i;

	tgsi_parse_init(&parse, vps->tokens);

	vp = CALLOC_STRUCT(nvfx_vertex_program);
	if (!vp)
		goto out_err;

	vpc = CALLOC_STRUCT(nvfx_vpc);
	if (!vpc)
		goto out_err;

	vpc->nvfx = nvfx;
	vpc->vp = vp;
	vpc->pipe = *vps;

	{
		// TODO: use a 64-bit atomic here!
		static unsigned long long id = 0;
		vp->id = ++id;
	}

	/* reserve space for ucps */
	if(nvfx->use_vp_clipping)
	{
		for(i = 0; i < 6; ++i)
			constant(vpc, -1, 0, 0, 0, 0);
	}

	if (!nvfx_vertprog_prepare(nvfx, vpc)) {
		FREE(vpc);
		return NULL;
	}

	/* Redirect post-transform vertex position to a temp if user clip
	 * planes are enabled. We need to append code to the vtxprog
	 * to handle clip planes later.
	 */
	/* TODO: maybe support patching this depending on whether there are ucps: not sure if it is really matters much */
	if (nvfx->use_vp_clipping) {
		vpc->r_result[vpc->hpos_idx] = temp(vpc);
		vpc->r_temps_discard = 0;
	}

	util_dynarray_init(&insns);
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			const struct tgsi_full_immediate *imm;

			imm = &parse.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(imm->Immediate.NrTokens == 4 + 1);
			vpc->imm[vpc->nr_imm++] =
				constant(vpc, -1,
					 imm->u[0].Float,
					 imm->u[1].Float,
					 imm->u[2].Float,
					 imm->u[3].Float);
		}
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			unsigned idx = insns.size >> 2;
			util_dynarray_append(&insns, unsigned, vp->nr_insns);
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_vertprog_parse_instruction(nvfx, vpc, idx, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	util_dynarray_append(&insns, unsigned, vp->nr_insns);

	for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
	{
		struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
		struct nvfx_relocation hw_reloc;

		hw_reloc.location = label_reloc->location;
		hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

		//debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

		util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
	}
	util_dynarray_fini(&insns);
	util_dynarray_trim(&vp->branch_relocs);

	/* XXX: what if we add a RET before?! make sure we jump here...*/

	/* Write out HPOS if it was redirected to a temp earlier */
	if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
		struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
						NVFX_VP(INST_DEST_POS));
		struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

		nvfx_vp_emit(vpc, arith(VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
	}

	/* Insert code to handle user clip planes */
	if(nvfx->use_vp_clipping)
	{
		for (i = 0; i < 6; i++) {
			struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
			struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, i));
			struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);
			unsigned mask;

			if(nvfx->is_nv4x)
			{
				switch (i) {
				case 0: case 3: mask = NVFX_VP_MASK_Y; break;
				case 1: case 4: mask = NVFX_VP_MASK_Z; break;
				case 2: case 5: mask = NVFX_VP_MASK_W; break;
				default:
					NOUVEAU_ERR("invalid clip dist #%d\n", i);
					goto out_err;
				}
			}
			else
				mask = NVFX_VP_MASK_X;

			nvfx_vp_emit(vpc, arith(VEC, DP4, cdst, mask, htmp, ceqn, none));
		}
	}
	else
	{
		if(vp->nr_insns)
			vp->insns[vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
	}

	nvfx_vp_emit(vpc, arith(VEC, NOP, none.reg, 0, none, none, none));
	vp->insns[vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

	if(debug_get_option_nvfx_dump_vp())
	{
		tgsi_dump(vpc->pipe.tokens, 0);

		debug_printf("\n%s vertex program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
		for (i = 0; i < vp->nr_insns; i++)
			debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
	}

	vp->exec_start = -1;

out:
	tgsi_parse_free(&parse);
	if(vpc) {
		util_dynarray_fini(&vpc->label_relocs);
		util_dynarray_fini(&vpc->loop_stack);
		FREE(vpc->r_temp);
		FREE(vpc->r_address);
		FREE(vpc->r_const);
		FREE(vpc->imm);
		FREE(vpc);
	}
	return vp;

out_err:
	FREE(vp);
	vp = NULL;
	goto out;
}
static struct nvfx_vertex_program*
nvfx_vertprog_translate_draw_vp(struct nvfx_context *nvfx, struct nvfx_pipe_vertex_program* pvp)
{
	struct nvfx_vertex_program* vp = NULL;
	struct pipe_shader_state vps;
	struct ureg_program *ureg = NULL;
	unsigned num_outputs = MIN2(pvp->info.num_outputs, 16);

	ureg = ureg_create( TGSI_PROCESSOR_VERTEX );
	if (ureg == NULL)
		return NULL;

	for (unsigned i = 0; i < num_outputs; i++)
		ureg_MOV(ureg, ureg_DECL_output(ureg, pvp->info.output_semantic_name[i], pvp->info.output_semantic_index[i]), ureg_DECL_vs_input(ureg, i));

	ureg_END(ureg);

	vps.tokens = ureg_get_tokens(ureg, 0);
	vp = nvfx_vertprog_translate(nvfx, &vps);
	ureg_free_tokens(vps.tokens);
	ureg_destroy(ureg);

	return vp;
}
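/* Called at state-validation time: (re)translate the bound vertex program if
 * needed, allocate instruction and constant slots in the hardware heaps
 * (evicting other programs when full), patch branch and constant relocations
 * when those segments move, and upload dirty constants and code.
 */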
boolean
nvfx_vertprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_screen *screen = nvfx->screen;
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	struct nvfx_pipe_vertex_program *pvp = nvfx->vertprog;
	struct nvfx_vertex_program* vp;
	struct pipe_resource *constbuf;
	boolean upload_code = FALSE, upload_data = FALSE;
	int i;

	if (nvfx->render_mode == HW) {
		nvfx->fallback_swtnl &= ~NVFX_NEW_VERTPROG;
		vp = pvp->vp;

		if (!vp) {
			vp = nvfx_vertprog_translate(nvfx, &pvp->pipe);
			if (!vp)
				vp = NVFX_VP_FAILED;
			pvp->vp = vp;
		}

		if(vp == NVFX_VP_FAILED) {
			nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
			return FALSE;
		}

		constbuf = nvfx->constbuf[PIPE_SHADER_VERTEX];
	} else {
		vp = pvp->draw_vp;
		if (!vp) {
			pvp->draw_vp = vp = nvfx_vertprog_translate_draw_vp(nvfx, pvp);
			if (!vp) {
				_debug_printf("Error: unable to create a swtnl passthrough vertex shader: aborting.");
				abort();
			}
		}
		constbuf = NULL;
	}

	nvfx->hw_vertprog = vp;
	/* Allocate hw vtxprog exec slots */
	if (!vp->exec) {
		struct nouveau_resource *heap = nvfx->screen->vp_exec_heap;
		uint vplen = vp->nr_insns;

		if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
			while (heap->next && heap->size < vplen) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->exec);
			}

			if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
				debug_printf("Vertex shader too long: %u instructions\n", vplen);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		upload_code = TRUE;
	}

	/* Allocate hw vtxprog const slots */
	if (vp->nr_consts && !vp->data) {
		struct nouveau_resource *heap = nvfx->screen->vp_data_heap;

		if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
			while (heap->next && heap->size < vp->nr_consts) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->data);
			}

			if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
				debug_printf("Vertex shader uses too many constants: %u constants\n", vp->nr_consts);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		//printf("start at %u nc %u\n", vp->data->start, vp->nr_consts);

		/*XXX: handle this some day */
		assert(vp->data->start >= vp->data_start_min);

		upload_data = TRUE;
		if (vp->data_start != vp->data->start)
			upload_code = TRUE;
	}
	/* If exec or data segments moved we need to patch the program to
	 * fixup offsets and register IDs.
	 */
	if (vp->exec_start != vp->exec->start) {
		//printf("vp_relocs %u -> %u\n", vp->exec_start, vp->exec->start);
		for(unsigned i = 0; i < vp->branch_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->branch_relocs.data + i);
			uint32_t* hw = vp->insns[reloc->location].data;
			unsigned target = vp->exec->start + reloc->target;

			//debug_printf("vp_reloc hw %u -> hw %u\n", reloc->location, target);

			if(!nvfx->is_nv4x)
			{
				hw[2] &=~ NV30_VP_INST_IADDR_MASK;
				hw[2] |= (target & 0x1ff) << NV30_VP_INST_IADDR_SHIFT;
			}
			else
			{
				hw[3] &=~ NV40_VP_INST_IADDRL_MASK;
				hw[3] |= (target & 7) << NV40_VP_INST_IADDRL_SHIFT;

				hw[2] &=~ NV40_VP_INST_IADDRH_MASK;
				hw[2] |= ((target >> 3) & 0x3f) << NV40_VP_INST_IADDRH_SHIFT;
			}
		}

		vp->exec_start = vp->exec->start;
	}

	if (vp->data_start != vp->data->start) {
		for(unsigned i = 0; i < vp->const_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->const_relocs.data + i);
			struct nvfx_vertex_program_exec *vpi = &vp->insns[reloc->location];

			//printf("reloc %i to %i + %i\n", reloc->location, vp->data->start, reloc->target);

			vpi->data[1] &= ~NVFX_VP(INST_CONST_SRC_MASK);
			vpi->data[1] |=
				(reloc->target + vp->data->start) <<
				NVFX_VP(INST_CONST_SRC_SHIFT);
		}

		vp->data_start = vp->data->start;
		upload_code = TRUE;
	}
	/* Update + Upload constant values */
	if (vp->nr_consts) {
		float *map = NULL;

		if (constbuf)
			map = (float*)nvfx_buffer(constbuf)->data;

		/* Disabled debug code that floods the constant slots with a test pattern:
		for (i = 0; i < 512; i++) {
			float v[4] = {0.1, 0,2, 0.3, 0.4};
			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_CONST_ID, 5);
			OUT_RINGp (chan, (uint32_t *)v, 4);
			printf("frob %i\n", i);
		}
		*/

		for (i = nvfx->use_vp_clipping ? 6 : 0; i < vp->nr_consts; i++) {
			struct nvfx_vertex_program_data *vpd = &vp->consts[i];

			if (vpd->index >= 0) {
				if (!upload_data &&
				    !memcmp(vpd->value, &map[vpd->index * 4],
					    4 * sizeof(float)))
					continue;
				memcpy(vpd->value, &map[vpd->index * 4],
				       4 * sizeof(float));
			}

			//printf("upload into %i + %i: %f %f %f %f\n", vp->data->start, i, vpd->value[0], vpd->value[1], vpd->value[2], vpd->value[3]);

			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_CONST_ID, 5);
			OUT_RING  (chan, i + vp->data->start);
			OUT_RINGp (chan, (uint32_t *)vpd->value, 4);
		}
	}

	/* Upload vtxprog */
	if (upload_code) {
		BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_FROM_ID, 1);
		OUT_RING  (chan, vp->exec->start);
		for (i = 0; i < vp->nr_insns; i++) {
			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_INST(0), 4);
			//printf("%08x %08x %08x %08x\n", vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
			OUT_RINGp (chan, vp->insns[i].data, 4);
		}
	}

	if(nvfx->dirty & (NVFX_NEW_VERTPROG))
	{
		OUT_RING(chan, RING_3D(NV34TCL_VP_START_FROM_ID, 1));
		OUT_RING(chan, vp->exec->start);
		if(nvfx->is_nv4x) {
			OUT_RING(chan, RING_3D(NV40TCL_VP_ATTRIB_EN, 1));
			OUT_RING(chan, vp->ir);
		}
	}

	return TRUE;
}
void
nvfx_vertprog_destroy(struct nvfx_context *nvfx, struct nvfx_vertex_program *vp)
{
	if (vp->nr_insns)
		FREE(vp->insns);

	if (vp->nr_consts)
		FREE(vp->consts);

	nouveau_resource_free(&vp->exec);
	nouveau_resource_free(&vp->data);

	util_dynarray_fini(&vp->branch_relocs);
	util_dynarray_fini(&vp->const_relocs);
	FREE(vp);
}
static void *
nvfx_vp_state_create(struct pipe_context *pipe, const struct pipe_shader_state *cso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_pipe_vertex_program *pvp;

	pvp = CALLOC(1, sizeof(struct nvfx_pipe_vertex_program));
	pvp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
	tgsi_scan_shader(pvp->pipe.tokens, &pvp->info);
	pvp->draw_elements = MAX2(1, MIN2(pvp->info.num_outputs, 16));
	pvp->draw_no_elements = pvp->info.num_outputs == 0;

	return (void *)pvp;
}
static void
nvfx_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);

	nvfx->vertprog = hwcso;
	nvfx->dirty |= NVFX_NEW_VERTPROG;
	nvfx->draw_dirty |= NVFX_NEW_VERTPROG;
}
static void
nvfx_vp_state_delete(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_pipe_vertex_program *pvp = hwcso;

	if (pvp->draw_vs)
		draw_delete_vertex_shader(nvfx->draw, pvp->draw_vs);
	if (pvp->vp && pvp->vp != NVFX_VP_FAILED)
		nvfx_vertprog_destroy(nvfx, pvp->vp);
	if (pvp->draw_vp)
		nvfx_vertprog_destroy(nvfx, pvp->draw_vp);
	FREE((void*)pvp->pipe.tokens);
	FREE(pvp);
}
void
nvfx_init_vertprog_functions(struct nvfx_context *nvfx)
{
	nvfx->pipe.create_vs_state = nvfx_vp_state_create;
	nvfx->pipe.bind_vs_state = nvfx_vp_state_bind;
	nvfx->pipe.delete_vs_state = nvfx_vp_state_delete;
}