} stipple;
struct nouveau_stateobj *fragprog;
+ struct nouveau_stateobj *vertprog;
};
struct nv40_context {
struct pipe_scissor_state scissor;
unsigned stipple[32];
struct pipe_clip_state clip;
+ struct nv40_vertex_program *vertprog;
struct nv40_fragment_program *fragprog;
struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
} pipe_state;
struct nouveau_stateobj *so_viewport;
struct nouveau_stateobj *so_stipple;
- struct {
- struct nv40_vertex_program *active;
-
- struct nv40_vertex_program *current;
- struct pipe_buffer *constant_buf;
- } vertprog;
-
struct pipe_vertex_buffer vtxbuf[PIPE_ATTRIB_MAX];
struct pipe_vertex_element vtxelt[PIPE_ATTRIB_MAX];
};
extern struct draw_stage *nv40_draw_render_stage(struct nv40_context *nv40);
/* nv40_vertprog.c */
-extern void nv40_vertprog_translate(struct nv40_context *,
- struct nv40_vertex_program *);
-extern void nv40_vertprog_bind(struct nv40_context *,
- struct nv40_vertex_program *);
extern void nv40_vertprog_destroy(struct nv40_context *,
struct nv40_vertex_program *);
extern struct nv40_state_entry nv40_state_scissor;
extern struct nv40_state_entry nv40_state_stipple;
extern struct nv40_state_entry nv40_state_fragprog;
+extern struct nv40_state_entry nv40_state_vertprog;
/* nv40_vbo.c */
extern boolean nv40_draw_arrays(struct pipe_context *, unsigned mode,
nv40->pipe_state.constbuf[PIPE_SHADER_FRAGMENT];
struct pipe_winsys *ws = nv40->pipe.winsys;
struct nouveau_stateobj *so;
- unsigned new_program = FALSE;
int i;
if (fp->translated)
nv40->fallback |= NV40_FALLBACK_RAST;
return FALSE;
}
- new_program = TRUE;
fp->buffer = ws->buffer_create(ws, 0x100, 0, fp->insn_len * 4);
nv40_fragprog_upload(nv40, fp);
nv40_fragprog_upload(nv40, fp);
}
- so_ref(fp->so, &nv40->state.fragprog);
- return new_program;
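+	/* Reference the program's state object and report a change only
+	 * when it differs from what is currently referenced.
+	 */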
+ if (fp->so != nv40->state.fragprog) {
+ so_ref(fp->so, &nv40->state.fragprog);
+ return TRUE;
+ }
+
+ return FALSE;
}
void
nv40_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
struct nv40_context *nv40 = nv40_context(pipe);
- struct nv40_vertex_program *vp = hwcso;
- nv40->vertprog.current = vp;
+ nv40->pipe_state.vertprog = hwcso;
nv40->dirty |= NV40_NEW_VERTPROG;
}
struct nv40_context *nv40 = nv40_context(pipe);
if (shader == PIPE_SHADER_VERTEX) {
- nv40->vertprog.constant_buf = buf->buffer;
+ nv40->pipe_state.constbuf[PIPE_SHADER_VERTEX] = buf->buffer;
nv40->dirty |= NV40_NEW_VERTPROG;
} else
if (shader == PIPE_SHADER_FRAGMENT) {
uint32_t ir;
uint32_t or;
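+	/* Prebuilt hw state for this program: exec slot start address and
+	 * input/output attribute enable masks.
+	 */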
+ struct nouveau_stateobj *so;
};
struct nv40_fragment_program_data {
&nv40_state_scissor,
&nv40_state_stipple,
&nv40_state_fragprog,
+ &nv40_state_vertprog,
NULL
};
}
if (nv40->dirty & NV40_NEW_VERTPROG) {
- nv40_vertprog_bind(nv40, nv40->vertprog.current);
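+		/* The state object was built during validation; re-emit it here. */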
+ so_emit(nv40->nvws, nv40->state.vertprog);
nv40->dirty &= ~NV40_NEW_VERTPROG;
}
nv40_vbo_arrays_update(struct nv40_context *nv40, struct pipe_buffer *ib,
unsigned ib_format)
{
- struct nv40_vertex_program *vp = nv40->vertprog.active;
+ struct nv40_vertex_program *vp = nv40->pipe_state.vertprog;
struct nouveau_stateobj *vtxbuf, *vtxfmt;
unsigned inputs, hw, num_hw;
unsigned vb_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
return TRUE;
}
-void
+static void
nv40_vertprog_translate(struct nv40_context *nv40,
struct nv40_vertex_program *vp)
{
free(vpc);
}
-void
-nv40_vertprog_bind(struct nv40_context *nv40, struct nv40_vertex_program *vp)
+static boolean
+nv40_vertprog_validate(struct nv40_context *nv40)
{
+ struct nv40_vertex_program *vp = nv40->pipe_state.vertprog;
+ struct pipe_buffer *constbuf =
+ nv40->pipe_state.constbuf[PIPE_SHADER_VERTEX];
struct nouveau_winsys *nvws = nv40->nvws;
struct pipe_winsys *ws = nv40->pipe.winsys;
boolean upload_code = FALSE, upload_data = FALSE;
int i;
/* Translate TGSI shader into hw bytecode */
+ if (vp->translated)
+ goto check_gpu_resources;
+
+ nv40_vertprog_translate(nv40, vp);
if (!vp->translated) {
- nv40_vertprog_translate(nv40, vp);
- if (!vp->translated)
- assert(0);
+ nv40->fallback |= NV40_FALLBACK_TNL;
+ return FALSE;
}
+check_gpu_resources:
/* Allocate hw vtxprog exec slots */
if (!vp->exec) {
struct nouveau_resource *heap = nv40->hw->vp_exec_heap;
+ struct nouveau_stateobj *so;
uint vplen = vp->nr_insns;
if (nvws->res_alloc(heap, vplen, vp, &vp->exec)) {
assert(0);
}
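+
+		/* Build a state object that points the hw at the program's
+		 * exec slot and enables its input/output attributes.
+		 */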
+ so = so_new(5, 0);
+ so_method(so, nv40->hw->curie, NV40TCL_VP_START_FROM_ID, 1);
+ so_data (so, vp->exec->start);
+ so_method(so, nv40->hw->curie, NV40TCL_VP_ATTRIB_EN, 2);
+ so_data (so, vp->ir);
+ so_data (so, vp->or);
+ so_ref(so, &vp->so);
+ so_ref(NULL, &so);
+
upload_code = TRUE;
}
if (vp->nr_consts) {
float *map = NULL;
- if (nv40->vertprog.constant_buf) {
- map = ws->buffer_map(ws, nv40->vertprog.constant_buf,
+ if (constbuf) {
+ map = ws->buffer_map(ws, constbuf,
PIPE_BUFFER_USAGE_CPU_READ);
}
OUT_RINGp ((uint32_t *)vpd->value, 4);
}
- if (map) {
- ws->buffer_unmap(ws, nv40->vertprog.constant_buf);
- }
+ if (constbuf)
+ ws->buffer_unmap(ws, constbuf);
}
/* Upload vtxprog */
}
}
- BEGIN_RING(curie, NV40TCL_VP_START_FROM_ID, 1);
- OUT_RING (vp->exec->start);
- BEGIN_RING(curie, NV40TCL_VP_ATTRIB_EN, 2);
- OUT_RING (vp->ir);
- OUT_RING (vp->or);
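+	/* Only flag a hw state change when a different state object is now
+	 * referenced.
+	 */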
+ if (vp->so != nv40->state.vertprog) {
+ so_ref(vp->so, &nv40->state.vertprog);
+ return TRUE;
+ }
- nv40->vertprog.active = vp;
+ return FALSE;
}
void
free(vp->insns);
}
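+
+/* State entry for the nv40 validation list: reruns nv40_vertprog_validate
+ * whenever NV40_NEW_VERTPROG is dirty.
+ */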
+struct nv40_state_entry nv40_state_vertprog = {
+ .validate = nv40_vertprog_validate,
+ .dirty = {
+ .pipe = NV40_NEW_VERTPROG,
+ .hw = NV40_NEW_VERTPROG
+ }
+};
+