static boolean
nv04_init_hwctx(struct nv04_context *nv04)
{
- BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOTIFY, 1);
- OUT_RING(0);
+ // disabled: requires a valid notifier handle to be set first
+// BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOTIFY, 1);
+// OUT_RING(0);
BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOP, 1);
OUT_RING(0);
#define NV04_NEW_CONTROL (1 << 5)
#define NV04_NEW_VIEWPORT (1 << 6)
#define NV04_NEW_SAMPLER (1 << 7)
+#define NV04_NEW_FRAMEBUFFER (1 << 8)
+#define NV04_NEW_VTXARRAYS (1 << 9)
struct nv04_context {
struct pipe_context pipe;
unsigned vp_samplers;
uint32_t rt_enable;
- struct pipe_buffer *rt[4];
- struct pipe_buffer *zeta;
+ struct pipe_framebuffer_state *framebuffer;
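+	/* colour and depth (zeta) render target surfaces */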
+ struct pipe_surface *rt;
+ struct pipe_surface *zeta;
struct {
struct pipe_buffer *buffer;
unsigned delta;
} vb[16];
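+	/* CPU-side copy of the constant buffers, consumed by the draw module */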
+	float constbuf[PIPE_SHADER_TYPES][32][4];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+
struct vertex_info vertex_info;
struct {
struct pipe_buffer *constant_buf;
} fragprog;
- struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- unsigned num_vertex_buffers;
- unsigned num_vertex_elements;
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
struct pipe_viewport_state viewport;
};
extern void nv04_init_state_functions(struct nv04_context *nv04);
extern void nv04_init_surface_functions(struct nv04_context *nv04);
-extern void nv04_init_miptree_functions(struct pipe_screen *screen);
+extern void nv04_screen_init_miptree_functions(struct pipe_screen *screen);
/* nv04_clear.c */
extern void nv04_clear(struct pipe_context *pipe, struct pipe_surface *ps,
struct pipe_texture *pt = &nv04mt->base;
uint width = pt->width[0], height = pt->height[0];
uint offset = 0;
- int nr_faces, l, f;
+ int nr_faces, l;
nr_faces = 1;
pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
- nv04mt->level[l].pitch = pt->width[0] * pt->block.size;
+ nv04mt->level[l].pitch = pt->width[0];
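+		/* round the pitch up to a multiple of 64 */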
nv04mt->level[l].pitch = (nv04mt->level[l].pitch + 63) & ~63;
- nv04mt->level[l].image_offset =
- CALLOC(nr_faces, sizeof(unsigned));
-
width = MAX2(1, width >> 1);
height = MAX2(1, height >> 1);
-
}
- for (f = 0; f < nr_faces; f++) {
- for (l = 0; l <= pt->last_level; l++) {
- nv04mt->level[l].image_offset[f] = offset;
- offset += nv04mt->level[l].pitch * pt->height[l];
- }
+ for (l = 0; l <= pt->last_level; l++) {
+ nv04mt->level[l].image_offset = offset;
+ offset += nv04mt->level[l].pitch * pt->height[l];
}
nv04mt->total_size = offset;
}
static struct pipe_texture *
-nv04_miptree_create(struct pipe_screen *screen, const struct pipe_texture *pt)
+nv04_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
{
- struct pipe_winsys *ws = screen->winsys;
+ struct pipe_winsys *ws = pscreen->winsys;
struct nv04_miptree *mt;
	mt = MALLOC(sizeof(struct nv04_miptree));
	if (!mt)
		return NULL;
mt->base = *pt;
mt->base.refcount = 1;
- mt->base.screen = screen;
+ mt->base.screen = pscreen;
+ mt->shadow_tex = NULL;
+ mt->shadow_surface = NULL;
+
+ //mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
nv04_miptree_layout(mt);
- mt->buffer = ws->buffer_create(ws, 256, PIPE_BUFFER_USAGE_PIXEL,
- mt->total_size);
+ mt->buffer = ws->buffer_create(ws, 256, PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE,
+ mt->total_size);
if (!mt->buffer) {
+ printf("failed %d byte alloc\n",mt->total_size);
FREE(mt);
return NULL;
	}

	return &mt->base;
}
static void
-nv04_miptree_release(struct pipe_screen *screen, struct pipe_texture **pt)
+nv04_miptree_release(struct pipe_screen *pscreen, struct pipe_texture **ppt)
{
- struct pipe_texture *mt = *pt;
-
- *pt = NULL;
- if (--mt->refcount <= 0) {
- struct nv04_miptree *nv04mt = (struct nv04_miptree *)mt;
- int l;
-
- pipe_buffer_reference(screen, &nv04mt->buffer, NULL);
- for (l = 0; l <= mt->last_level; l++) {
- if (nv04mt->level[l].image_offset)
- FREE(nv04mt->level[l].image_offset);
- }
- FREE(nv04mt);
+ struct pipe_texture *pt = *ppt;
+ struct nv04_miptree *mt = (struct nv04_miptree *)pt;
+
+ *ppt = NULL;
+ if (--pt->refcount)
+ return;
+
+ pipe_buffer_reference(pscreen, &mt->buffer, NULL);
-	}
+
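+	/* release the shadow texture and surface, if any */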
+ if (mt->shadow_tex) {
+ assert(mt->shadow_surface);
+ pscreen->tex_surface_release(pscreen, &mt->shadow_surface);
+ nv04_miptree_release(pscreen, &mt->shadow_tex);
+ }
+
+ FREE(mt);
}
static struct pipe_surface *
unsigned face, unsigned level, unsigned zslice,
unsigned flags)
{
- struct pipe_winsys *ws = pscreen->winsys;
struct nv04_miptree *nv04mt = (struct nv04_miptree *)pt;
struct pipe_surface *ps;
pipe_texture_reference(&ps->texture, pt);
pipe_buffer_reference(pscreen, &ps->buffer, nv04mt->buffer);
ps->format = pt->format;
- ps->block = pt->block;
ps->width = pt->width[level];
ps->height = pt->height[level];
+ ps->block = pt->block;
ps->nblocksx = pt->nblocksx[level];
ps->nblocksy = pt->nblocksy[level];
ps->stride = nv04mt->level[level].pitch;
+ ps->usage = flags;
+ ps->status = PIPE_SURFACE_STATUS_DEFINED;
ps->refcount = 1;
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
- if (pt->target == PIPE_TEXTURE_CUBE) {
- ps->offset = nv04mt->level[level].image_offset[face];
- } else {
- ps->offset = nv04mt->level[level].image_offset[0];
- }
+ ps->offset = nv04mt->level[level].image_offset;
return ps;
}
nv04_miptree_surface_del(struct pipe_screen *pscreen,
struct pipe_surface **psurface)
{
+ struct pipe_surface *ps = *psurface;
+
+ *psurface = NULL;
+ if (--ps->refcount > 0)
+ return;
+
+ pipe_texture_reference(&ps->texture, NULL);
+	pipe_buffer_reference(pscreen, &ps->buffer, NULL);
+ FREE(ps);
}
void
-nv04_init_miptree_functions(struct pipe_screen *pscreen)
+nv04_screen_init_miptree_functions(struct pipe_screen *pscreen)
{
pscreen->texture_create = nv04_miptree_create;
pscreen->texture_release = nv04_miptree_release;
return 0;
case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
return 0;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 0;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 0;
default:
NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
return 0;
const struct pipe_constant_buffer *buf )
{
struct nv04_context *nv04 = nv04_context(pipe);
-
- if (shader == PIPE_SHADER_VERTEX) {
- nv04->vertprog.constant_buf = buf->buffer;
- nv04->dirty |= NV04_NEW_VERTPROG;
- } else
- if (shader == PIPE_SHADER_FRAGMENT) {
- nv04->fragprog.constant_buf = buf->buffer;
- nv04->dirty |= NV04_NEW_FRAGPROG;
+ struct pipe_winsys *ws = pipe->winsys;
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
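+	/* copy the new constants into the shadow copy while the buffer is mapped */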
+ if (buf) {
+ void *mapped;
+ if (buf->buffer && buf->buffer->size &&
+ (mapped = ws->buffer_map(ws, buf->buffer, PIPE_BUFFER_USAGE_CPU_READ)))
+ {
+ memcpy(nv04->constbuf[shader], mapped, buf->buffer->size);
+ nv04->constbuf_nr[shader] =
+ buf->buffer->size / (4 * sizeof(float));
+ ws->buffer_unmap(ws, buf->buffer);
+ }
}
}
const struct pipe_framebuffer_state *fb)
{
struct nv04_context *nv04 = nv04_context(pipe);
- struct pipe_surface *rt, *zeta;
- uint32_t rt_format, w, h;
- int colour_format = 0, zeta_format = 0;
-
- w = fb->cbufs[0]->width;
- h = fb->cbufs[0]->height;
- colour_format = fb->cbufs[0]->format;
- rt = fb->cbufs[0];
-
- if (fb->zsbuf) {
- if (colour_format) {
- assert(w == fb->zsbuf->width);
- assert(h == fb->zsbuf->height);
- } else {
- w = fb->zsbuf->width;
- h = fb->zsbuf->height;
- }
-
- zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
- }
-
- switch (colour_format) {
- case PIPE_FORMAT_A8R8G8B8_UNORM:
- case 0:
- rt_format = 0x108;
- break;
- case PIPE_FORMAT_R5G6B5_UNORM:
- rt_format = 0x103;
- break;
- default:
- assert(0);
- }
-
- BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_FORMAT, 1);
- OUT_RING(rt_format);
+
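+	/* just record the new state; hardware setup is deferred to state emission */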
+ nv04->framebuffer = (struct pipe_framebuffer_state*)fb;
- /* FIXME pitches have to be aligned ! */
- BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
- OUT_RING(rt->stride|(zeta->stride<<16));
- OUT_RELOCl(rt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
- if (fb->zsbuf) {
- BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
- OUT_RELOCl(zeta->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
- }
+ nv04->dirty |= NV04_NEW_FRAMEBUFFER;
}
-
static void
nv04_set_polygon_stipple(struct pipe_context *pipe,
const struct pipe_poly_stipple *stipple)
{
struct nv04_context *nv04 = nv04_context(pipe);
- draw_flush(nv04->draw);
-
- memcpy(nv04->vertex_buffer, buffers, count * sizeof(buffers[0]));
- nv04->num_vertex_buffers = count;
+ memcpy(nv04->vtxbuf, buffers, count * sizeof(buffers[0]));
+ nv04->dirty |= NV04_NEW_VTXARRAYS;
draw_set_vertex_buffers(nv04->draw, count, buffers);
}
{
struct nv04_context *nv04 = nv04_context(pipe);
- draw_flush(nv04->draw);
+ memcpy(nv04->vtxelt, elements, sizeof(*elements) * count);
+ nv04->dirty |= NV04_NEW_VTXARRAYS;
- nv04->num_vertex_elements = count;
draw_set_vertex_elements(nv04->draw, count, elements);
}
struct pipe_buffer *buffer;
uint total_size;
+ struct pipe_texture *shadow_tex;
+ struct pipe_surface *shadow_surface;
+
struct {
uint pitch;
- uint *image_offset;
+ uint image_offset;
} level[PIPE_MAX_TEXTURE_LEVELS];
};
memset(&vinfo, 0, sizeof(vinfo));
for (i = 0; i < fp->info.num_inputs; i++) {
- switch (i) {
+ int isn = fp->info.input_semantic_name[i];
+ int isi = fp->info.input_semantic_index[i];
+ switch (isn) {
case TGSI_SEMANTIC_POSITION:
draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src++);
break;
break;
}
}
+
+ printf("%d vertex input\n",fp->info.num_inputs);
draw_compute_vertex_size(&vinfo);
}
OUT_RING(nv04->sampler[unit]->filter);
}
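+/* Emit render target / zeta buffer setup through the NV04 3D surfaces object */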
+static void nv04_state_emit_framebuffer(struct nv04_context *nv04)
+{
+	struct pipe_framebuffer_state *fb = nv04->framebuffer;
+	struct pipe_surface *rt, *zeta = NULL;
+ uint32_t rt_format, w, h;
+ int colour_format = 0, zeta_format = 0;
+
+ w = fb->cbufs[0]->width;
+ h = fb->cbufs[0]->height;
+ colour_format = fb->cbufs[0]->format;
+ rt = fb->cbufs[0];
+
+ if (fb->zsbuf) {
+ if (colour_format) {
+ assert(w == fb->zsbuf->width);
+ assert(h == fb->zsbuf->height);
+ } else {
+ w = fb->zsbuf->width;
+ h = fb->zsbuf->height;
+ }
+
+ zeta_format = fb->zsbuf->format;
+ zeta = fb->zsbuf;
+ }
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format = 0x108;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format = 0x103;
+ break;
+ default:
+ assert(0);
+ }
+
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_FORMAT, 1);
+ OUT_RING(rt_format);
+
+ /* FIXME pitches have to be aligned ! */
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
+	OUT_RING(rt->stride | (zeta ? (zeta->stride << 16) : 0));
+ OUT_RELOCl(rt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ if (fb->zsbuf) {
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
+ OUT_RELOCl(zeta->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ }
+}
+
void
nv04_emit_hw_state(struct nv04_context *nv04)
{
if (nv04->dirty & NV04_NEW_FRAGPROG) {
nv04_fragprog_bind(nv04, nv04->fragprog.current);
- /*XXX: clear NV04_NEW_FRAGPROG if no new program uploaded */
+ nv04->dirty &= ~NV04_NEW_FRAGPROG;
nv04->dirty_samplers |= (1<<10);
nv04->dirty_samplers = 0;
}
nv04_emit_blend(nv04);
}
+ if (nv04->dirty & NV04_NEW_VTXARRAYS) {
+ nv04->dirty &= ~NV04_NEW_VTXARRAYS;
+ nv04_vertex_layout(nv04);
+ }
+
if (nv04->dirty & NV04_NEW_SAMPLER) {
nv04->dirty &= ~NV04_NEW_SAMPLER;
// nv04_state_emit_viewport(nv04);
}
+ if (nv04->dirty & NV04_NEW_FRAMEBUFFER) {
+ nv04->dirty &= ~NV04_NEW_FRAMEBUFFER;
+ nv04_state_emit_framebuffer(nv04);
+ }
+
/* Emit relocs for every referenced buffer.
* This is to ensure the bufmgr has an accurate idea of how
* the buffer is used. This isn't very efficient, but we don't
*/
/* Render target */
-/* BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
- OUT_RING(rt->stride|(zeta->stride<<16));
- OUT_RELOCl(rt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
- if (fb->zsbuf) {
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
+	OUT_RING(nv04->rt->stride | (nv04->zeta ? (nv04->zeta->stride << 16) : 0));
+	OUT_RELOCl(nv04->rt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+	if (nv04->zeta) {
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
- OUT_RELOCl(zeta->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
- }*/
+		OUT_RELOCl(nv04->zeta->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ }
/* Texture images */
for (i = 0; i < 1; i++) {
struct draw_context *draw = nv04->draw;
unsigned i;
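+	/* emit any dirty state before drawing */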
+ nv04_emit_hw_state(nv04);
+
/*
* Map vertex buffers
*/
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
- if (nv04->vertex_buffer[i].buffer) {
+ if (nv04->vtxbuf[i].buffer) {
void *buf
= pipe->winsys->buffer_map(pipe->winsys,
- nv04->vertex_buffer[i].buffer,
+ nv04->vtxbuf[i].buffer,
PIPE_BUFFER_USAGE_CPU_READ);
draw_set_mapped_vertex_buffer(draw, i, buf);
}
draw_set_mapped_element_buffer(draw, 0, NULL);
}
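+	/* hand the vertex shader constants to the draw module */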
+ draw_set_mapped_constant_buffer(draw,
+ nv04->constbuf[PIPE_SHADER_VERTEX],
+ nv04->constbuf_nr[PIPE_SHADER_VERTEX]);
+
/* draw! */
draw_arrays(nv04->draw, prim, start, count);
* unmap vertex/index buffers
*/
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
- if (nv04->vertex_buffer[i].buffer) {
- pipe->winsys->buffer_unmap(pipe->winsys, nv04->vertex_buffer[i].buffer);
+ if (nv04->vtxbuf[i].buffer) {
+ pipe->winsys->buffer_unmap(pipe->winsys, nv04->vtxbuf[i].buffer);
draw_set_mapped_vertex_buffer(draw, i, NULL);
}
}
boolean nv04_draw_arrays( struct pipe_context *pipe,
unsigned prim, unsigned start, unsigned count)
{
+ printf("coucou in draw arrays\n");
return nv04_draw_elements(pipe, NULL, 0, prim, start, count);
}
int i;
switch (dev->chipset & 0xf0) {
+ case 0x00:
+ /* NV04 */
case 0x10:
case 0x20:
/* NV10 */
NOUVEAU_ERR("AII wrap unhandled\n");
/*XXX: assumes subc 0 is populated */
- RING_SPACE_CH(fence->channel, 2);
- OUT_RING_CH (fence->channel, 0x00040050);
- OUT_RING_CH (fence->channel, nvfence->sequence);
+	/* this is not the way to fence on NV04 */
+ if (nvchan->base.device->chipset >= 0x10) {
+ RING_SPACE_CH(fence->channel, 2);
+ OUT_RING_CH (fence->channel, 0x00040050);
+ OUT_RING_CH (fence->channel, nvfence->sequence);
+ }
if (nvchan->fence_tail) {
nouveau_fence(nvchan->fence_tail)->next = fence;
return NULL;
switch (chipset & 0xf0) {
+ case 0x00:
+ hws_create = nv04_screen_create;
+ hw_create = nv04_create;
+ break;
case 0x10:
hws_create = nv10_screen_create;
hw_create = nv10_create;