nv04_state.c \
nv04_state_emit.c \
nv04_surface.c \
+ nv04_transfer.c \
nv04_vbo.c
include ../../Makefile.template
mt->base = *pt;
mt->base.refcount = 1;
mt->base.screen = pscreen;
- mt->shadow_tex = NULL;
- mt->shadow_surface = NULL;
//mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
FREE(mt->level[l].image_offset);
}
- if (mt->shadow_tex) {
- assert(mt->shadow_surface);
- pscreen->tex_surface_release(pscreen, &mt->shadow_surface);
- nv04_miptree_release(pscreen, &mt->shadow_tex);
- }
-
FREE(mt);
}
unsigned flags)
{
struct nv04_miptree *nv04mt = (struct nv04_miptree *)pt;
- struct pipe_surface *ps;
+ struct nv04_surface *ns;
- ps = CALLOC_STRUCT(pipe_surface);
- if (!ps)
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
return NULL;
- pipe_texture_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = pt->width[level];
- ps->height = pt->height[level];
- ps->block = pt->block;
- ps->nblocksx = pt->nblocksx[level];
- ps->nblocksy = pt->nblocksy[level];
- ps->stride = nv04mt->level[level].pitch;
- ps->usage = flags;
- ps->status = PIPE_SURFACE_STATUS_DEFINED;
- ps->refcount = 1;
- ps->face = face;
- ps->level = level;
- ps->zslice = zslice;
-
- ps->offset = nv04mt->level[level].image_offset;
-
- return ps;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ ns->base.status = PIPE_SURFACE_STATUS_DEFINED;
+ ns->base.refcount = 1;
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv04mt->level[level].pitch;
+
+ ns->base.offset = nv04mt->level[level].image_offset;
+
+ return &ns->base;
}
static void
return FALSE;
}
-static void *
-nv04_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
- unsigned flags )
-{
- void *map;
- struct nv04_miptree *nv04mt = (struct nv04_miptree *)surface->texture;
-
- map = pipe_buffer_map(screen, nv04mt->buffer, flags);
- if (!map)
- return NULL;
-
- return map + surface->offset;
-}
-
-static void
-nv04_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
-{
- struct nv04_miptree *nv04mt = (struct nv04_miptree *)surface->texture;
-
- pipe_buffer_unmap(screen, nv04mt->buffer);
-}
-
static void
nv04_screen_destroy(struct pipe_screen *pscreen)
{
screen->pipe.is_format_supported = nv04_screen_is_format_supported;
- screen->pipe.surface_map = nv04_surface_map;
- screen->pipe.surface_unmap = nv04_surface_unmap;
-
nv04_screen_init_miptree_functions(&screen->pipe);
+ nv04_screen_init_transfer_functions(&screen->pipe);
u_simple_screen_init(&screen->pipe);
return &screen->pipe;
return (struct nv04_screen *)screen;
}
+void
+nv04_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
#endif
struct pipe_buffer *buffer;
uint total_size;
- struct pipe_texture *shadow_tex;
- struct pipe_surface *shadow_surface;
-
struct {
uint pitch;
uint image_offset;
static void nv04_state_emit_framebuffer(struct nv04_context* nv04)
{
struct pipe_framebuffer_state* fb = nv04->framebuffer;
- struct pipe_surface *rt, *zeta;
+ struct nv04_surface *rt, *zeta;
uint32_t rt_format, w, h;
int colour_format = 0, zeta_format = 0;
struct nv04_miptree *nv04mt = 0;
w = fb->cbufs[0]->width;
h = fb->cbufs[0]->height;
colour_format = fb->cbufs[0]->format;
- rt = fb->cbufs[0];
+ rt = (struct nv04_surface *)fb->cbufs[0];
if (fb->zsbuf) {
if (colour_format) {
}
zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
+ zeta = (struct nv04_surface *)fb->zsbuf;
}
switch (colour_format) {
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_FORMAT, 1);
OUT_RING(rt_format);
- nv04mt = (struct nv04_miptree *)rt->texture;
+ nv04mt = (struct nv04_miptree *)rt->base.texture;
/* FIXME pitches have to be aligned ! */
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
- OUT_RING(rt->stride|(zeta->stride<<16));
+ OUT_RING(rt->pitch|(zeta->pitch<<16));
OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
if (fb->zsbuf) {
- nv04mt = (struct nv04_miptree *)zeta->texture;
+ nv04mt = (struct nv04_miptree *)zeta->base.texture;
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
}
*/
/* Render target */
+ unsigned rt_pitch = ((struct nv04_surface *)nv04->rt)->pitch;
+ unsigned zeta_pitch = ((struct nv04_surface *)nv04->zeta)->pitch;
+
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
- OUT_RING(nv04->rt->stride|(nv04->zeta->stride<<16));
+ OUT_RING(rt_pitch|(zeta_pitch<<16));
OUT_RELOCl(nv04->rt, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
if (nv04->zeta) {
BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
struct nouveau_grobj *sifm = ctx->sifm;
struct nouveau_bo *src_bo = ctx->nvws->get_bo(ctx->buf(src));
struct nouveau_bo *dst_bo = ctx->nvws->get_bo(ctx->buf(dst));
+ const unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
const unsigned max_w = 1024;
const unsigned max_h = 1024;
const unsigned sub_w = w > max_w ? max_w : w;
/* POT or GTFO */
assert(!(w & (w - 1)) && !(h & (h - 1)));
+ /* Source and destination must have the same pitch */
+ assert(src_pitch == ((struct nv04_surface *)dst)->pitch);
BEGIN_RING(chan, swzsurf, NV04_SWIZZLED_SURFACE_DMA_IMAGE, 1);
OUT_RELOCo(chan, dst_bo,
for (cx = 0; cx < w; cx += sub_w) {
BEGIN_RING(chan, swzsurf, NV04_SWIZZLED_SURFACE_OFFSET, 1);
OUT_RELOCl(chan, dst_bo, dst->offset + nv04_swizzle_bits(cx, cy) *
- dst->block.size, NOUVEAU_BO_GART |
+ dst->texture->block.size, NOUVEAU_BO_GART |
NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_CONVERSION, 9);
BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_SIZE, 4);
OUT_RING (chan, sub_h << 16 | sub_w);
- OUT_RING (chan, src->stride |
+ OUT_RING (chan, src_pitch |
NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_ORIGIN_CENTER |
NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_FILTER_POINT_SAMPLE);
- OUT_RELOCl(chan, src_bo, src->offset + cy * src->stride +
- cx * src->block.size, NOUVEAU_BO_GART |
+ OUT_RELOCl(chan, src_bo, src->offset + cy * src_pitch +
+ cx * src->texture->block.size, NOUVEAU_BO_GART |
NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
OUT_RING (chan, 0);
}
struct nouveau_grobj *m2mf = ctx->m2mf;
struct nouveau_bo *src_bo = ctx->nvws->get_bo(ctx->buf(src));
struct nouveau_bo *dst_bo = ctx->nvws->get_bo(ctx->buf(dst));
- unsigned dst_offset, src_offset;
-
- dst_offset = dst->offset + (dy * dst->stride) + (dx * dst->block.size);
- src_offset = src->offset + (sy * src->stride) + (sx * src->block.size);
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ unsigned dst_offset = dst->offset + dy * dst_pitch +
+ dx * dst->texture->block.size;
+ unsigned src_offset = src->offset + sy * src_pitch +
+ sx * src->texture->block.size;
WAIT_RING (chan, 3 + ((h / 2047) + 1) * 9);
BEGIN_RING(chan, m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_DMA_BUFFER_IN, 2);
NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
OUT_RELOCl(chan, dst_bo, dst_offset,
NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_WR);
- OUT_RING (chan, src->stride);
- OUT_RING (chan, dst->stride);
- OUT_RING (chan, w * src->block.size);
+ OUT_RING (chan, src_pitch);
+ OUT_RING (chan, dst_pitch);
+ OUT_RING (chan, w * src->texture->block.size);
OUT_RING (chan, count);
OUT_RING (chan, 0x0101);
OUT_RING (chan, 0);
h -= count;
- src_offset += src->stride * count;
- dst_offset += dst->stride * count;
+ src_offset += src_pitch * count;
+ dst_offset += dst_pitch * count;
}
return 0;
struct nouveau_grobj *blit = ctx->blit;
struct nouveau_bo *src_bo = ctx->nvws->get_bo(ctx->buf(src));
struct nouveau_bo *dst_bo = ctx->nvws->get_bo(ctx->buf(dst));
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
int format;
format = nv04_surface_format(dst->format);
OUT_RELOCo(chan, dst_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
OUT_RING (chan, format);
- OUT_RING (chan, (dst->stride << 16) | src->stride);
+ OUT_RING (chan, (dst_pitch << 16) | src_pitch);
OUT_RELOCl(chan, src_bo, src->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
int dx, int dy, struct pipe_surface *src, int sx, int sy,
int w, int h)
{
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
int src_linear = src->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR;
int dst_linear = dst->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR;
* to NV_MEMORY_TO_MEMORY_FORMAT in this case.
*/
if ((src->offset & 63) || (dst->offset & 63) ||
- (src->stride & 63) || (dst->stride & 63)) {
+ (src_pitch & 63) || (dst_pitch & 63) ||
+ debug_get_bool_option("NOUVEAU_NO_COPYBLIT", FALSE)) {
nv04_surface_copy_m2mf(ctx, dst, dx, dy, src, sx, sy, w, h);
return;
}
struct nouveau_grobj *surf2d = ctx->surf2d;
struct nouveau_grobj *rect = ctx->rect;
struct nouveau_bo *dst_bo = ctx->nvws->get_bo(ctx->buf(dst));
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
int cs2d_format, gdirect_format;
cs2d_format = nv04_surface_format(dst->format);
OUT_RELOCo(chan, dst_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
OUT_RING (chan, cs2d_format);
- OUT_RING (chan, (dst->stride << 16) | dst->stride);
+ OUT_RING (chan, (dst_pitch << 16) | dst_pitch);
OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
#ifndef __NV04_SURFACE_2D_H__
#define __NV04_SURFACE_2D_H__
+struct nv04_surface {
+ struct pipe_surface base;
+ unsigned pitch;
+};
+
struct nv04_surface_2d {
struct nouveau_winsys *nvws;
struct nouveau_notifier *ntfy;
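
The pitch now lives in the new nv04_surface wrapper rather than in pipe_surface itself; as an illustration only (a hypothetical helper, not part of this patch), the cast pattern used throughout the 2D code looks like this:

static INLINE unsigned
nv04_surface_pitch(struct pipe_surface *ps)
{
	return ((struct nv04_surface *)ps)->pitch;
}
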
--- /dev/null
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv04_context.h"
+#include "nv04_screen.h"
+#include "nv04_state.h"
+
+struct nv04_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv04_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv04_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->compressed = pt->compressed;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv04_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv04_miptree *mt = (struct nv04_miptree *)pt;
+ struct nv04_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv04_transfer);
+ if (!tx)
+ return NULL;
+
+ tx->base.refcount = 1;
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv04_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv04_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv04_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv04_screen *nvscreen = nv04_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv04_transfer_del(struct pipe_screen *pscreen, struct pipe_transfer **pptx)
+{
+ struct pipe_transfer *ptx = *pptx;
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct nv04_screen *nvscreen = nv04_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ *pptx = NULL;
+ if (--ptx->refcount)
+ return;
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv04_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv04_miptree *mt = (struct nv04_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv04_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv04_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+ struct nv04_miptree *mt = (struct nv04_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv04_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv04_transfer_new;
+ pscreen->tex_transfer_release = nv04_transfer_del;
+ pscreen->transfer_map = nv04_transfer_map;
+ pscreen->transfer_unmap = nv04_transfer_unmap;
+}
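
For reference, a minimal sketch (not part of the patch) of how a state tracker might drive the new transfer hooks to upload a rectangle of pixels; pscreen, pt, src and src_stride are assumed to exist, and error handling is omitted:

static void
upload_rect(struct pipe_screen *pscreen, struct pipe_texture *pt,
            const uint8_t *src, unsigned src_stride,
            unsigned w, unsigned h)
{
	struct pipe_transfer *tx;
	uint8_t *map;
	unsigned y;

	tx = pscreen->get_tex_transfer(pscreen, pt, 0, 0, 0,
	                               PIPE_TRANSFER_WRITE, 0, 0, w, h);
	map = pscreen->transfer_map(pscreen, tx);
	for (y = 0; y < h; y++)
		memcpy(map + y * tx->stride, src + y * src_stride,
		       w * tx->block.size);
	pscreen->transfer_unmap(pscreen, tx);
	pscreen->tex_transfer_release(pscreen, &tx);
}
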
nv10_state.c \
nv10_state_emit.c \
nv10_surface.c \
+ nv10_transfer.c \
nv10_vbo.c
include ../../Makefile.template
unsigned face, unsigned level, unsigned zslice,
unsigned flags)
{
- struct pipe_winsys *ws = screen->winsys;
struct nv10_miptree *nv10mt = (struct nv10_miptree *)pt;
- struct pipe_surface *ps;
+ struct nv04_surface *ns;
- ps = CALLOC_STRUCT(pipe_surface);
- if (!ps)
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
return NULL;
- pipe_texture_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = pt->width[level];
- ps->height = pt->height[level];
- ps->block = pt->block;
- ps->nblocksx = pt->nblocksx[level];
- ps->nblocksy = pt->nblocksy[level];
- ps->stride = nv10mt->level[level].pitch;
- ps->refcount = 1;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ ns->base.status = PIPE_SURFACE_STATUS_DEFINED;
+ ns->base.refcount = 1;
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv10mt->level[level].pitch;
if (pt->target == PIPE_TEXTURE_CUBE) {
- ps->offset = nv10mt->level[level].image_offset[face];
+ ns->base.offset = nv10mt->level[level].image_offset[face];
} else {
- ps->offset = nv10mt->level[level].image_offset[0];
+ ns->base.offset = nv10mt->level[level].image_offset[0];
}
- return ps;
+ return &ns->base;
}
static void
return FALSE;
}
-static void *
-nv10_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
- unsigned flags )
-{
- struct pipe_winsys *ws = screen->winsys;
- void *map;
- struct nv10_miptree *nv10mt = (struct nv10_miptree *)surface->texture;
-
- map = ws->buffer_map(ws, nv10mt->buffer, flags);
- if (!map)
- return NULL;
-
- return map + surface->offset;
-}
-
-static void
-nv10_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
-{
- struct pipe_winsys *ws = screen->winsys;
- struct nv10_miptree *nv10mt = (struct nv10_miptree *)surface->texture;
-
- ws->buffer_unmap(ws, nv10mt->buffer);
-}
-
static void
nv10_screen_destroy(struct pipe_screen *pscreen)
{
screen->pipe.is_format_supported = nv10_screen_is_format_supported;
- screen->pipe.surface_map = nv10_surface_map;
- screen->pipe.surface_unmap = nv10_surface_unmap;
-
nv10_screen_init_miptree_functions(&screen->pipe);
+ nv10_screen_init_transfer_functions(&screen->pipe);
u_simple_screen_init(&screen->pipe);
return &screen->pipe;
return (struct nv10_screen *)screen;
}
+
+void
+nv10_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
#endif
static void nv10_state_emit_framebuffer(struct nv10_context* nv10)
{
struct pipe_framebuffer_state* fb = nv10->framebuffer;
- struct pipe_surface *rt, *zeta = NULL;
+ struct nv04_surface *rt, *zeta = NULL;
uint32_t rt_format, w, h;
int colour_format = 0, zeta_format = 0;
struct nv10_miptree *nv10mt = 0;
w = fb->cbufs[0]->width;
h = fb->cbufs[0]->height;
colour_format = fb->cbufs[0]->format;
- rt = fb->cbufs[0];
+ rt = (struct nv04_surface *)fb->cbufs[0];
if (fb->zsbuf) {
if (colour_format) {
}
zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
+ zeta = (struct nv04_surface *)fb->zsbuf;
}
rt_format = NV10TCL_RT_FORMAT_TYPE_LINEAR;
if (zeta) {
BEGIN_RING(celsius, NV10TCL_RT_PITCH, 1);
- OUT_RING (rt->stride | (zeta->stride << 16));
+ OUT_RING (rt->pitch | (zeta->pitch << 16));
} else {
BEGIN_RING(celsius, NV10TCL_RT_PITCH, 1);
- OUT_RING (rt->stride | (rt->stride << 16));
+ OUT_RING (rt->pitch | (rt->pitch << 16));
}
- nv10mt = (struct nv10_miptree *)rt->texture;
+ nv10mt = (struct nv10_miptree *)rt->base.texture;
nv10->rt[0] = nv10mt->buffer;
if (zeta_format)
{
- nv10mt = (struct nv10_miptree *)zeta->texture;
+ nv10mt = (struct nv10_miptree *)zeta->base.texture;
nv10->zeta = nv10mt->buffer;
}
--- /dev/null
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv10_context.h"
+#include "nv10_screen.h"
+#include "nv10_state.h"
+
+struct nv10_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv10_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv10_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->compressed = pt->compressed;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv10_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv10_miptree *mt = (struct nv10_miptree *)pt;
+ struct nv10_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv10_transfer);
+ if (!tx)
+ return NULL;
+
+ tx->base.refcount = 1;
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv10_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv10_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv10_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv10_screen *nvscreen = nv10_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv10_transfer_del(struct pipe_screen *pscreen, struct pipe_transfer **pptx)
+{
+ struct pipe_transfer *ptx = *pptx;
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct nv10_screen *nvscreen = nv10_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ *pptx = NULL;
+ if (--ptx->refcount)
+ return;
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv10_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv10_miptree *mt = (struct nv10_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv10_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv10_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+ struct nv10_miptree *mt = (struct nv10_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv10_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv10_transfer_new;
+ pscreen->tex_transfer_release = nv10_transfer_del;
+ pscreen->transfer_map = nv10_transfer_map;
+ pscreen->transfer_unmap = nv10_transfer_unmap;
+}
nv20_state.c \
nv20_state_emit.c \
nv20_surface.c \
+ nv20_transfer.c \
nv20_vbo.c
# nv20_vertprog.c
nv20_miptree_layout(struct nv20_miptree *nv20mt)
{
struct pipe_texture *pt = &nv20mt->base;
- boolean swizzled = FALSE;
uint width = pt->width[0], height = pt->height[0];
uint offset = 0;
int nr_faces, l, f;
+ uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY);
if (pt->target == PIPE_TEXTURE_CUBE) {
nr_faces = 6;
pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
- if (swizzled)
- nv20mt->level[l].pitch = pt->nblocksx[l] * pt->block.size;
+ if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
+ nv20mt->level[l].pitch = align(pt->width[0] * pt->block.size, 64);
else
- nv20mt->level[l].pitch = pt->nblocksx[0] * pt->block.size;
- nv20mt->level[l].pitch = (nv20mt->level[l].pitch + 63) & ~63;
+ nv20mt->level[l].pitch = pt->width[l] * pt->block.size;
nv20mt->level[l].image_offset =
CALLOC(nr_faces, sizeof(unsigned));
width = MAX2(1, width >> 1);
height = MAX2(1, height >> 1);
-
}
for (f = 0; f < nr_faces; f++) {
- for (l = 0; l <= pt->last_level; l++) {
+ for (l = 0; l < pt->last_level; l++) {
nv20mt->level[l].image_offset[f] = offset;
- offset += nv20mt->level[l].pitch * pt->height[l];
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
+ pt->width[l + 1] > 1 && pt->height[l + 1] > 1)
+ offset += align(nv20mt->level[l].pitch * pt->height[l], 64);
+ else
+ offset += nv20mt->level[l].pitch * pt->height[l];
}
+
+ nv20mt->level[l].image_offset[f] = offset;
+ offset += nv20mt->level[l].pitch * pt->height[l];
}
nv20mt->total_size = offset;
mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
else
if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
- PIPE_TEXTURE_USAGE_DISPLAY_TARGET))
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
else
if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
case PIPE_FORMAT_A8R8G8B8_UNORM:
case PIPE_FORMAT_X8R8G8B8_UNORM:
case PIPE_FORMAT_R16_SNORM:
+ {
+ if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
break;
+ }
default:
mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
}
unsigned flags)
{
struct nv20_miptree *nv20mt = (struct nv20_miptree *)pt;
- struct pipe_surface *ps;
+ struct nv04_surface *ns;
- ps = CALLOC_STRUCT(pipe_surface);
- if (!ps)
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
return NULL;
- pipe_texture_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = pt->width[level];
- ps->height = pt->height[level];
- ps->block = pt->block;
- ps->nblocksx = pt->nblocksx[level];
- ps->nblocksy = pt->nblocksy[level];
- ps->stride = nv20mt->level[level].pitch;
- ps->usage = flags;
- ps->status = PIPE_SURFACE_STATUS_DEFINED;
- ps->refcount = 1;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ ns->base.status = PIPE_SURFACE_STATUS_DEFINED;
+ ns->base.refcount = 1;
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv20mt->level[level].pitch;
if (pt->target == PIPE_TEXTURE_CUBE) {
- ps->offset = nv20mt->level[level].image_offset[face];
+ ns->base.offset = nv20mt->level[level].image_offset[face];
} else
if (pt->target == PIPE_TEXTURE_3D) {
- ps->offset = nv20mt->level[level].image_offset[zslice];
+ ns->base.offset = nv20mt->level[level].image_offset[zslice];
} else {
- ps->offset = nv20mt->level[level].image_offset[0];
+ ns->base.offset = nv20mt->level[level].image_offset[0];
}
- return ps;
+ return &ns->base;
}
static void
return FALSE;
}
-static void *
-nv20_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
- unsigned flags )
-{
- struct pipe_winsys *ws = screen->winsys;
- void *map;
- struct nv20_miptree *nv20mt = (struct nv20_miptree *)surface->texture;
-
- map = ws->buffer_map(ws, nv20mt->buffer, flags);
- if (!map)
- return NULL;
-
- return map + surface->offset;
-}
-
-static void
-nv20_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
-{
- struct pipe_winsys *ws = screen->winsys;
- struct nv20_miptree *nv20mt = (struct nv20_miptree *)surface->texture;
-
- ws->buffer_unmap(ws, nv20mt->buffer);
-}
-
static void
nv20_screen_destroy(struct pipe_screen *pscreen)
{
screen->pipe.is_format_supported = nv20_screen_is_format_supported;
- screen->pipe.surface_map = nv20_surface_map;
- screen->pipe.surface_unmap = nv20_surface_unmap;
-
nv20_screen_init_miptree_functions(&screen->pipe);
+ nv20_screen_init_transfer_functions(&screen->pipe);
u_simple_screen_init(&screen->pipe);
return &screen->pipe;
return (struct nv20_screen *)screen;
}
+
+void
+nv20_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
#endif
static void nv20_state_emit_framebuffer(struct nv20_context* nv20)
{
struct pipe_framebuffer_state* fb = nv20->framebuffer;
- struct pipe_surface *rt, *zeta = NULL;
+ struct nv04_surface *rt, *zeta = NULL;
uint32_t rt_format, w, h;
int colour_format = 0, zeta_format = 0;
struct nv20_miptree *nv20mt = 0;
w = fb->cbufs[0]->width;
h = fb->cbufs[0]->height;
colour_format = fb->cbufs[0]->format;
- rt = fb->cbufs[0];
+ rt = (struct nv04_surface *)fb->cbufs[0];
if (fb->zsbuf) {
if (colour_format) {
}
zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
+ zeta = (struct nv04_surface *)fb->zsbuf;
}
rt_format = NV20TCL_RT_FORMAT_TYPE_LINEAR | 0x20;
if (zeta) {
BEGIN_RING(kelvin, NV20TCL_RT_PITCH, 1);
- OUT_RING (rt->stride | (zeta->stride << 16));
+ OUT_RING (rt->pitch | (zeta->pitch << 16));
} else {
BEGIN_RING(kelvin, NV20TCL_RT_PITCH, 1);
- OUT_RING (rt->stride | (rt->stride << 16));
+ OUT_RING (rt->pitch | (rt->pitch << 16));
}
- nv20mt = (struct nv20_miptree *)rt->texture;
+ nv20mt = (struct nv20_miptree *)rt->base.texture;
nv20->rt[0] = nv20mt->buffer;
if (zeta_format)
{
- nv20mt = (struct nv20_miptree *)zeta->texture;
+ nv20mt = (struct nv20_miptree *)zeta->base.texture;
nv20->zeta = nv20mt->buffer;
}
--- /dev/null
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv20_context.h"
+#include "nv20_screen.h"
+#include "nv20_state.h"
+
+struct nv20_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv20_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv20_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->compressed = pt->compressed;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv20_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv20_miptree *mt = (struct nv20_miptree *)pt;
+ struct nv20_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv20_transfer);
+ if (!tx)
+ return NULL;
+
+ tx->base.refcount = 1;
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv20_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv20_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv20_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv20_screen *nvscreen = nv20_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv20_transfer_del(struct pipe_screen *pscreen, struct pipe_transfer **pptx)
+{
+ struct pipe_transfer *ptx = *pptx;
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct nv20_screen *nvscreen = nv20_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ *pptx = NULL;
+ if (--ptx->refcount)
+ return;
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv20_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv20_miptree *mt = (struct nv20_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv20_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv20_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+ struct nv20_miptree *mt = (struct nv20_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv20_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv20_transfer_new;
+ pscreen->tex_transfer_release = nv20_transfer_del;
+ pscreen->transfer_map = nv20_transfer_map;
+ pscreen->transfer_unmap = nv20_transfer_unmap;
+}
nv30_state_viewport.c \
nv30_state_zsa.c \
nv30_surface.c \
+ nv30_transfer.c \
nv30_vbo.c \
nv30_vertprog.c
{
struct pipe_winsys *ws = pscreen->winsys;
struct nv30_miptree *mt;
+ unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE;
mt = MALLOC(sizeof(struct nv30_miptree));
if (!mt)
mt->base = *pt;
mt->base.refcount = 1;
mt->base.screen = pscreen;
- mt->shadow_tex = NULL;
- mt->shadow_surface = NULL;
/* Swizzled textures must be POT */
if (pt->width[0] & (pt->width[0] - 1) ||
}
}
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+
nv30_miptree_layout(mt);
- mt->buffer = ws->buffer_create(ws, 256,
- PIPE_BUFFER_USAGE_PIXEL |
- NOUVEAU_BUFFER_USAGE_TEXTURE,
+ mt->buffer = ws->buffer_create(ws, 256, buf_usage,
mt->total_size);
if (!mt->buffer) {
FREE(mt);
FREE(mt->level[l].image_offset);
}
- if (mt->shadow_tex) {
- if (mt->shadow_surface)
- pscreen->tex_surface_release(pscreen, &mt->shadow_surface);
- nv30_miptree_release(pscreen, &mt->shadow_tex);
- }
-
FREE(mt);
}
unsigned flags)
{
struct nv30_miptree *nv30mt = (struct nv30_miptree *)pt;
- struct pipe_surface *ps;
+ struct nv04_surface *ns;
- ps = CALLOC_STRUCT(pipe_surface);
- if (!ps)
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
return NULL;
- pipe_texture_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = pt->width[level];
- ps->height = pt->height[level];
- ps->block = pt->block;
- ps->nblocksx = pt->nblocksx[level];
- ps->nblocksy = pt->nblocksy[level];
- ps->stride = nv30mt->level[level].pitch;
- ps->usage = flags;
- ps->status = PIPE_SURFACE_STATUS_DEFINED;
- ps->refcount = 1;
- ps->face = face;
- ps->level = level;
- ps->zslice = zslice;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ ns->base.status = PIPE_SURFACE_STATUS_DEFINED;
+ ns->base.refcount = 1;
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv30mt->level[level].pitch;
if (pt->target == PIPE_TEXTURE_CUBE) {
- ps->offset = nv30mt->level[level].image_offset[face];
+ ns->base.offset = nv30mt->level[level].image_offset[face];
} else
if (pt->target == PIPE_TEXTURE_3D) {
- ps->offset = nv30mt->level[level].image_offset[zslice];
+ ns->base.offset = nv30mt->level[level].image_offset[zslice];
} else {
- ps->offset = nv30mt->level[level].image_offset[0];
+ ns->base.offset = nv30mt->level[level].image_offset[0];
}
- return ps;
+ return &ns->base;
}
static void
return mt->buffer;
}
-static void *
-nv30_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
- unsigned flags )
-{
- struct pipe_winsys *ws = screen->winsys;
- struct pipe_surface *surface_to_map;
- void *map;
-
- if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
- struct nv30_miptree *mt = (struct nv30_miptree *)surface->texture;
-
- if (!mt->shadow_tex) {
- unsigned old_tex_usage = surface->texture->tex_usage;
- surface->texture->tex_usage = NOUVEAU_TEXTURE_USAGE_LINEAR |
- PIPE_TEXTURE_USAGE_DYNAMIC;
- mt->shadow_tex = screen->texture_create(screen, surface->texture);
- surface->texture->tex_usage = old_tex_usage;
-
- assert(mt->shadow_tex->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR);
- }
-
- mt->shadow_surface = screen->get_tex_surface
- (
- screen, mt->shadow_tex,
- surface->face, surface->level, surface->zslice,
- surface->usage
- );
-
- surface_to_map = mt->shadow_surface;
- }
- else
- surface_to_map = surface;
-
- assert(surface_to_map);
-
- map = ws->buffer_map(ws, nv30_surface_buffer(surface_to_map), flags);
- if (!map)
- return NULL;
-
- return map + surface_to_map->offset;
-}
-
-static void
-nv30_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
-{
- struct pipe_winsys *ws = screen->winsys;
- struct pipe_surface *surface_to_unmap;
-
- /* TODO: Copy from shadow just before push buffer is flushed instead.
- There are probably some programs that map/unmap excessively
- before rendering. */
- if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
- struct nv30_miptree *mt = (struct nv30_miptree *)surface->texture;
-
- assert(mt->shadow_tex);
-
- surface_to_unmap = mt->shadow_surface;
- }
- else
- surface_to_unmap = surface;
-
- assert(surface_to_unmap);
-
- ws->buffer_unmap(ws, nv30_surface_buffer(surface_to_unmap));
-
- if (surface_to_unmap != surface) {
- struct nv30_screen *nvscreen = nv30_screen(screen);
-
- nvscreen->eng2d->copy(nvscreen->eng2d, surface, 0, 0,
- surface_to_unmap, 0, 0,
- surface->width, surface->height);
-
- screen->tex_surface_release(screen, &surface_to_unmap);
- }
-}
-
static void
nv30_screen_destroy(struct pipe_screen *pscreen)
{
screen->pipe.is_format_supported = nv30_screen_surface_format_supported;
- screen->pipe.surface_map = nv30_surface_map;
- screen->pipe.surface_unmap = nv30_surface_unmap;
-
nv30_screen_init_miptree_functions(&screen->pipe);
+ nv30_screen_init_transfer_functions(&screen->pipe);
u_simple_screen_init(&screen->pipe);
return &screen->pipe;
return (struct nv30_screen *)screen;
}
+void
+nv30_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
#endif
struct pipe_buffer *buffer;
uint total_size;
- struct pipe_texture *shadow_tex;
- struct pipe_surface *shadow_surface;
-
struct {
uint pitch;
uint *image_offset;
nv30_state_framebuffer_validate(struct nv30_context *nv30)
{
struct pipe_framebuffer_state *fb = &nv30->framebuffer;
- struct pipe_surface *rt[2], *zeta = NULL;
+ struct nv04_surface *rt[2], *zeta = NULL;
uint32_t rt_enable, rt_format;
int i, colour_format = 0, zeta_format = 0;
struct nouveau_stateobj *so = so_new(64, 10);
} else {
colour_format = fb->cbufs[i]->format;
rt_enable |= (NV34TCL_RT_ENABLE_COLOR0 << i);
- rt[i] = fb->cbufs[i];
+ rt[i] = (struct nv04_surface *)fb->cbufs[i];
}
}
if (fb->zsbuf) {
zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
+ zeta = (struct nv04_surface *)fb->zsbuf;
}
- if (!(rt[0]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ if (!(rt[0]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
assert(!(fb->width & (fb->width - 1)) && !(fb->height & (fb->height - 1)));
for (i = 1; i < fb->nr_cbufs; i++)
- assert(!(rt[i]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
+ assert(!(rt[i]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
/* FIXME: NV34TCL_RT_FORMAT_LOG2_[WIDTH/HEIGHT] */
rt_format = NV34TCL_RT_FORMAT_TYPE_SWIZZLED |
}
if (rt_enable & NV34TCL_RT_ENABLE_COLOR0) {
- uint32_t pitch = rt[0]->stride;
+ uint32_t pitch = rt[0]->pitch;
if (zeta) {
- pitch |= (zeta->stride << 16);
+ pitch |= (zeta->pitch << 16);
} else {
pitch |= (pitch << 16);
}
- nv30mt = (struct nv30_miptree *)rt[0]->texture;
+ nv30mt = (struct nv30_miptree *)rt[0]->base.texture;
so_method(so, nv30->screen->rankine, NV34TCL_DMA_COLOR0, 1);
so_reloc (so, nv30mt->buffer, 0, rt_flags | NOUVEAU_BO_OR,
nv30->nvws->channel->vram->handle,
nv30->nvws->channel->gart->handle);
so_method(so, nv30->screen->rankine, NV34TCL_COLOR0_PITCH, 2);
so_data (so, pitch);
- so_reloc (so, nv30mt->buffer, rt[0]->offset, rt_flags |
+ so_reloc (so, nv30mt->buffer, rt[0]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
}
if (rt_enable & NV34TCL_RT_ENABLE_COLOR1) {
- nv30mt = (struct nv30_miptree *)rt[1]->texture;
+ nv30mt = (struct nv30_miptree *)rt[1]->base.texture;
so_method(so, nv30->screen->rankine, NV34TCL_DMA_COLOR1, 1);
so_reloc (so, nv30mt->buffer, 0, rt_flags | NOUVEAU_BO_OR,
nv30->nvws->channel->vram->handle,
nv30->nvws->channel->gart->handle);
so_method(so, nv30->screen->rankine, NV34TCL_COLOR1_OFFSET, 2);
- so_reloc (so, nv30mt->buffer, rt[1]->offset, rt_flags |
+ so_reloc (so, nv30mt->buffer, rt[1]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
- so_data (so, rt[1]->stride);
+ so_data (so, rt[1]->pitch);
}
if (zeta_format) {
- nv30mt = (struct nv30_miptree *)zeta->texture;
+ nv30mt = (struct nv30_miptree *)zeta->base.texture;
so_method(so, nv30->screen->rankine, NV34TCL_DMA_ZETA, 1);
so_reloc (so, nv30mt->buffer, 0, rt_flags | NOUVEAU_BO_OR,
nv30->nvws->channel->vram->handle,
nv30->nvws->channel->gart->handle);
so_method(so, nv30->screen->rankine, NV34TCL_ZETA_OFFSET, 1);
- so_reloc (so, nv30mt->buffer, zeta->offset, rt_flags |
+ so_reloc (so, nv30mt->buffer, zeta->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
/* TODO: allocate LMA depth buffer */
}
--- /dev/null
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv30_context.h"
+#include "nv30_screen.h"
+#include "nv30_state.h"
+
+struct nv30_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv30_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv30_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->compressed = pt->compressed;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv30_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv30_miptree *mt = (struct nv30_miptree *)pt;
+ struct nv30_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv30_transfer);
+ if (!tx)
+ return NULL;
+
+ tx->base.refcount = 1;
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv30_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv30_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv30_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv30_screen *nvscreen = nv30_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv30_transfer_del(struct pipe_screen *pscreen, struct pipe_transfer **pptx)
+{
+ struct pipe_transfer *ptx = *pptx;
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct nv30_screen *nvscreen = nv30_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ *pptx = NULL;
+ if (--ptx->refcount)
+ return;
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv30_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv30_miptree *mt = (struct nv30_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv30_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv30_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+ struct nv30_miptree *mt = (struct nv30_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv30_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv30_transfer_new;
+ pscreen->tex_transfer_release = nv30_transfer_del;
+ pscreen->transfer_map = nv30_transfer_map;
+ pscreen->transfer_unmap = nv30_transfer_unmap;
+}
nv40_state_viewport.c \
nv40_state_zsa.c \
nv40_surface.c \
+ nv40_transfer.c \
nv40_vbo.c \
nv40_vertprog.c
mt->base = *pt;
mt->base.refcount = 1;
mt->base.screen = pscreen;
- mt->shadow_tex = NULL;
- mt->shadow_surface = NULL;
/* Swizzled textures must be POT */
if (pt->width[0] & (pt->width[0] - 1) ||
FREE(mt->level[l].image_offset);
}
- if (mt->shadow_tex) {
- if (mt->shadow_surface)
- pscreen->tex_surface_release(pscreen, &mt->shadow_surface);
- nv40_miptree_release(pscreen, &mt->shadow_tex);
- }
-
FREE(mt);
}
unsigned flags)
{
struct nv40_miptree *mt = (struct nv40_miptree *)pt;
- struct pipe_surface *ps;
+ struct nv04_surface *ns;
- ps = CALLOC_STRUCT(pipe_surface);
- if (!ps)
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
return NULL;
- pipe_texture_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = pt->width[level];
- ps->height = pt->height[level];
- ps->block = pt->block;
- ps->nblocksx = pt->nblocksx[level];
- ps->nblocksy = pt->nblocksy[level];
- ps->stride = mt->level[level].pitch;
- ps->usage = flags;
- ps->status = PIPE_SURFACE_STATUS_DEFINED;
- ps->refcount = 1;
- ps->face = face;
- ps->level = level;
- ps->zslice = zslice;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ ns->base.status = PIPE_SURFACE_STATUS_DEFINED;
+ ns->base.refcount = 1;
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = mt->level[level].pitch;
if (pt->target == PIPE_TEXTURE_CUBE) {
- ps->offset = mt->level[level].image_offset[face];
+ ns->base.offset = mt->level[level].image_offset[face];
} else
if (pt->target == PIPE_TEXTURE_3D) {
- ps->offset = mt->level[level].image_offset[zslice];
+ ns->base.offset = mt->level[level].image_offset[zslice];
} else {
- ps->offset = mt->level[level].image_offset[0];
+ ns->base.offset = mt->level[level].image_offset[0];
}
- return ps;
+ return &ns->base;
}
static void
return mt->buffer;
}
-static void *
-nv40_surface_map(struct pipe_screen *screen, struct pipe_surface *surface,
- unsigned flags )
-{
- struct pipe_winsys *ws = screen->winsys;
- struct pipe_surface *surface_to_map;
- void *map;
-
- if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
- struct nv40_miptree *mt = (struct nv40_miptree *)surface->texture;
-
- if (!mt->shadow_tex) {
- unsigned old_tex_usage = surface->texture->tex_usage;
- surface->texture->tex_usage = NOUVEAU_TEXTURE_USAGE_LINEAR |
- PIPE_TEXTURE_USAGE_DYNAMIC;
- mt->shadow_tex = screen->texture_create(screen, surface->texture);
- surface->texture->tex_usage = old_tex_usage;
-
- assert(mt->shadow_tex->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR);
- }
-
- mt->shadow_surface = screen->get_tex_surface
- (
- screen, mt->shadow_tex,
- surface->face, surface->level, surface->zslice,
- surface->usage
- );
-
- surface_to_map = mt->shadow_surface;
- }
- else
- surface_to_map = surface;
-
- assert(surface_to_map);
- map = ws->buffer_map(ws, nv40_surface_buffer(surface_to_map), flags);
- if (!map)
- return NULL;
-
- return map + surface_to_map->offset;
-}
-
-static void
-nv40_surface_unmap(struct pipe_screen *screen, struct pipe_surface *surface)
-{
- struct pipe_winsys *ws = screen->winsys;
- struct pipe_surface *surface_to_unmap;
-
- /* TODO: Copy from shadow just before push buffer is flushed instead.
- There are probably some programs that map/unmap excessively
- before rendering. */
- if (!(surface->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
- struct nv40_miptree *mt = (struct nv40_miptree *)surface->texture;
-
- assert(mt->shadow_tex);
-
- surface_to_unmap = mt->shadow_surface;
- }
- else
- surface_to_unmap = surface;
-
- assert(surface_to_unmap);
-
- ws->buffer_unmap(ws, nv40_surface_buffer(surface_to_unmap));
-
- if (surface_to_unmap != surface) {
- struct nv40_screen *nvscreen = nv40_screen(screen);
-
- nvscreen->eng2d->copy(nvscreen->eng2d, surface, 0, 0,
- surface_to_unmap, 0, 0,
- surface->width, surface->height);
-
- screen->tex_surface_release(screen, &surface_to_unmap);
- }
-}
-
static void
nv40_screen_destroy(struct pipe_screen *pscreen)
{
{
struct nv40_screen *screen = CALLOC_STRUCT(nv40_screen);
struct nouveau_stateobj *so;
- unsigned curie_class;
+ unsigned curie_class = 0;
unsigned chipset = nvws->channel->device->chipset;
int ret;
if (NV6X_GRCLASS4497_CHIPSETS & (1 << (chipset & 0x0f)))
curie_class = NV44TCL;
break;
- default:
- break;
}
if (!curie_class) {
screen->pipe.is_format_supported = nv40_screen_surface_format_supported;
- screen->pipe.surface_map = nv40_surface_map;
- screen->pipe.surface_unmap = nv40_surface_unmap;
-
nv40_screen_init_miptree_functions(&screen->pipe);
+ nv40_screen_init_transfer_functions(&screen->pipe);
u_simple_screen_init(&screen->pipe);
return &screen->pipe;
return (struct nv40_screen *)screen;
}
+void
+nv40_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
#endif
struct pipe_buffer *buffer;
uint total_size;
- struct pipe_texture *shadow_tex;
- struct pipe_surface *shadow_surface;
-
struct {
uint pitch;
uint *image_offset;
nv40_state_framebuffer_validate(struct nv40_context *nv40)
{
struct pipe_framebuffer_state *fb = &nv40->framebuffer;
- struct pipe_surface *rt[4], *zeta;
+ struct nv04_surface *rt[4], *zeta;
uint32_t rt_enable, rt_format;
int i, colour_format = 0, zeta_format = 0;
struct nouveau_stateobj *so = so_new(64, 10);
} else {
colour_format = fb->cbufs[i]->format;
rt_enable |= (NV40TCL_RT_ENABLE_COLOR0 << i);
- rt[i] = fb->cbufs[i];
+ rt[i] = (struct nv04_surface *)fb->cbufs[i];
}
}
if (fb->zsbuf) {
zeta_format = fb->zsbuf->format;
- zeta = fb->zsbuf;
+ zeta = (struct nv04_surface *)fb->zsbuf;
}
- if (!(rt[0]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ if (!(rt[0]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
assert(!(fb->width & (fb->width - 1)) && !(fb->height & (fb->height - 1)));
for (i = 1; i < fb->nr_cbufs; i++)
- assert(!(rt[i]->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
+ assert(!(rt[i]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
rt_format = NV40TCL_RT_FORMAT_TYPE_SWIZZLED |
log2i(fb->width) << NV40TCL_RT_FORMAT_LOG2_WIDTH_SHIFT |
if (rt_enable & NV40TCL_RT_ENABLE_COLOR0) {
so_method(so, nv40->screen->curie, NV40TCL_DMA_COLOR0, 1);
- so_reloc (so, nv40_surface_buffer(rt[0]), 0, rt_flags | NOUVEAU_BO_OR,
+ so_reloc (so, nv40_surface_buffer(&rt[0]->base), 0, rt_flags | NOUVEAU_BO_OR,
nv40->nvws->channel->vram->handle,
nv40->nvws->channel->gart->handle);
so_method(so, nv40->screen->curie, NV40TCL_COLOR0_PITCH, 2);
- so_data (so, rt[0]->stride);
- so_reloc (so, nv40_surface_buffer(rt[0]), rt[0]->offset, rt_flags |
+ so_data (so, rt[0]->pitch);
+ so_reloc (so, nv40_surface_buffer(&rt[0]->base), rt[0]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
}
if (rt_enable & NV40TCL_RT_ENABLE_COLOR1) {
so_method(so, nv40->screen->curie, NV40TCL_DMA_COLOR1, 1);
- so_reloc (so, nv40_surface_buffer(rt[1]), 0, rt_flags | NOUVEAU_BO_OR,
+ so_reloc (so, nv40_surface_buffer(&rt[1]->base), 0, rt_flags | NOUVEAU_BO_OR,
nv40->nvws->channel->vram->handle,
nv40->nvws->channel->gart->handle);
so_method(so, nv40->screen->curie, NV40TCL_COLOR1_OFFSET, 2);
- so_reloc (so, nv40_surface_buffer(rt[1]), rt[1]->offset, rt_flags |
+ so_reloc (so, nv40_surface_buffer(&rt[1]->base), rt[1]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
- so_data (so, rt[1]->stride);
+ so_data (so, rt[1]->pitch);
}
if (rt_enable & NV40TCL_RT_ENABLE_COLOR2) {
so_method(so, nv40->screen->curie, NV40TCL_DMA_COLOR2, 1);
- so_reloc (so, nv40_surface_buffer(rt[2]), 0, rt_flags | NOUVEAU_BO_OR,
+ so_reloc (so, nv40_surface_buffer(&rt[2]->base), 0, rt_flags | NOUVEAU_BO_OR,
nv40->nvws->channel->vram->handle,
nv40->nvws->channel->gart->handle);
so_method(so, nv40->screen->curie, NV40TCL_COLOR2_OFFSET, 1);
- so_reloc (so, nv40_surface_buffer(rt[2]), rt[2]->offset, rt_flags |
+ so_reloc (so, nv40_surface_buffer(&rt[2]->base), rt[2]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
so_method(so, nv40->screen->curie, NV40TCL_COLOR2_PITCH, 1);
- so_data (so, rt[2]->stride);
+ so_data (so, rt[2]->pitch);
}
if (rt_enable & NV40TCL_RT_ENABLE_COLOR3) {
so_method(so, nv40->screen->curie, NV40TCL_DMA_COLOR3, 1);
- so_reloc (so, nv40_surface_buffer(rt[3]), 0, rt_flags | NOUVEAU_BO_OR,
+ so_reloc (so, nv40_surface_buffer(&rt[3]->base), 0, rt_flags | NOUVEAU_BO_OR,
nv40->nvws->channel->vram->handle,
nv40->nvws->channel->gart->handle);
so_method(so, nv40->screen->curie, NV40TCL_COLOR3_OFFSET, 1);
- so_reloc (so, nv40_surface_buffer(rt[3]), rt[3]->offset, rt_flags |
+ so_reloc (so, nv40_surface_buffer(&rt[3]->base), rt[3]->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
so_method(so, nv40->screen->curie, NV40TCL_COLOR3_PITCH, 1);
- so_data (so, rt[3]->stride);
+ so_data (so, rt[3]->pitch);
}
if (zeta_format) {
so_method(so, nv40->screen->curie, NV40TCL_DMA_ZETA, 1);
- so_reloc (so, nv40_surface_buffer(zeta), 0, rt_flags | NOUVEAU_BO_OR,
+ so_reloc (so, nv40_surface_buffer(&zeta->base), 0, rt_flags | NOUVEAU_BO_OR,
nv40->nvws->channel->vram->handle,
nv40->nvws->channel->gart->handle);
so_method(so, nv40->screen->curie, NV40TCL_ZETA_OFFSET, 1);
- so_reloc (so, nv40_surface_buffer(zeta), zeta->offset, rt_flags |
+ so_reloc (so, nv40_surface_buffer(&zeta->base), zeta->base.offset, rt_flags |
NOUVEAU_BO_LOW, 0, 0);
so_method(so, nv40->screen->curie, NV40TCL_ZETA_PITCH, 1);
- so_data (so, zeta->stride);
+ so_data (so, zeta->pitch);
}
so_method(so, nv40->screen->curie, NV40TCL_RT_ENABLE, 1);
--- /dev/null
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv40_context.h"
+#include "nv40_screen.h"
+#include "nv40_state.h"
+
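+/* A transfer either maps the texture's own linear storage (direct == true)
+ * or goes through a temporary linear texture whose surface is blitted
+ * to/from the real texture (direct == false). */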
+struct nv40_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
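+/* Translate PIPE_TRANSFER_* usage into the PIPE_BUFFER_USAGE_CPU_* flags
+ * needed to map the backing buffer. */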
+static unsigned nv40_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
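+/* Fill in a template for a single-level, linear, dynamic texture matching
+ * one mip level of pt; used as CPU-accessible staging storage when the
+ * texture itself is not mapped directly. */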
+static void
+nv40_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+	template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->compressed = pt->compressed;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv40_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)pt;
+ struct nv40_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv40_transfer);
+ if (!tx)
+ return NULL;
+
+ tx->base.refcount = 1;
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv40_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
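+	/* Indirect access: create a linear staging texture covering this
+	 * level and transfer through it instead. */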
+ nv40_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+		pipe_texture_reference(&tx->base.texture, NULL);
+		FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv40_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+		pipe_texture_reference(&tx->base.texture, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
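+	/* For read and read/write transfers, bring the current texture
+	 * contents into the staging surface with the 2D engine. */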
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv40_screen *nvscreen = nv40_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv40_transfer_del(struct pipe_screen *pscreen, struct pipe_transfer **pptx)
+{
+ struct pipe_transfer *ptx = *pptx;
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+
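+	/* Write-back: if a staging surface was used for anything other than a
+	 * read-only transfer, blit its contents into the real texture. */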
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct nv40_screen *nvscreen = nv40_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ *pptx = NULL;
+ if (--ptx->refcount)
+ return;
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv40_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv40_miptree *mt = (struct nv40_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv40_usage_tx_to_buf(ptx->usage));
+
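+	/* Offset to the requested rectangle; x/y are relative to the mapped
+	 * surface in both the direct and the staging case. */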
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv40_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+ struct nv40_miptree *mt = (struct nv40_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
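+/* Hook the transfer entry points into the screen; these replace the old
+ * surface_map/surface_unmap callbacks removed above. */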
+void
+nv40_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv40_transfer_new;
+ pscreen->tex_transfer_release = nv40_transfer_del;
+ pscreen->transfer_map = nv40_transfer_map;
+ pscreen->transfer_unmap = nv40_transfer_unmap;
+}
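+
+/* Rough usage sketch (not part of this patch) of how a state tracker is
+ * expected to drive these entry points:
+ *
+ *   struct pipe_transfer *t;
+ *   void *map;
+ *
+ *   t = screen->get_tex_transfer(screen, pt, face, level, zslice,
+ *                                PIPE_TRANSFER_WRITE, x, y, w, h);
+ *   map = screen->transfer_map(screen, t);
+ *   ... write w x h texels, rows t->stride bytes apart ...
+ *   screen->transfer_unmap(screen, t);
+ *   screen->tex_transfer_release(screen, &t);
+ */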